def check_virsh_command_and_option(test, command, option=None):
    """
    Check if virsh command exists

    :param test: test object
    :param command: the command to validate
    :param option: the option for the command
    :raise: test.cancel if command is not supported
    """
    msg = "This version of libvirt does not support "
    if not virsh.has_help_command(command):
        test.cancel(msg + "virsh command '%s'" % command)

    if option and not virsh.has_command_help_match(command, option):
        test.cancel(msg + "virsh command '%s' with option '%s'"
                    % (command, option))
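
# Illustrative usage sketch (not part of the test suite): the helper is
# meant to be called from a test entry point, which is where the avocado
# `test` object comes from. The command and option names below are just
# examples.
#
#     def run(test, params, env):
#         # Cancels early when libvirt lacks the command or its option.
#         check_virsh_command_and_option(test, "snapshot-create-as",
#                                        "--memspec")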
def run_virsh_snapshot_create_as(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that the clean repo can be used because qemu-guest-agent
    needs to be installed in guest

    The command creates a snapshot (disk and RAM) from arguments, covering
    the following points
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic (negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """
    if not virsh.has_help_command('snapshot-create-as'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    external_disk = params.get("external_disk")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    diskspec_opts = params.get("diskspec_opts")

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # if the parameters have the disk without "file=" then we only need to
        # add testdir for it.
        if mem_options is None:
            mem_options = os.path.join(test.virtdir, memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(test.virtdir, bad_disk)
        os.open(bad_disk, os.O_RDWR | os.O_CREAT)

    # Generate external disk
    if external_disk is not None:
        external_disk = os.path.join(test.virtdir, external_disk)
        commands.getoutput("qemu-img create -f qcow2 %s 1G" % external_disk)

    # Start qemu-ga on guest if have --quiesce
    if options.find("quiesce") >= 0:
        if vm.is_alive():
            vm.destroy()
        virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh)
        virt_xml_obj.set_agent_channel(vm_name)
        vm.start()
        if start_ga == "yes":
            session = vm.wait_for_login()

            # Check if qemu-ga already started automatically
            cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
            stat_install = session.cmd_status(cmd, 300)
            if stat_install != 0:
                xml_recover(vmxml_backup)
                raise error.TestFail("Fail to install qemu-guest-agent, make "
                                     "sure that you have usable repo in guest")

            # Check if qemu-ga already started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                session.cmd("qemu-ga -d")
                # Check if the qemu-ga really started
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if stat_ps != 0:
                    xml_recover(vmxml_backup)
                    raise error.TestFail("Fail to run qemu-ga in guest")

    if domain_state == "paused":
        virsh.suspend(vm_name)

    # Record the previous snapshot-list
    snaps_before = virsh.snapshot_list(vm_name)

    # Run virsh command
    # May create several snapshots, according to configuration
    for count in range(int(multi_num)):
        cmd_result = virsh.snapshot_create_as(vm_name, options,
                                              ignore_status=True, debug=True)
        output = cmd_result.stdout.strip()
        status = cmd_result.exit_status

        # check status_error
        if status_error == "yes":
            if status == 0:
                xml_recover(vmxml_backup)
                raise error.TestFail("Run successfully with wrong command!")
            else:
                # Check memspec file should be removed if failed
                if (options.find("memspec") >= 0
                        and options.find("atomic") >= 0):
                    if os.path.isfile(option_dict['memspec']):
                        os.remove(option_dict['memspec'])
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Run failed but file %s exist"
                                             % option_dict['memspec'])
                    else:
                        logging.info("Run failed as expected and memspec file"
                                     " already been removed")
                else:
                    logging.info("Run failed as expected")
        elif status_error == "no":
            if status != 0:
                xml_recover(vmxml_backup)
                raise error.TestFail("Run failed with right command: %s"
                                     % output)
            else:
                # Check the special options
                snaps_list = virsh.snapshot_list(vm_name)
                logging.debug("snaps_list is %s", snaps_list)

                no_metadata = options.find("--no-metadata")
                fdisks = "disks"

                # command with print-xml will not really create snapshot
                if options.find("print-xml") >= 0:
                    xtf = xml_utils.XMLTreeFile(output)

                    # With --print-xml there isn't new snapshot created
                    if len(snaps_before) != len(snaps_list):
                        xml_recover(vmxml_backup)
                        raise error.TestFail("--print-xml create new snapshot")
                else:
                    # The following does not check with print-xml
                    get_sname = output.split()[2]

                    # check domain/snapshot xml depends on if have metadata
                    if no_metadata < 0:
                        output_dump = virsh.snapshot_dumpxml(vm_name,
                                                             get_sname)
                    else:
                        output_dump = virsh.dumpxml(vm_name)
                        fdisks = "devices"

                    xtf = xml_utils.XMLTreeFile(output_dump)

                    find = 0
                    for snap in snaps_list:
                        if snap == get_sname:
                            find = 1
                            break

                    # Should find snap in snaplist without --no-metadata
                    if (find == 0 and no_metadata < 0):
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Can not find snapshot %s!"
                                             % get_sname)
                    # Should not find snap in list without metadata
                    elif (find == 1 and no_metadata >= 0):
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Can find snapshot metadata even "
                                             "if have --no-metadata")
                    elif (find == 0 and no_metadata >= 0):
                        logging.info("Can not find snapshot %s as no-metadata "
                                     "is given", get_sname)

                        # Check snapshot only in qemu-img
                        if (options.find("--disk-only") < 0
                                and options.find("--memspec") < 0):
                            ret = check_snap_in_image(vm_name, get_sname)
                            if ret is False:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("No snap info in image")
                    else:
                        logging.info("Find snapshot %s in snapshot list.",
                                     get_sname)

                    # Check if the disk file exist when disk-only is given
                    if options.find("disk-only") >= 0:
                        for disk in xtf.find(fdisks).findall('disk'):
                            diskpath = disk.find('source').get('file')
                            if os.path.isfile(diskpath):
                                logging.info("disk file %s exist", diskpath)
                                os.remove(diskpath)
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Can not find disk %s"
                                                     % diskpath)

                    # Check if the guest is halted when 'halt' is given
                    if options.find("halt") >= 0:
                        domstate = virsh.domstate(vm_name)
                        if re.match("shut off", domstate.stdout):
                            logging.info("Domain is halted after create "
                                         "snapshot")
                        else:
                            xml_recover(vmxml_backup)
                            raise error.TestFail("Domain is not halted after "
                                                 "snapshot created")

                # Check the snapshot xml regardless of having print-xml or not
                if (options.find("name") >= 0 and no_metadata < 0):
                    if xtf.findtext('name') == option_dict["name"]:
                        logging.info("get snapshot name same as set")
                    else:
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Get wrong snapshot name %s"
                                             % xtf.findtext('name'))

                if (options.find("description") >= 0 and no_metadata < 0):
                    desc = xtf.findtext('description')
                    if desc == option_dict["description"]:
                        logging.info("get snapshot description same as set")
                    else:
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Get wrong description on xml")

                if options.find("diskspec") >= 0:
                    if isinstance(option_dict['diskspec'], list):
                        index = len(option_dict['diskspec'])
                    else:
                        index = 1

                    disks = xtf.find(fdisks).findall('disk')

                    for num in range(index):
                        if isinstance(option_dict['diskspec'], list):
                            option_disk = option_dict['diskspec'][num]
                        else:
                            option_disk = option_dict['diskspec']

                        option_disk = "name=" + option_disk
                        disk_dict = utils_misc.valued_option_dict(option_disk,
                                                                  ",", 0, "=")
                        logging.debug("disk_dict is %s", disk_dict)

                        # For no metadata snapshot do not check name and
                        # snapshot
                        if no_metadata < 0:
                            dname = disks[num].get('name')
                            logging.debug("dname is %s", dname)
                            if dname == disk_dict['name']:
                                logging.info("get disk%d name same as set in "
                                             "diskspec", num)
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Get wrong disk%d name %s"
                                                     % (num, dname))

                            if option_disk.find('snapshot=') >= 0:
                                dsnap = disks[num].get('snapshot')
                                logging.debug("dsnap is %s", dsnap)
                                if dsnap == disk_dict['snapshot']:
                                    logging.info("get disk%d snapshot type "
                                                 "same as set in diskspec",
                                                 num)
                                else:
                                    xml_recover(vmxml_backup)
                                    raise error.TestFail("Get wrong disk%d "
                                                         "snapshot type %s"
                                                         % (num, dsnap))

                        if option_disk.find('driver=') >= 0:
                            dtype = disks[num].find('driver').get('type')
                            if dtype == disk_dict['driver']:
                                logging.info("get disk%d driver type same as "
                                             "set in diskspec", num)
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Get wrong disk%d driver "
                                                     "type %s" % (num, dtype))

                        if option_disk.find('file=') >= 0:
                            sfile = disks[num].find('source').get('file')
                            if sfile == disk_dict['file']:
                                logging.info("get disk%d source file same as "
                                             "set in diskspec", num)
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Get wrong disk%d source "
                                                     "file %s" % (num, sfile))

                # For memspec check if the xml is same as setting
                # Also check if the mem file exists
                if options.find("memspec") >= 0:
                    memspec = option_dict['memspec']
                    if not re.search('file=', option_dict['memspec']):
                        memspec = 'file=' + option_dict['memspec']

                    mem_dict = utils_misc.valued_option_dict(memspec, ",", 0,
                                                             "=")
                    logging.debug("mem_dict is %s", mem_dict)

                    if no_metadata < 0:
                        if memspec.find('snapshot=') >= 0:
                            snap = xtf.find('memory').get('snapshot')
                            if snap == mem_dict['snapshot']:
                                logging.info("get memory snapshot type same as"
                                             " set in diskspec")
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Get wrong memory "
                                                     "snapshot type on "
                                                     "print xml")

                        memfile = xtf.find('memory').get('file')
                        if memfile == mem_dict['file']:
                            logging.info("get memory file same as set in "
                                         "diskspec")
                        else:
                            xml_recover(vmxml_backup)
                            raise error.TestFail("Get wrong memory file on "
                                                 "print xml %s" % memfile)

                    if options.find("print-xml") < 0:
                        if os.path.isfile(mem_dict['file']):
                            logging.info("memory file generated")
                            os.remove(mem_dict['file'])
                        else:
                            xml_recover(vmxml_backup)
                            raise error.TestFail("Fail to generate memory "
                                                 "file %s" % mem_dict['file'])

    # Environment clean
    if options.find("quiesce") >= 0 and start_ga == "yes":
        session.cmd("rpm -e qemu-guest-agent")

    # recover domain xml
    xml_recover(vmxml_backup)
    path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
    if os.path.isfile(path):
        raise error.TestFail("Still can find snapshot metadata")

    # rm bad disks
    if bad_disk is not None:
        os.remove(bad_disk)
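
# Illustrative sketch (hypothetical re-implementation, not the real
# utils_misc.valued_option_dict): shows the option_dict shape the checks
# above rely on -- "--opt value" pairs keyed by option name, with repeated
# options such as multiple --diskspec collected into a list.
def _example_parse_options(options):
    import re
    result = {}
    for match in re.finditer(r'--(?!-)(\S+)\s+(\S+)', options):
        key, value = match.group(1), match.group(2)
        if key in result:
            # A repeated option becomes a list of values
            if not isinstance(result[key], list):
                result[key] = [result[key]]
            result[key].append(value)
        else:
            result[key] = value
    return result

# e.g. _example_parse_options(
#     "--name snap1 --diskspec vda,file=/tmp/a --diskspec vdb")
# -> {'name': 'snap1', 'diskspec': ['vda,file=/tmp/a', 'vdb']}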
def run(test, params, env):
    """
    Test the command virsh freecell

    (1) Call virsh freecell
    (2) Call virsh freecell --all
    (3) Call virsh freecell with a numeric argument
    (4) Call virsh freecell xyz
    (5) Call virsh freecell with libvirtd service stop
    """
    args = params.get("virsh_freecell_args")
    option = params.get("virsh_freecell_opts")

    # Prepare libvirtd service
    check_libvirtd = "libvirtd" in params
    libvirtd = params.get("libvirtd")
    if check_libvirtd:
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

    # Run test case
    cmd_result = virsh.freecell(cellno=args, options=option)
    output = cmd_result.stdout.strip()
    status = cmd_result.exit_status

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check the output
    if virsh.has_help_command('numatune'):
        OLD_LIBVIRT = False
    else:
        OLD_LIBVIRT = True
        if option == '--all':
            test.cancel("Older libvirt virsh freecell "
                        "doesn't support --all option")

    def output_check(freecell_output):
        if not re.search("ki?B", freecell_output, re.IGNORECASE):
            test.fail("virsh freecell output invalid: " + freecell_output)

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off":
                test.fail("Command 'virsh freecell' succeeded "
                          "with libvirtd service stopped, incorrect")
            else:
                # newer libvirt
                if not OLD_LIBVIRT:
                    test.fail("Command 'virsh freecell %s' succeeded "
                              "(incorrect command)" % option)
                else:
                    # older libvirt
                    test.cancel('Older libvirt virsh freecell '
                                'incorrectly processes extraneous '
                                'command-line options')
    elif status_error == "no":
        output_check(output)
        if status != 0:
            test.fail("Command 'virsh freecell %s' failed "
                      "(correct command)" % option)
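
# Illustrative sketch: output_check above accepts any "kB"/"KiB"-style
# unit anywhere in the freecell output, case-insensitively.
def _example_has_kb_unit(freecell_output):
    import re
    return bool(re.search("ki?B", freecell_output, re.IGNORECASE))

# e.g. _example_has_kb_unit("0: 805912 KiB") -> True
#      _example_has_kb_unit("no unit here") -> False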
def run(test, params, env):
    """
    Test virsh reset command
    """
    if not virsh.has_help_command('reset'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the reset test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm_ref = params.get("reset_vm_ref")
    readonly = params.get("readonly", False)
    status_error = ("yes" == params.get("status_error", "no"))
    start_vm = ("yes" == params.get("start_vm"))
    vm = env.get_vm(vm_name)
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    # record the pid before reset for compare
    bef_pid = commands.getoutput("pidof -s qemu-kvm")

    if vm_ref == 'id':
        vm_ref = domid
    elif vm_ref == 'uuid':
        vm_ref = domuuid
    else:
        vm_ref = vm_name

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # change the disk cache to default
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    def change_cache(vmxml, mode):
        """
        Change the cache mode

        :param vmxml: instance of VMXML
        :param mode: cache mode you want to change
        """
        devices = vmxml.devices
        disk_index = devices.index(devices.by_device_tag('disk')[0])
        disk = devices[disk_index]
        disk_driver = disk.driver
        disk_driver['cache'] = mode
        disk.driver = disk_driver
        vmxml.devices = devices
        vmxml.define()

    try:
        change_cache(vmxml_backup.copy(), "default")

        tmpfile = "/home/%s" % utils_misc.generate_random_string(6)
        logging.debug("tmpfile is %s", tmpfile)

        if start_vm:
            session = vm.wait_for_login()
            session.cmd("rm -rf %s && sync" % tmpfile)
            status = session.get_command_status("touch %s && ls %s"
                                                % (tmpfile, tmpfile))
            if status == 0:
                logging.info("Succeed to generate file %s", tmpfile)
            else:
                raise error.TestFail("Touch command failed!")

        output = virsh.reset(vm_ref, readonly=readonly,
                             unprivileged_user=unprivileged_user,
                             uri=uri, ignore_status=True, debug=True)
        if output.exit_status != 0:
            if status_error:
                logging.info("Failed to reset guest as expected, Error:%s.",
                             output.stderr)
                return
            else:
                raise error.TestFail("Failed to reset guest, Error:%s."
                                     % output.stderr)
        elif status_error:
            raise error.TestFail("Expect fail, but succeed indeed.")

        session.close()
        session = vm.wait_for_login()
        status = session.get_command_status("ls %s" % tmpfile)
        if status == 0:
            raise error.TestFail("Fail to reset guest, tmpfile still exist!")
        else:
            aft_pid = commands.getoutput("pidof -s qemu-kvm")
            if bef_pid == aft_pid:
                logging.info("Succeed to check reset, tmpfile is removed.")
            else:
                raise error.TestFail("Domain pid changed after reset!")
        session.close()
    finally:
        vmxml_backup.sync()
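
# Illustrative sketch (comments only, using the helpers from the test
# above): why the test compares qemu-kvm pids. virsh reset is an in-place
# hard reset, like pressing a physical reset button, so the qemu process
# must survive it:
#
#     bef_pid = commands.getoutput("pidof -s qemu-kvm")
#     virsh.reset(vm_name)
#     aft_pid = commands.getoutput("pidof -s qemu-kvm")
#     # same pid -> domain was reset in place (expected)
#     # new pid  -> domain was destroyed and restarted (test failure)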
def run(test, params, env):
    """
    Test virsh domdisplay command, return the graphic url
    This test covered vnc and spice type, also readonly and readwrite mode
    If have --include-passwd option, also need to check passwd list in result
    """
    if not virsh.has_help_command('domdisplay'):
        test.cancel("This version of libvirt doesn't support "
                    "domdisplay test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("domdisplay_options", "")
    graphic = params.get("domdisplay_graphic", "vnc")
    readonly = ("yes" == params.get("readonly", "no"))
    passwd = params.get("domdisplay_passwd")
    is_ssl = ("yes" == params.get("domdisplay_ssl", "no"))
    is_domid = ("yes" == params.get("domdisplay_domid", "no"))
    is_domuuid = ("yes" == params.get("domdisplay_domuuid", "no"))
    qemu_conf = params.get("qemu_conf_file", "/etc/libvirt/qemu.conf")

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "qemu.conf.bk")

    if "--type" in options:
        if not libvirt_version.version_compare(1, 2, 6):
            test.cancel("--type option is not supported in this"
                        " libvirt version.")
        elif "vnc" in options and graphic != "vnc" or \
             "spice" in options and graphic != "spice":
            status_error = True

    def restart_libvirtd():
        # make modification effective
        libvirtd_instance = utils_libvirtd.Libvirtd()
        libvirtd_instance.restart()

    def clean_ssl_env():
        """
        Clean ssl spice connection firstly
        """
        # modify qemu.conf
        with open(qemu_conf, "r") as f_obj:
            cont = f_obj.read()

        # remove the existing setting
        left_cont = re.sub(r'\s*spice_tls\s*=.*', '', cont)
        left_cont = re.sub(r'\s*spice_tls_x509_cert_dir\s*=.*', '',
                           left_cont)

        # write back to origin file with cut left content
        with open(qemu_conf, "w") as f_obj:
            f_obj.write(left_cont)

    def prepare_ssl_env():
        """
        Do prepare for ssl spice connection
        """
        # modify qemu.conf
        clean_ssl_env()
        # Append ssl spice configuration
        with open(qemu_conf, "a") as f_obj:
            f_obj.write("spice_tls = 1\n")
            f_obj.write("spice_tls_x509_cert_dir = \"/etc/pki/libvirt-spice\"")

        # Generate CA cert
        utils_misc.create_x509_dir("/etc/pki/libvirt-spice",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my CA",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my server",
                                   passwd)
        os.chmod('/etc/pki/libvirt-spice/server-key.pem', 0o644)
        os.chmod('/etc/pki/libvirt-spice/ca-key.pem', 0o644)

    try:
        graphic_count = len(vmxml_backup.get_graphics_devices())
        # Do backup for qemu.conf in tmp_file
        shutil.copyfile(qemu_conf, tmp_file)
        if is_ssl:
            prepare_ssl_env()
            restart_libvirtd()
            if graphic_count:
                Graphics.del_graphic(vm_name)
            Graphics.add_graphic(vm_name, passwd, "spice", True)
        else:
            clean_ssl_env()
            restart_libvirtd()
            if graphic_count:
                Graphics.del_graphic(vm_name)
            Graphics.add_graphic(vm_name, passwd, graphic)

        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()

        dom_id = virsh.domid(vm_name).stdout.strip()
        dom_uuid = virsh.domuuid(vm_name).stdout.strip()

        if is_domid:
            vm_name = dom_id
        if is_domuuid:
            vm_name = dom_uuid

        # Do test
        result = virsh.domdisplay(vm_name, options, readonly=readonly,
                                  debug=True)
        logging.debug("result is %s", result)
        if result.exit_status:
            if not status_error:
                test.fail("Fail to get domain display info. Error:"
                          "%s." % result.stderr.strip())
            else:
                logging.info("Get domain display info failed as expected. "
                             "Error:%s.", result.stderr.strip())
                return
        elif status_error:
            test.fail("Expect fail, but succeed indeed!")

        output = result.stdout.strip()
        # Different result depends on the domain xml listen address
        if output.find("localhost:") >= 0:
            expect_addr = "localhost"
        else:
            expect_addr = "127.0.0.1"

        # Get active domain xml info
        vmxml_act = vm_xml.VMXML.new_from_dumpxml(vm_name, "--security-info")
        logging.debug("xml is %s", vmxml_act.get_xmltreefile())
        graphics = vmxml_act.devices.by_device_tag('graphics')
        for graph in graphics:
            if graph.type_name == graphic:
                graphic_act = graph
                port = graph.port

        # Do judgement for result
        if graphic == "vnc":
            expect = "vnc://%s:%s" % (expect_addr, str(int(port) - 5900))
        elif graphic == "spice" and is_ssl:
            tlsport = graphic_act.tlsPort
            expect = "spice://%s:%s?tls-port=%s" % \
                     (expect_addr, port, tlsport)
        elif graphic == "spice":
            expect = "spice://%s:%s" % (expect_addr, port)

        if options == "--include-password" and passwd is not None:
            # have --include-passwd and have passwd in xml
            if graphic == "vnc":
                expect = "vnc://:%s@%s:%s" % \
                         (passwd, expect_addr, str(int(port) - 5900))
            elif graphic == "spice" and is_ssl:
                expect = expect + "&password=" + passwd
            elif graphic == "spice":
                expect = expect + "?password=" + passwd

        if output == expect:
            logging.info("Get correct display: %s", output)
        else:
            test.fail("Expect %s, but get %s" % (expect, output))
    finally:
        # qemu.conf recovery
        shutil.move(tmp_file, qemu_conf)
        restart_libvirtd()
        # Domain xml recovery
        vmxml_backup.sync()
def run_virsh_snapshot_dumpxml(test, params, env):
    """
    Test snapshot-dumpxml command, make sure that the xml you get is correct

    Test scenarios:
    1. live snapshot dump
    2. shutoff snapshot dump
    3. dumpxml with security info
    4. readonly mode
    """
    if not virsh.has_help_command('snapshot-dumpxml'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-dumpxml test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    passwd = params.get("snapshot_passwd")
    secu_opt = params.get("snapshot_secure_option")
    desc_opt = params.get("snapshot_desc_option")
    mem_opt = params.get("snapshot_mem_option")
    disk_opt = params.get("disk_only_snap")
    snap_name = params.get("snapshot_name", "snap_test")
    readonly = params.get("readonly", False)

    try:
        snap_opt = ""
        opt_dict = {}
        # collect all the parameters at one time
        opt_name = locals()
        for opt in ["snap_name", "desc_opt", "mem_opt", "disk_opt"]:
            if opt_name[opt] is not None:
                # Integrate snapshot create options
                snap_opt = snap_opt + " " + opt_name[opt]

        # Do xml backup for final recovery
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Add passwd in guest graphics
        if passwd is not None:
            vm = env.get_vm(vm_name)
            if vm.is_alive():
                vm.destroy()
            vm_xml.VMXML.add_security_info(
                vm_xml.VMXML.new_from_dumpxml(vm_name), passwd)
            vm.start()
            if secu_opt is not None:
                opt_dict['passwd'] = passwd

        logging.debug("snapshot create options are %s", snap_opt)

        # Get state to do snapshot xml state check
        dom_state = virsh.domstate(vm_name).stdout.strip()

        # Create disk snapshot before all to make the origin image clean
        virsh.snapshot_create_as(vm_name, "--disk-only")

        # Create snapshot with options
        snapshot_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   readonly=readonly)
        if snapshot_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to create snapshot. Error:%s."
                                     % snapshot_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Create snapshot failed as expected, Error:%s.",
                             snapshot_result.stderr.strip())
                return

        ctime = get_snap_createtime(vm_name, snap_name)

        # Run virsh command for snapshot-dumpxml
        dumpxml_result = virsh.snapshot_dumpxml(vm_name, snap_name, secu_opt)
        if dumpxml_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to dump snapshot xml. Error:%s."
                                     % dumpxml_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Dumpxml snapshot failed as expected, Error:%s.",
                             dumpxml_result.stderr.strip())
                return

        # Record all the parameters in dict at one time
        check_name = locals()
        for var in ["vm_name", "snap_name", "desc_opt", "dom_state",
                    "ctime", "disk_opt"]:
            if check_name[var] is not None:
                opt_dict[var] = check_name[var]
        logging.debug("opt_dict is %s", opt_dict)

        output = dumpxml_result.stdout.strip()
        snapshot_dumpxml_check(output, opt_dict)
    finally:
        # Recovery
        utils_test.libvirt.clean_up_snapshots(vm_name)
        vmxml_backup.sync("--snapshots-metadata")
def run(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """

    def affinity_from_vcpuinfo(vm_name, vcpu):
        """
        This function returns list of the vcpu's affinity from
        virsh vcpuinfo output

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """
        output = virsh.vcpuinfo(vm_name).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def affinity_from_vcpupin(vm_name, vcpu):
        """
        This function returns list of vcpu's affinity from vcpupin output

        :param vm_name: VM Name
        :param vcpu: VM cpu pid
        :return: list of affinity to vcpus
        """
        total_cpu = process.run(
            "ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l",
            shell=True).stdout.strip()
        vcpus_affinity = {}
        output = virsh.vcpupin(vm_name).stdout
        for item in output.split('\n')[2:-2]:
            vcpus_affinity[item.split(':')[0].strip()] = \
                item.split(':')[1].strip()
        return utils_test.libvirt.cpus_string_to_affinity_list(
            vcpus_affinity[str(vcpu)], int(total_cpu))

    def check_vcpupin(vm_name, vcpu, cpu_list, pid, vcpu_pid):
        """
        This function checks the actual and the expected affinity of given
        vcpu and raises a test failure if they do not match

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu details for the affinity
        :param pid: VM pid
        :param vcpu_pid: VM vcpu pid
        """
        total_cpu = process.run(
            "ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l",
            shell=True).stdout.strip()
        logging.debug("Debug: cpulist %s", cpu_list)
        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list, int(total_cpu))
        logging.debug("Expected affinity: %s", expected_output)

        # Check for affinity value from vcpuinfo output
        actual_output = affinity_from_vcpuinfo(vm_name, vcpu)
        logging.debug("Actual affinity in vcpuinfo output: %s", actual_output)
        if expected_output == actual_output:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            test.fail("Cpu pinning details not updated properly in"
                      " virsh vcpuinfo command output")

        # Check for affinity value from vcpupin output
        actual_output_vcpupin = affinity_from_vcpupin(vm_name, vcpu)
        logging.debug("Actual affinity in vcpupin output: %s",
                      actual_output_vcpupin)
        if expected_output == actual_output_vcpupin:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            test.fail("Cpu pinning details not updated properly in"
                      " virsh vcpupin command output")

        if pid is None:
            return
        # Get the actual cpu affinity value in the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            output, int(total_cpu))
        logging.debug("Actual affinity in guest proc: %s", actual_output_proc)
        if expected_output == actual_output_proc:
            logging.info("successfully pinned vcpu: %s --> cpu: %s"
                         " in respective proc entry", vcpu, cpu_list)
        else:
            test.fail("Cpu pinning details are not updated properly in "
                      "/proc/%s/task/%s/status" % (pid, vcpu_pid))

    def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
        """
        Run the vcpupin command and then check the result.
        """
        if vm_ref == "name":
            vm_ref = vm.name
        elif vm_ref == "uuid":
            vm_ref = vm.get_uuid()
        # Execute virsh vcpupin command.
        cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
        if cmdResult.exit_status:
            if not status_error:
                # Command fail and it is positive case.
                test.fail(cmdResult)
            else:
                # Command fail and it is negative case.
                return
        else:
            if status_error:
                # Command success and it is negative case.
                test.fail(cmdResult)
            else:
                # Command success and it is positive case.
                # "--config" will take effect after VM destroyed.
                pid = None
                vcpu_pid = None
                if options == "--config":
                    virsh.destroy(vm.name)
                else:
                    pid = vm.get_pid()
                    logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                    vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    def offline_pin_and_check(vm, vcpu, cpu_list):
        """
        Edit domain xml to pin vcpu and check the result.
        """
        cputune = vm_xml.VMCPUTuneXML()
        cputune.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        vmxml.cputune = cputune
        vmxml.sync()
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)
        pid = vm.get_pid()
        vcpu_pid = vm.get_vcpus_pid()[vcpu]
        check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    if not virsh.has_help_command('vcpucount'):
        test.cancel("This version of libvirt doesn't support this test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    # Get the variables for vcpupin command.
    vm_ref = params.get("vcpupin_vm_ref", "name")
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    start_vm = ("yes" == params.get("start_vm", "yes"))
    vcpupin_initial = ("yes" == params.get("vcpupin_initial", "no"))

    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Edit domain xml to pin vcpus
    offline_pin = ("yes" == params.get("offline_pin", "no"))

    # Backup for recovery.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    if start_vm and vm.state() == "shut off":
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)

    # Get the guest vcpu count
    if offline_pin:
        vcpucount_option = "--config --active"
    else:
        vcpucount_option = "--live --active"
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       vcpucount_option).stdout.strip()

    # Find the alive cpus list
    cpus_list = list(map(str, cpuutils.cpu_online_list()))
    logging.info("Active cpus in host are %s", cpus_list)

    try:
        # Control multi domain vcpu affinity
        multi_dom = ("yes" == params.get("multi_dom_pin", "no"))
        vm2 = None

        # Before doing any vcpupin actions, lets check whether
        # initial pinning state is fine
        if vcpupin_initial:
            pid = vm.get_pid()
            logging.debug("vcpus_pid: %s vcpu count: %s",
                          vm.get_vcpus_pid(), guest_vcpu_count)
            for vcpu in range(int(guest_vcpu_count)):
                vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, str(','.join(cpus_list)), pid,
                              vcpu_pid)
            return

        if multi_dom:
            vm_names = params.get("vms").split()
            if len(vm_names) > 1:
                vm2 = env.get_vm(vm_names[1])
            else:
                test.error("Need more than one domains")
            if not vm2:
                test.cancel("No %s find" % vm_names[1])
            vm2.destroy()
            vm2xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
            vm2xml_backup = vm2xml.copy()
            # Make sure vm2 has the same cpu numbers with vm
            vm2xml.set_vm_vcpus(vm2.name, int(guest_vcpu_count),
                                guest_vcpu_count)
            if start_vm:
                vm2.start()

        # Run cases when guest is shutoff.
        if not offline_pin:
            if vm.is_dead() and not start_vm:
                run_and_check_vcpupin(vm, vm_ref, 0, 0, "")
                return

        # Get the host cpu count
        host_online_cpu_count = len(cpus_list)
        online_cpu_max = max(map(int, cpus_list))
        host_cpu_count = cpuutils.total_cpus_count()
        cpu_max = int(host_cpu_count) - 1
        if (host_online_cpu_count < 2) and (not cpu_list == "x"):
            test.cancel("We need more cpus on host in this case for the "
                        "cpu_list=%s. But current number of cpu on host "
                        "is %s." % (cpu_list, host_online_cpu_count))

        # Run test case
        for vcpu in range(int(guest_vcpu_count)):
            if cpu_list == "x":
                for cpu in cpus_list:
                    left_cpus = "0-%s,^%s" % (online_cpu_max, cpu)
                    if offline_pin:
                        offline_pin_and_check(vm, vcpu, str(cpu))
                        if multi_dom:
                            offline_pin_and_check(vm2, vcpu, left_cpus)
                    else:
                        run_and_check_vcpupin(vm, vm_ref, vcpu, str(cpu),
                                              options)
                        if multi_dom:
                            run_and_check_vcpupin(vm2, "name", vcpu,
                                                  left_cpus, options)
            else:
                if cpu_list == "x-y":
                    cpus = "0-%s" % online_cpu_max
                elif cpu_list == "x,y":
                    cpus = ','.join(random.sample(cpus_list, 2))
                    logging.info(cpus)
                elif cpu_list == "x-y,^z":
                    cpus = "0-%s,^%s" % (online_cpu_max, online_cpu_max)
                elif cpu_list == "r":
                    cpus = "r"
                elif cpu_list == "-1":
                    cpus = "-1"
                elif cpu_list == "out_of_max":
                    cpus = str(cpu_max + 1)
                else:
                    test.cancel("Cpu_list=%s is not recognized." % cpu_list)
                if offline_pin:
                    offline_pin_and_check(vm, vcpu, cpus)
                else:
                    run_and_check_vcpupin(vm, vm_ref, vcpu, cpus, options)
    finally:
        # Recover xml of vm.
        vmxml_backup.sync()
        if vm2:
            vm2xml_backup.sync()
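
# Illustrative sketch (hypothetical helper mirroring what
# utils_test.libvirt.cpus_string_to_affinity_list is used for above):
# expand a cpulist such as "0-2,^1" into the y/- affinity mask that
# virsh vcpuinfo prints, so expected and actual output can be compared.
def _example_affinity_list(cpu_list, total_cpu):
    mask = ['-'] * total_cpu
    for part in cpu_list.split(','):
        if part.startswith('^'):
            mask[int(part[1:])] = '-'      # exclusions override ranges
        elif '-' in part:
            start, end = part.split('-')
            for cpu in range(int(start), int(end) + 1):
                mask[cpu] = 'y'
        else:
            mask[int(part)] = 'y'
    return mask

# e.g. _example_affinity_list("0-2,^1", 4) -> ['y', '-', 'y', '-']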
def run_virsh_vcpupin(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """

    def affinity_from_vcpuinfo(domname, vcpu):
        """
        This function returns list of the vcpu's affinity from
        virsh vcpuinfo output

        :param domname: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """
        output = virsh.vcpuinfo(domname).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def check_vcpupin(domname, vcpu, cpu_list, pid):
        """
        This function checks the actual and the expected affinity of given
        vcpu and raises a test failure if they do not match

        :param domname: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu details for the affinity
        :param pid: VM pid
        """
        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list, host_cpu_count)
        actual_output = affinity_from_vcpuinfo(domname, vcpu)

        if expected_output == actual_output:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            raise error.TestFail("Command 'virsh vcpupin %s %s %s' not "
                                 "succeeded, cpu pinning details not "
                                 "updated properly in virsh vcpuinfo "
                                 "command output"
                                 % (vm_name, vcpu, cpu_list))

        if pid is None:
            return
        # Get the vcpus pid
        vcpus_pid = vm.get_vcpus_pid()
        vcpu_pid = vcpus_pid[vcpu]
        # Get the actual cpu affinity value in the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            output, host_cpu_count)
        if expected_output == actual_output_proc:
            logging.info("successfully pinned cpu: %s --> vcpu: %s"
                         " in respective proc entry", cpu_list, vcpu)
        else:
            raise error.TestFail("Command 'virsh vcpupin %s %s %s' not "
                                 "succeeded, cpu pinning details not "
                                 "updated properly in "
                                 "/proc/%s/task/%s/status"
                                 % (vm_name, vcpu, cpu_list, pid, vcpu_pid))

    def run_and_check_vcpupin(vm_name, vcpu, cpu_list, options, pid):
        """
        Run the vcpupin command and then check the result.
        """
        # Execute virsh vcpupin command.
        cmdResult = virsh.vcpupin(vm_name, vcpu, cpu_list, options)
        if cmdResult.exit_status:
            if not status_error:
                # Command fail and it is in positive case.
                raise error.TestFail(cmdResult)
            else:
                # Command fail and it is in negative case.
                return
        else:
            if status_error:
                # Command success and it is in negative case.
                raise error.TestFail(cmdResult)
            else:
                # Command success and it is in positive case.
                # "--config" will take effect after VM destroyed.
                if options == "--config":
                    virsh.destroy(vm_name)
                    pid = None
                # Check the result of vcpupin command.
                check_vcpupin(vm_name, vcpu, cpu_list, pid)

    if not virsh.has_help_command('vcpucount'):
        raise error.TestNAError("This version of libvirt doesn't"
                                " support this test")

    # Get the vm name, pid of vm and check for alive
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    pid = vm.get_pid()

    # Get the variables for vcpupin command.
    args = params.get("vcpupin_args", "dom_name")
    if args == "dom_name":
        args = vm_name
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")

    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Run cases when guest is shutoff.
    if vm.is_dead() and (params.get("start_vm") == "no"):
        run_and_check_vcpupin(args, 0, 0, "", 0)
        return

    # Get the host cpu count
    host_cpu_count = utils.count_cpus()
    if (int(host_cpu_count) < 2) and (not cpu_list == "x"):
        raise error.TestNAError("We need more cpus on host in this case "
                                "for the cpu_list=%s. But current number "
                                "of cpu on host is %s."
                                % (cpu_list, host_cpu_count))

    # Get the guest vcpu count
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       "--live --active").stdout.strip()

    # Run test case
    for vcpu in range(int(guest_vcpu_count)):
        if cpu_list == "x":
            for cpu in range(int(host_cpu_count)):
                run_and_check_vcpupin(args, vcpu, str(cpu), options, pid)
        else:
            cpu_max = int(host_cpu_count) - 1
            if cpu_list == "x-y":
                cpus = "0-%s" % cpu_max
            elif cpu_list == "x,y":
                cpus = "0,%s" % cpu_max
            elif cpu_list == "x-y,^z":
                cpus = "0-%s,^%s" % (cpu_max, cpu_max)
            elif cpu_list == "r":
                cpus = "r"
            elif cpu_list == "-1":
                cpus = "-1"
            elif cpu_list == "out_of_max":
                cpus = str(cpu_max + 1)
            else:
                raise error.TestNAError("Cpu_list=%s is not recognized."
                                        % cpu_list)
            run_and_check_vcpupin(args, vcpu, cpus, options, pid)
def run(test, params, env):
    """
    Test virsh domdisplay command, return the graphic url
    This test covered vnc and spice type, also readonly and readwrite mode
    If have --include-passwd option, also need to check passwd list in result
    """
    if not virsh.has_help_command('domdisplay'):
        raise error.TestNAError("This version of libvirt doesn't support "
                                "domdisplay test")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("domdisplay_options", "")
    graphic = params.get("domdisplay_graphic", "vnc")
    readonly = ("yes" == params.get("readonly", "no"))
    passwd = params.get("domdisplay_passwd")
    is_ssl = ("yes" == params.get("domdisplay_ssl", "no"))
    is_domid = ("yes" == params.get("domdisplay_domid", "no"))
    is_domuuid = ("yes" == params.get("domdisplay_domuuid", "no"))
    qemu_conf = params.get("qemu_conf_file", "/etc/libvirt/qemu.conf")

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_file = os.path.join(test.tmpdir, "qemu.conf.bk")

    def prepare_ssl_env():
        """
        Do prepare for ssl spice connection
        """
        # modify qemu.conf
        f_obj = open(qemu_conf, "r")
        cont = f_obj.read()

        # remove the existing setting
        left_cont = re.sub(r'\s*spice_tls\s*=.*', '', cont)
        left_cont = re.sub(r'\s*spice_tls_x509_cert_dir\s*=.*', '',
                           left_cont)

        # write back to origin file with cut left content
        f_obj = open(qemu_conf, "w")
        f_obj.write(left_cont)
        f_obj.write("spice_tls = 1\n")
        f_obj.write("spice_tls_x509_cert_dir = \"/etc/pki/libvirt-spice\"")
        f_obj.close()

        # make modification effective
        utils_libvirtd.libvirtd_restart()

        # Generate CA cert
        utils_misc.create_x509_dir("/etc/pki/libvirt-spice",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my CA",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my server",
                                   passwd)

    try:
        if is_ssl:
            # Do backup for qemu.conf in tmp_file
            shutil.copyfile(qemu_conf, tmp_file)
            prepare_ssl_env()
            Graphics.del_graphic(vm_name)
            Graphics.add_ssl_spice_graphic(vm_name, passwd)
        else:
            # Only change graphic type and passwd
            Graphics.change_graphic_type_passwd(vm_name, graphic, passwd)

        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()

        dom_id = virsh.domid(vm_name).stdout.strip()
        dom_uuid = virsh.domuuid(vm_name).stdout.strip()

        if is_domid:
            vm_name = dom_id
        if is_domuuid:
            vm_name = dom_uuid

        # Do test
        result = virsh.domdisplay(vm_name, options, readonly=readonly,
                                  debug=True)
        logging.debug("result is %s", result)
        if result.exit_status:
            if not status_error:
                raise error.TestFail("Fail to get domain display info. "
                                     "Error:%s." % result.stderr.strip())
            else:
                logging.info("Get domain display info failed as expected. "
                             "Error:%s.", result.stderr.strip())
                return
        elif status_error:
            raise error.TestFail("Expect fail, but succeed indeed!")

        output = result.stdout.strip()
        # Different result depends on the domain xml listen address
        if output.find("localhost:") >= 0:
            expect_addr = "localhost"
        else:
            expect_addr = "127.0.0.1"

        # Get active domain xml info
        vmxml_act = vm_xml.VMXML.new_from_dumpxml(vm_name, "--security-info")
        logging.debug("xml is %s", vmxml_act.get_xmltreefile())
        graphic_act = vmxml_act.devices.by_device_tag('graphics')[0]
        port = graphic_act.port

        # Do judgement for result
        if graphic == "vnc":
            expect = "vnc://%s:%s" % (expect_addr, str(int(port) - 5900))
        elif graphic == "spice" and is_ssl:
            tlsport = graphic_act.tlsPort
            expect = "spice://%s:%s?tls-port=%s" % \
                     (expect_addr, port, tlsport)
        elif graphic == "spice":
            expect = "spice://%s:%s" % (expect_addr, port)

        if options != "" and passwd is not None:
            # have --include-passwd and have passwd in xml
            if graphic == "vnc":
                expect = "vnc://:%s@%s:%s" % \
                         (passwd, expect_addr, str(int(port) - 5900))
            elif graphic == "spice" and is_ssl:
                expect = expect + "&password=" + passwd
            elif graphic == "spice":
                expect = expect + "?password=" + passwd

        if output == expect:
            logging.info("Get correct display: %s", output)
        else:
            raise error.TestFail("Expect %s, but get %s"
                                 % (expect, output))
    finally:
        # Domain xml recovery
        vmxml_backup.sync()
        if is_ssl:
            # qemu.conf recovery
            shutil.move(tmp_file, qemu_conf)
            utils_libvirtd.libvirtd_restart()
def run(test, params, env):
    """
    Test command: migrate-compcache <domain> [--size <number>]

    1) Run migrate-compcache command and check return code.
    """
    vm_ref = params.get("vm_ref", "name")
    vm_name = params.get('main_vm')
    start_vm = 'yes' == params.get('start_vm', 'yes')
    pause_vm = 'yes' == params.get('pause_after_start_vm', 'no')
    expect_succeed = 'yes' == params.get('expect_succeed', 'yes')
    size_option = params.get('size_option', 'valid')
    action = params.get('compcache_action', 'get')
    vm = env.get_vm(vm_name)

    # Check if the virsh command migrate-compcache is available
    if not virsh.has_help_command('migrate-compcache'):
        raise error.TestNAError("This version of libvirt does not support "
                                "virsh command migrate-compcache")

    # Prepare the VM state if it's not correct.
    if start_vm and not vm.is_alive():
        vm.start()
    elif not start_vm and vm.is_alive():
        vm.destroy()
    if pause_vm and not vm.is_paused():
        vm.pause()

    # Setup domain reference
    if vm_ref == 'domname':
        vm_ref = vm_name

    # Setup size according to size_option:
    # minimal: Same as memory page size
    # maximal: Same as guest memory
    # empty: An empty string
    # small: One byte less than page size
    # large: Larger than guest memory
    # huge : Largest int64
    page_size = get_page_size()
    if size_option == 'minimal':
        size = str(page_size)
    elif size_option == 'maximal':
        size = str(vm.get_max_mem() * 1024)
    elif size_option == 'empty':
        size = '""'
    elif size_option == 'small':
        size = str(page_size - 1)
    elif size_option == 'large':
        # Guest memory is larger than the max mem set,
        # add 50MB to ensure size exceeds guest memory.
        size = str(vm.get_max_mem() * 1024 + 50000000)
    elif size_option == 'huge':
        size = str(2 ** 64 - 1)
    else:
        size = size_option

    # If we need to get, just omit the size option
    if action == 'get':
        size = None

    # Run testing command
    result = virsh.migrate_compcache(vm_ref, size=size)
    logging.debug(result)

    # Shut down the VM to make sure the compcache setting cleared
    if vm.is_alive():
        vm.destroy()

    # Check test result
    if expect_succeed:
        if result.exit_status != 0:
            raise error.TestFail('Expected succeed, but failed with '
                                 'result:\n%s' % result)
    elif not expect_succeed:
        if result.exit_status == 0:
            raise error.TestFail('Expected fail, but succeed with '
                                 'result:\n%s' % result)
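
# Illustrative sketch (comments only; assumes a 4096-byte page, the
# common x86 default): how the size boundaries above straddle validity.
#
#     page_size = 4096
#     'minimal' -> 4096          # exactly one page, smallest valid size
#     'small'   -> 4095          # one byte below a page, invalid
#     'huge'    -> 2 ** 64 - 1   # largest uint64, rejected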
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that the clean repo can be used because qemu-guest-agent
    needs to be installed in guest

    The command creates a snapshot (disk and RAM) from arguments, covering
    the following points
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic (negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """
    if not virsh.has_help_command('snapshot-create-as'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    external_disk = params.get("external_disk")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    diskspec_opts = params.get("diskspec_opts")

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # if the parameters have the disk without "file=" then we only need to
        # add testdir for it.
        if mem_options is None:
            mem_options = os.path.join(test.virtdir, memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(test.virtdir, bad_disk)
        os.open(bad_disk, os.O_RDWR | os.O_CREAT)

    # Generate external disk
    if external_disk is not None:
        external_disk = os.path.join(test.virtdir, external_disk)
        commands.getoutput("qemu-img create -f qcow2 %s 1G" % external_disk)

    try:
        # Start qemu-ga on guest if have --quiesce
        if options.find("quiesce") >= 0:
            if vm.is_alive():
                vm.destroy()
            virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh)
            virt_xml_obj.set_agent_channel(vm_name)
            vm.start()
            if start_ga == "yes":
                session = vm.wait_for_login()

                # Check if qemu-ga already started automatically
                cmd = ("rpm -q qemu-guest-agent || "
                       "yum install -y qemu-guest-agent")
                stat_install = session.cmd_status(cmd, 300)
                if stat_install != 0:
                    raise error.TestFail("Fail to install qemu-guest-agent, "
                                         "make sure that you have usable "
                                         "repo in guest")

                # Check if qemu-ga already started
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if stat_ps != 0:
                    session.cmd("qemu-ga -d")
                    # Check if the qemu-ga really started
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if stat_ps != 0:
                        raise error.TestFail("Fail to run qemu-ga in guest")

        if domain_state == "paused":
            virsh.suspend(vm_name)

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                  ignore_status=True,
                                                  debug=True)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    raise error.TestFail("Run successfully with wrong "
                                         "command!")
                else:
                    # Check memspec file should be removed if failed
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            raise error.TestFail("Run failed but file %s "
                                                 "exist"
                                                 % option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file already been removed")
                    else:
                        logging.info("Run failed as expected")
            elif status_error == "no":
                if status != 0:
                    raise error.TestFail("Run failed with right command: %s"
                                         % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)
                    check_snapslist(vm_name, options, option_dict, output,
                                    snaps_before, snaps_list)
    finally:
        # Environment clean
        if options.find("quiesce") >= 0 and start_ga == "yes":
            session.cmd("rpm -e qemu-guest-agent")

        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            raise error.TestFail("Still can find snapshot metadata")

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
def run(test, params, env):
    """
    Test send-key command, include all types of codeset and sysrq

    For normal sendkey test, we create a file to check the command
    executed by send-key. For sysrq test, check the /var/log/messages
    in RHEL or /var/log/syslog in Ubuntu and guest status
    """
    if not virsh.has_help_command('send-key'):
        test.cancel("This version of libvirt does not support the send-key "
                    "test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    keystrokes = params.get("sendkey", "")
    codeset = params.get("codeset", "")
    holdtime = params.get("holdtime", "")
    sysrq_test = ("yes" == params.get("sendkey_sysrq", "no"))
    sleep_time = int(params.get("sendkey_sleeptime", 5))
    readonly = params.get("readonly", False)
    username = params.get("username")
    password = params.get("password")
    create_file = params.get("create_file_name")
    uri = params.get("virsh_uri")
    simultaneous = params.get("sendkey_simultaneous", "yes") == "yes"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current libvirt "
                        "version.")

    def send_line(send_str):
        """
        send string to guest with send-key and end with Enter
        """
        for send_ch in list(send_str):
            virsh.sendkey(vm_name, "KEY_%s" % send_ch.upper(),
                          ignore_status=False)
        virsh.sendkey(vm_name, "KEY_ENTER", ignore_status=False)

    vm = env.get_vm(vm_name)
    vm.wait_for_login().close()

    # Boot the guest in text only mode so that send-key commands would
    # succeed in creating a file
    try:
        utils_test.update_boot_option(vm, args_added="3")
    except Exception as info:
        test.error(info)

    session = vm.wait_for_login()
    if sysrq_test:
        # The postprocess of the previous testcase would pause and resume
        # the VM, which changes the domstate to running (unpaused) and
        # causes the sysrq reboot testcase to fail, as the domstate
        # persists across reboot; so it is better to destroy and start the
        # VM before the test starts
        if "KEY_B" in keystrokes:
            cmd_result = virsh.domstate(vm_name, '--reason',
                                        ignore_status=True)
            if "unpaused" in cmd_result.stdout.strip():
                vm.destroy()
                vm.start()
                session = vm.wait_for_login()

        LOG_FILE = "/var/log/messages"
        if "ubuntu" in vm.get_distro().lower():
            LOG_FILE = "/var/log/syslog"

        # Is 'rsyslog' installed on guest? It'll be what writes out
        # to LOG_FILE
        if not utils_package.package_install("rsyslog", session):
            test.fail("Fail to install rsyslog, make sure that you have "
                      "usable repo in guest")

        # clear messages, restart rsyslog, and make sure it's running
        session.cmd("echo '' > %s" % LOG_FILE)
        session.cmd("service rsyslog restart")
        ps_stat = session.cmd_status("ps aux |grep rsyslog")
        if ps_stat != 0:
            test.fail("rsyslog is not running in guest")

        # enable sysrq
        session.cmd("echo 1 > /proc/sys/kernel/sysrq")

    # make sure the environment is clear
    if create_file is not None:
        session.cmd("rm -rf %s" % create_file)

    try:
        # wait for tty started
        tty_stat = "ps aux|grep tty"
        timeout = 60
        while timeout >= 0 and \
                session.get_command_status(tty_stat) != 0:
            time.sleep(1)
            timeout = timeout - 1
        if timeout < 0:
            test.fail("Can not wait for tty started in 60s")

        # send user and passwd to guest to login
        send_line(username)
        time.sleep(2)
        send_line(password)
        time.sleep(2)

        if sysrq_test or simultaneous:
            output = virsh.sendkey(vm_name, keystrokes, codeset=codeset,
                                   holdtime=holdtime, readonly=readonly,
                                   unprivileged_user=unprivileged_user,
                                   uri=uri)
        else:
            # If multiple keycodes are specified, they are all sent
            # simultaneously to the guest, and they may be received
            # in random order. If you need distinct keypresses, you
            # must use multiple send-key invocations.
            for keystroke in keystrokes.split():
                output = virsh.sendkey(vm_name, keystroke, codeset=codeset,
                                       holdtime=holdtime, readonly=readonly,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri)
                if output.exit_status:
                    test.fail("Failed to send key %s to guest: %s"
                              % (keystroke, output.stderr))
        time.sleep(sleep_time)
        if output.exit_status != 0:
            if status_error:
                logging.info("Failed to sendkey to guest as expected, "
                             "Error:%s.", output.stderr)
                return
            else:
                test.fail("Failed to send key to guest, Error:%s."
                          % output.stderr)
        elif status_error:
            test.fail("Expect fail, but succeed indeed.")

        if create_file is not None:
            # check if created file exist
            cmd_ls = "ls %s" % create_file
            sec_status, sec_output = session.get_command_status_output(cmd_ls)
            if sec_status == 0:
                logging.info("Succeed to create file with send key")
            else:
                test.fail("Fail to create file with send key, Error:%s"
                          % sec_output)
        elif sysrq_test:
            # check LOG_FILE info according to different key
            # Since there's no guarantee when messages will be written
            # we'll do a check and wait loop for up to 60 seconds
            timeout = 60
            while timeout >= 0:
                if "KEY_H" in keystrokes:
                    cmd = "cat %s | grep 'SysRq.*HELP'" % LOG_FILE
                    get_status = session.cmd_status(cmd)
                elif "KEY_M" in keystrokes:
                    cmd = "cat %s | grep 'SysRq.*Show Memory'" % LOG_FILE
                    get_status = session.cmd_status(cmd)
                elif "KEY_T" in keystrokes:
                    cmd = "cat %s | grep 'SysRq.*Show State'" % LOG_FILE
                    get_status = session.cmd_status(cmd)
                    # Sometimes SysRq.*Show State string missed in LOG_FILE
                    # as a fall back check for runnable tasks logged
                    if get_status != 0:
                        cmd = "cat %s | grep 'runnable tasks:'" % LOG_FILE
                        get_status = session.cmd_status(cmd)
                elif "KEY_B" in keystrokes:
                    session = vm.wait_for_login()
                    result = virsh.domstate(vm_name, '--reason',
                                            ignore_status=True)
                    output = result.stdout.strip()
                    logging.debug("The guest state: %s", output)
                    if not output.count("booted"):
                        get_status = 1
                    else:
                        get_status = 0
                    session.close()
                if get_status == 0:
                    timeout = -1
                else:
                    session.cmd("echo \"virsh sendkey waiting\" >> %s"
                                % LOG_FILE)
                    time.sleep(1)
                    timeout = timeout - 1
            if get_status != 0:
                test.fail("SysRq does not take effect in guest, "
                          "keystrokes is %s" % keystrokes)
            else:
                logging.info("Succeed to send SysRq command")
        else:
            test.fail("Test cfg file invalid: either sysrq_params or "
                      "create_file_name must be defined")
    finally:
        if create_file is not None:
            session = vm.wait_for_login()
            session.cmd("rm -rf %s" % create_file)
        session.close()
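
# Illustrative sketch (hypothetical helper mirroring send_line above):
# each character is translated to a virsh KEY_* keycode name, and the
# line is terminated with KEY_ENTER.
def _example_keycodes(send_str):
    return ["KEY_%s" % ch.upper() for ch in send_str] + ["KEY_ENTER"]

# e.g. _example_keycodes("ls") -> ['KEY_L', 'KEY_S', 'KEY_ENTER']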
def run(test, params, env):
    """
    Test virsh domblkerror in 2 types of error
    1. unspecified error
    2. no space
    """
    if not virsh.has_help_command('domblkerror'):
        raise error.TestNAError("This version of libvirt does not support "
                                "domblkerror test")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    error_type = params.get("domblkerror_error_type")
    timeout = params.get("domblkerror_timeout", 240)
    mnt_dir = params.get("domblkerror_mnt_dir", "/home/test")
    export_file = params.get("nfs_export_file", "/etc/exports")
    img_name = params.get("domblkerror_img_name", "libvirt-disk")
    img_size = params.get("domblkerror_img_size")
    target_dev = params.get("domblkerror_target_dev", "vdb")
    pool_name = params.get("domblkerror_pool_name", "fs_pool")
    vol_name = params.get("domblkerror_vol_name", "vol1")
    vm = env.get_vm(vm_name)
    # backup /etc/exports
    shutil.copyfile(export_file, "%s.bak" % export_file)
    selinux_bak = ""
    # backup xml
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Generate tmp dir
        tmp_dir = data_dir.get_tmp_dir()
        img_dir = os.path.join(tmp_dir, 'images')
        if not os.path.exists(img_dir):
            os.mkdir(img_dir)
        # Generate attached disk
        utils.run("qemu-img create %s %s" %
                  (os.path.join(img_dir, img_name), img_size))
        # Get unspecified error
        if error_type == "unspecified error":
            # In this situation, the guest attaches a disk on nfs; stopping
            # the nfs service pauses the guest and produces an unspecified
            # error
            nfs_dir = os.path.join(tmp_dir, 'mnt')
            if not os.path.exists(nfs_dir):
                os.mkdir(nfs_dir)
            mount_opt = "rw,no_root_squash,async"
            res = utils_test.libvirt.setup_or_cleanup_nfs(
                is_setup=True, mount_dir=nfs_dir, is_mount=False,
                export_options=mount_opt, export_dir=img_dir)
            selinux_bak = res["selinux_status_bak"]
            utils.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 "
                      "127.0.0.1:%s %s" % (img_dir, nfs_dir))
            img_path = os.path.join(nfs_dir, img_name)
            nfs_service = Factory.create_service("nfs")
        elif error_type == "no space":
            # Steps to generate a no space block error:
            # 1. Prepare an iscsi disk and build a fs pool with it
            # 2. Create a vol with large capacity and 0 allocation
            # 3. Attach this disk in guest
            # 4. In guest, create a large image in the vol, which may pause
            #    the guest
            pool_target = os.path.join(tmp_dir, pool_name)
            _pool_vol = utils_test.libvirt.PoolVolumeTest(test, params)
            _pool_vol.pre_pool(pool_name, "fs", pool_target, img_name,
                               image_size=img_size)
            _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name)
            img_path = os.path.join(pool_target, vol_name)

        # Generate disk xml
        # Guest will attach a disk with cache=none and error_policy=stop
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.driver = {'name': "qemu",
                           'type': "raw",
                           'cache': "none",
                           'error_policy': "stop"}
        img_disk.target = {'dev': target_dev,
                           'bus': "virtio"}
        logging.debug("disk xml is %s", img_disk.xml)

        # Start guest and get session
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Get disk list before operation
        get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2"
        bef_list = session.cmd_output(get_disks_cmd).split("\n")

        # Attach disk to guest
        ret = virsh.attach_device(domain_opt=vm_name,
                                  file_opt=img_disk.xml)
        if ret.exit_status != 0:
            raise error.TestFail("Failed to attach device %s" % ret.stderr)
        time.sleep(2)
        logging.debug("domain xml is %s", virsh.dumpxml(vm_name))
        # get disk list after attach
        aft_list = session.cmd_output(get_disks_cmd).split("\n")
        # Find new disk after attach
        new_disk = "".join(list(set(bef_list) ^ set(aft_list)))
        logging.debug("new disk is %s", new_disk)

        def create_large_image():
            """
            Create a large image in the guest
            """
            # create partition and file system
            session.cmd("parted -s %s mklabel msdos" % new_disk)
            session.cmd("parted -s %s mkpart primary ext3 '0%%' '100%%'" %
                        new_disk)
            # mount the disk and write a file on it
            session.cmd("mkfs.ext3 %s1" % new_disk)
            session.cmd("mkdir -p %s && mount %s1 %s" %
                        (mnt_dir, new_disk, mnt_dir))
            # The following step may pause the guest before it returns
            try:
                session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 "
                            "count=51200 && sync" % mnt_dir)
            except Exception as err:
                logging.debug("Expected failure: %s", err)
            session.close()

        create_large_image()
        if error_type == "unspecified error":
            # umount nfs to trigger the error after creating the large image
            nfs_service.stop()
            logging.debug("nfs status is %s", nfs_service.status())

        # wait and check the guest status with timeout
        def _check_state():
            """
            Check domain state
            """
            return (vm.state() == "paused")

        if not utils_misc.wait_for(_check_state, timeout):
            raise error.TestFail("Guest did not pause, it is %s now" %
                                 vm.state())
        else:
            logging.info("Now domain state changed to paused status")
            output = virsh.domblkerror(vm_name)
            if output.exit_status == 0:
                expect_result = "%s: %s" % (img_disk.target['dev'], error_type)
                if output.stdout.strip() == expect_result:
                    logging.info("Get expected result: %s", expect_result)
                else:
                    raise error.TestFail("Failed to get expected result, "
                                         "got %s" % output.stdout.strip())
            else:
                raise error.TestFail("Failed to get domblkerror info: %s" %
                                     output.stderr)
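# virsh domblkerror reports one "<target dev>: <error type>" line per disk in
# error state, which is exactly what the expect_result check above rebuilds.
# A minimal standalone sketch of that comparison (an illustrative helper, not
# part of the test API):
def blkerror_matches(stdout, target_dev, error_type):
    """Return True if domblkerror output matches the expected disk error."""
    return stdout.strip() == "%s: %s" % (target_dev, error_type)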
def run(test, params, env): """ Test snapshot-create-as command Make sure that the clean repo can be used because qemu-guest-agent need to be installed in guest The command create a snapshot (disk and RAM) from arguments which including the following point * virsh snapshot-create-as --print-xml --diskspec --name --description * virsh snapshot-create-as --print-xml with multi --diskspec * virsh snapshot-create-as --print-xml --memspec * virsh snapshot-create-as --description * virsh snapshot-create-as --no-metadata * virsh snapshot-create-as --no-metadata --print-xml (negative test) * virsh snapshot-create-as --atomic --disk-only * virsh snapshot-create-as --quiesce --disk-only (positive and negative) * virsh snapshot-create-as --reuse-external * virsh snapshot-create-as --disk-only --diskspec * virsh snapshot-create-as --memspec --reuse-external --atomic(negative) * virsh snapshot-create-as --disk-only and --memspec (negative) * Create multi snapshots with snapshot-create-as * Create snapshot with name a--a a--a--snap1 """ if not virsh.has_help_command('snapshot-create-as'): raise error.TestNAError("This version of libvirt does not support " "the snapshot-create-as test") vm_name = params.get("main_vm") status_error = params.get("status_error", "no") options = params.get("snap_createas_opts") multi_num = params.get("multi_num", "1") diskspec_num = params.get("diskspec_num", "1") bad_disk = params.get("bad_disk") reuse_external = "yes" == params.get("reuse_external", "no") start_ga = params.get("start_ga", "yes") domain_state = params.get("domain_state") memspec_opts = params.get("memspec_opts") config_format = "yes" == params.get("config_format", "no") snapshot_image_format = params.get("snapshot_image_format") diskspec_opts = params.get("diskspec_opts") create_autodestroy = 'yes' == params.get("create_autodestroy", "no") unix_channel = "yes" == params.get("unix_channel", "yes") dac_denial = "yes" == params.get("dac_denial", "no") check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no") disk_snapshot_attr = params.get('disk_snapshot_attr', 'external') set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no") # gluster related params replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", "no") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) uri = params.get("virsh_uri") usr = params.get('unprivileged_user') if usr: if usr.count('EXAMPLE'): usr = '******' if disk_src_protocol == 'iscsi': if not libvirt_version.version_compare(1, 0, 4): raise error.TestNAError("'iscsi' disk doesn't support in" " current libvirt version.") if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") if not libvirt_version.version_compare(1, 2, 7): # As bug 1017289 closed as WONTFIX, the support only # exist on 1.2.7 and higher if disk_src_protocol == 'gluster': raise error.TestNAError("Snapshot on glusterfs not support in " "current version. Check more info with " "https://bugzilla.redhat.com/buglist.cgi?" "bug_id=1017289,1032370") opt_names = locals() if memspec_opts is not None: mem_options = compose_disk_options(test, params, memspec_opts) # if the parameters have the disk without "file=" then we only need to # add testdir for it. 
if mem_options is None: mem_options = os.path.join(test.tmpdir, memspec_opts) options += " --memspec " + mem_options tag_diskspec = 0 dnum = int(diskspec_num) if diskspec_opts is not None: tag_diskspec = 1 opt_names['diskopts_1'] = diskspec_opts # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used if dnum > 1: tag_diskspec = 1 for i in range(1, dnum + 1): opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i) if tag_diskspec == 1: for i in range(1, dnum + 1): disk_options = compose_disk_options(test, params, opt_names["diskopts_%s" % i]) options += " --diskspec " + disk_options logging.debug("options are %s", options) vm = env.get_vm(vm_name) option_dict = {} option_dict = utils_misc.valued_option_dict(options, r' --(?!-)') logging.debug("option_dict is %s", option_dict) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Generate empty image for negative test if bad_disk is not None: bad_disk = os.path.join(test.tmpdir, bad_disk) os.open(bad_disk, os.O_RDWR | os.O_CREAT) # Generate external disk if reuse_external: disk_path = '' for i in range(dnum): external_disk = "external_disk%s" % i if params.get(external_disk): disk_path = os.path.join(test.tmpdir, params.get(external_disk)) utils.run("qemu-img create -f qcow2 %s 1G" % disk_path) # Only chmod of the last external disk for negative case if dac_denial: utils.run("chmod 500 %s" % disk_path) qemu_conf = None libvirtd_conf = None libvirtd_log_path = None libvirtd = utils_libvirtd.Libvirtd() try: # Config "snapshot_image_format" option in qemu.conf if config_format: qemu_conf = utils_config.LibvirtQemuConfig() qemu_conf.snapshot_image_format = snapshot_image_format logging.debug("the qemu config file content is:\n %s" % qemu_conf) libvirtd.restart() if check_json_no_savevm: libvirtd_conf = utils_config.LibvirtdConfig() libvirtd_conf["log_level"] = '1' libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"' libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log") libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf) libvirtd.restart() if replace_vm_disk: libvirt.set_vm_disk(vm, params, tmp_dir) if set_snapshot_attr: if vm.is_alive(): vm.destroy(gracefully=False) vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = vmxml_backup.get_devices(device_type="disk")[0] vmxml_new.del_device(disk_xml) # set snapshot attribute in disk xml disk_xml.snapshot = disk_snapshot_attr new_disk = disk.Disk(type_name='file') new_disk.xmltreefile = disk_xml.xmltreefile vmxml_new.add_device(new_disk) logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile) vmxml_new.sync() vm.start() # Start qemu-ga on guest if have --quiesce if unix_channel and options.find("quiesce") >= 0: vm.prepare_guest_agent() session = vm.wait_for_login() if start_ga == "no": # The qemu-ga could be running and should be killed session.cmd("kill -9 `pidof qemu-ga`") # Check if the qemu-ga get killed stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: # As managed by systemd and set as autostart, qemu-ga # could be restarted, so use systemctl to stop it. 
session.cmd("systemctl stop qemu-guest-agent") stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: raise error.TestNAError("Fail to stop agent in " "guest") if domain_state == "paused": virsh.suspend(vm_name) else: # Remove channel if exist if vm.is_alive(): vm.destroy(gracefully=False) xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name) xml_inst.remove_agent_channels() vm.start() # Record the previous snapshot-list snaps_before = virsh.snapshot_list(vm_name) # Attach disk before create snapshot if not print xml and multi disks # specified in cfg if dnum > 1 and "--print-xml" not in options: for i in range(1, dnum): disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i) utils.run("qemu-img create -f qcow2 %s 200M" % disk_path) virsh.attach_disk(vm_name, disk_path, 'vd%s' % list(string.lowercase)[i], debug=True) # Run virsh command # May create several snapshots, according to configuration for count in range(int(multi_num)): if create_autodestroy: # Run virsh command in interactive mode vmxml_backup.undefine() vp = virsh.VirshPersistent() vp.create(vmxml_backup['xml'], '--autodestroy') cmd_result = vp.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) vp.close_session() vmxml_backup.define() else: cmd_result = virsh.snapshot_create_as(vm_name, options, unprivileged_user=usr, uri=uri, ignore_status=True, debug=True) # for multi snapshots without specific snapshot name, the # snapshot name is using time string with 1 second # incremental, to avoid get snapshot failure with same name, # sleep 1 second here. if int(multi_num) > 1: time.sleep(1.1) output = cmd_result.stdout.strip() status = cmd_result.exit_status # check status_error if status_error == "yes": if status == 0: raise error.TestFail("Run successfully with wrong command!") else: # Check memspec file should be removed if failed if (options.find("memspec") >= 0 and options.find("atomic") >= 0): if os.path.isfile(option_dict['memspec']): os.remove(option_dict['memspec']) raise error.TestFail("Run failed but file %s exist" % option_dict['memspec']) else: logging.info("Run failed as expected and memspec" " file already been removed") # Check domain xml is not updated if reuse external fail elif reuse_external and dac_denial: output = virsh.dumpxml(vm_name).stdout.strip() if "reuse_external" in output: raise error.TestFail("Domain xml should not be " "updated with snapshot image") else: logging.info("Run failed as expected") elif status_error == "no": if status != 0: raise error.TestFail("Run failed with right command: %s" % output) else: # Check the special options snaps_list = virsh.snapshot_list(vm_name) logging.debug("snaps_list is %s", snaps_list) check_snapslist(vm_name, options, option_dict, output, snaps_before, snaps_list) # For cover bug 872292 if check_json_no_savevm: pattern = "The command savevm has not been found" with open(libvirtd_log_path) as f: for line in f: if pattern in line and "error" in line: raise error.TestFail("'%s' was found: %s" % (pattern, line)) finally: if vm.is_alive(): vm.destroy() # recover domain xml xml_recover(vmxml_backup) path = "/var/lib/libvirt/qemu/snapshot/" + vm_name if os.path.isfile(path): raise error.TestFail("Still can find snapshot metadata") if disk_src_protocol == 'gluster': libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path) libvirtd.restart() if disk_src_protocol == 'iscsi': libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd) # rm bad disks if bad_disk is not None: os.remove(bad_disk) # rm attach disks and reuse external disks 
if dnum > 1 and "--print-xml" not in options: for i in range(dnum): disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i) if os.path.exists(disk_path): os.unlink(disk_path) if reuse_external: external_disk = "external_disk%s" % i disk_path = os.path.join(test.tmpdir, params.get(external_disk)) if os.path.exists(disk_path): os.unlink(disk_path) # restore config if config_format and qemu_conf: qemu_conf.restore() if libvirtd_conf: libvirtd_conf.restore() if libvirtd_conf or (config_format and qemu_conf): libvirtd.restart() if libvirtd_log_path and os.path.exists(libvirtd_log_path): os.unlink(libvirtd_log_path)
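# The snapshot-create-as tests turn the option string into a dict with
# utils_misc.valued_option_dict(options, r' --(?!-)'); the negative lookahead
# keeps snapshot names such as "a--a" intact while splitting on real option
# boundaries. A rough standalone sketch of the same idea, assuming that
# semantic, using only re:
import re

def split_virsh_options(options):
    """Split ' --opt value' pairs without breaking names containing '--'."""
    parts = re.split(r' --(?!-)', " " + options.strip())
    return dict(p.split(" ", 1) for p in parts if " " in p)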
def run(test, params, env):
    """
    Test domfsthaw command, make sure that all supported options work well

    Test scenarios:
    1. fsthaw fs which has been frozen
    2. fsthaw fs which has not been frozen

    Note: --mountpoint is still not supported, so it is not tested here
    """

    if not virsh.has_help_command('domfsthaw'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the domfsthaw test")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    start_vm = ("yes" == params.get("start_vm", "no"))
    no_freeze = ("yes" == params.get("no_freeze", "yes"))
    has_qemu_ga = not ("yes" == params.get("no_qemu_ga", "no"))
    start_qemu_ga = not ("yes" == params.get("no_start_qemu_ga", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("domfsthaw_options", "")
    vm_ref = params.get("vm_ref", "")

    # Do backup for origin xml
    xml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)

    try:
        vm = env.get_vm(vm_name)
        vm.destroy()
        if not vm.is_alive():
            vm.start()

        # First, freeze all filesystems
        if not no_freeze:
            # Add channel device for qemu-ga
            vm.prepare_guest_agent()
            cmd_result = virsh.domfsfreeze(vm_name, debug=True)
            if cmd_result.exit_status != 0:
                raise error.TestFail("Failed to do virsh domfsfreeze, "
                                     "error %s" % cmd_result.stderr)

        if has_qemu_ga:
            vm.prepare_guest_agent(start=start_qemu_ga)
        else:
            # Remove qemu-ga channel
            vm.prepare_guest_agent(channel=has_qemu_ga, start=False)

        if start_vm:
            if not vm.is_alive():
                vm.start()
        else:
            vm.destroy()

        if vm_ref == "none":
            vm_name = " "

        cmd_result = virsh.domfsthaw(vm_name, options=options, debug=True)
        if not status_error:
            if cmd_result.exit_status != 0:
                raise error.TestFail("Failed to do virsh domfsthaw, error %s"
                                     % cmd_result.stderr)
        else:
            if cmd_result.exit_status == 0:
                raise error.TestFail("Command 'virsh domfsthaw' succeeded "
                                     "unexpectedly")

    finally:
        # Do domain recovery
        xml_backup.sync()
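# domfsthaw only makes sense against a reachable guest agent, and the positive
# path above is always freeze-then-thaw. A minimal pairing sketch using the
# same virsh wrappers the tests already import (an illustrative helper; the
# no-options domfsthaw call is an assumption based on the usage above):
def freeze_thaw_cycle(vm_name):
    """Freeze all guest filesystems, then thaw them again."""
    res = virsh.domfsfreeze(vm_name, debug=True)
    if res.exit_status != 0:
        return res
    return virsh.domfsthaw(vm_name, debug=True)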
def run(test, params, env):
    """
    Test virsh cpu-stats command.

    The command can display domain per-CPU and total statistics.
    1. Call virsh cpu-stats [domain]
    2. Call virsh cpu-stats [domain] with valid options
    3. Call virsh cpu-stats [domain] with invalid options
    """

    if not virsh.has_help_command('cpu-stats'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the cpu-stats test")

    vm_name = params.get("main_vm", "vm1")
    vm_ref = params.get("cpu_stats_vm_ref")
    status_error = params.get("status_error", "no")
    options = params.get("cpu_stats_options")
    logging.debug("options are %s", options)

    if vm_ref == "name":
        vm_ref = vm_name

    # get host cpus num
    cpus = multiprocessing.cpu_count()
    logging.debug("host cpu num is %s", cpus)

    # get options and put into a dict
    get_total = re.search('total', options)
    get_start = re.search('start', options)
    get_count = re.search('count', options)

    # command without options
    get_noopt = 0
    if not get_total and not get_start and not get_count:
        get_noopt = 1

    # command with only --total option
    get_totalonly = 0
    if not get_start and not get_count and get_total:
        get_totalonly = 1

    option_dict = {}
    if options.strip():
        option_list = options.split('--')
        logging.debug("option_list is %s", option_list)
        for match in option_list[1:]:
            if get_start or get_count:
                option_dict[match.split(' ')[0]] = match.split(' ')[1]

    # Run virsh command
    cmd_result = virsh.cpu_stats(vm_ref, options,
                                 ignore_status=True, debug=True)
    output = cmd_result.stdout.strip()
    status = cmd_result.exit_status

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            # Get cgroup cpu_time
            if not get_totalonly:
                vm = env.get_vm(vm_ref)
                cgpath = utils_cgroup.resolve_task_cgroup_path(
                    vm.get_pid(), "cpuacct")
                # When a VM has an 'emulator' child cgroup present, we must
                # strip off that suffix when detecting the cgroup for a machine
                if os.path.basename(cgpath) == "emulator":
                    cgpath = os.path.dirname(cgpath)
                usage_file = os.path.join(cgpath, "cpuacct.usage_percpu")
                with open(usage_file) as f:
                    cgtime = f.read().strip().split()
                logging.debug("cgtime get is %s", cgtime)

            # Cut CPUs from output and format to list
            output = re.sub(r'\.', '', output)
            if get_total:
                mt_start = re.search('Total', output).start()
            else:
                mt_start = len(output)
            output_cpus = " ".join(output[:mt_start].split())
            cpus_list = re.compile(r'CPU\d+:').split(output_cpus)

            # conditions that list total time info
            if get_noopt or get_total:
                mt_end = re.search('Total', output).end()
                total_list = output[mt_end + 1:].split()
                total_time = int(total_list[1])
                user_time = int(total_list[4])
                system_time = int(total_list[7])
                # check Total cpu_time >= User + System cpu_time
                if user_time + system_time > total_time:
                    raise error.TestFail("total cpu_time < user_time + "
                                         "system_time")
                logging.debug("Check total cpu_time %d >= user + system "
                              "cpu_time %d",
                              total_time, user_time + system_time)

            start_num = 0
            if get_start:
                start_num = int(option_dict["start"])

            end_num = int(cpus)
            if get_count:
                count_num = int(option_dict["count"])
                if end_num > start_num + count_num:
                    end_num = start_num + count_num

            # with only the --total option given, only "Total" cpu info shows
            if get_totalonly:
                end_num = -1

            # find CPU[N] in output and sum the cpu_time and cgroup cpu_time
            sum_cputime = 0
            sum_cgtime = 0
            logging.debug("start_num %d, end_num %d", start_num, end_num)
            for i in range(start_num, end_num):
                if not re.search('CPU' + "%i" % i, output):
                    raise error.TestFail("Failed to find CPU%i in result" % i)
                logging.debug("Check CPU%i exists", i)
                sum_cputime += int(cpus_list[i - start_num + 1].split()[1])
                sum_cgtime += int(cgtime[i])

            # check cgroup cpu_time >= sum of cpu_time
            if end_num >= 0:
                if sum_cputime > sum_cgtime:
                    raise error.TestFail("Sum of cgroup cpu_time < sum "
                                         "of output cpu_time")
                logging.debug("Check sum of cgroup cpu_time %d >= cpu_time %d",
                              sum_cgtime, sum_cputime)

            # check Total cpu_time >= sum of cpu_time when no options
            if get_noopt:
                if total_time < sum_cputime:
                    raise error.TestFail("total time < sum of output cpu_time")
                logging.debug("Check total time %d >= sum of output cpu_time"
                              " %d", total_time, sum_cputime)
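# The cgroup side of the comparison above reads cpuacct.usage_percpu, which
# holds one cumulative nanosecond counter per host CPU. A standalone sketch
# of the same summation (assumes a cgroup v1 cpuacct hierarchy; the path
# argument is illustrative):
import os

def sum_percpu_usage(cgroup_path):
    """Sum per-CPU nanosecond counters from cpuacct.usage_percpu."""
    with open(os.path.join(cgroup_path, "cpuacct.usage_percpu")) as f:
        return sum(int(v) for v in f.read().split())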
def run(test, params, env): """ Test command: migrate-compcache <domain> [--size <number>] 1) Run migrate-compcache command and check return code. """ vm_ref = params.get("vm_ref", "name") vm_name = params.get("migrate_main_vm") start_vm = "yes" == params.get("start_vm", "yes") pause_vm = "yes" == params.get("pause_after_start_vm", "no") expect_succeed = "yes" == params.get("expect_succeed", "yes") size_option = params.get("size_option", "valid") action = params.get("compcache_action", "get") vm = env.get_vm(vm_name) # Check if the virsh command migrate-compcache is available if not virsh.has_help_command("migrate-compcache"): raise error.TestNAError("This version of libvirt does not support " "virsh command migrate-compcache") # Prepare the VM state if it's not correct. if start_vm and not vm.is_alive(): vm.start() elif not start_vm and vm.is_alive(): vm.destroy() if pause_vm and not vm.is_paused(): vm.pause() # Setup domain reference if vm_ref == "domname": vm_ref = vm_name # Setup size according to size_option: # minimal: Same as memory page size # maximal: Same as guest memory # empty: An empty string # small: One byte less than page size # large: Larger than guest memory # huge : Largest int64 page_size = get_page_size() if size_option == "minimal": size = str(page_size) elif size_option == "maximal": size = str(vm.get_max_mem() * 1024) elif size_option == "empty": size = '""' elif size_option == "small": size = str(page_size - 1) elif size_option == "large": # Guest memory is larger than the max mem set, # add 50MB to ensure size exceeds guest memory. size = str(vm.get_max_mem() * 1024 + 50000000) elif size_option == "huge": size = str(2 ** 64 - 1) else: size = size_option # If we need to get, just omit the size option if action == "get": size = None # Run testing command result = virsh.migrate_compcache(vm_ref, size=size) logging.debug(result) remote_uri = params.get("compcache_remote_uri") remote_host = params.get("migrate_dest_host") remote_user = params.get("migrate_dest_user", "root") remote_pwd = params.get("migrate_dest_pwd") check_job_compcache = False compressed_size = None if not remote_host.count("EXAMPLE") and size is not None and expect_succeed: # Config ssh autologin for remote host ssh_key.setup_ssh_key(remote_host, remote_user, remote_pwd, port=22) if vm.is_dead(): vm.start() if vm.is_paused(): vm.resume() vm.wait_for_login() # Do actual migration to verify compression cache of migrate jobs command = "virsh migrate %s %s --compressed --unsafe --verbose" % (vm_name, remote_uri) logging.debug("Start migrating: %s", command) p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Give enough time for starting job t = 0 while t < 5: jobinfo = virsh.domjobinfo(vm_ref, debug=True, ignore_status=True).stdout jobtype = "None" for line in jobinfo.splitlines(): key = line.split(":")[0] if key.count("type"): jobtype = line.split(":")[-1].strip() elif key.strip() == "Compression cache": compressed_size = line.split(":")[-1].strip() if "None" == jobtype or compressed_size is None: t += 1 time.sleep(1) continue else: check_job_compcache = True logging.debug("Job started: %s", jobtype) break if p.poll(): try: p.kill() except OSError: pass # Cleanup in case of successful migration utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri) # Shut down the VM to make sure the compcache setting cleared if vm.is_alive(): vm.destroy() # Check test result if expect_succeed: if result.exit_status != 0: raise error.TestFail("Expected succeed, but failed 
with result:\n%s" % result)
        if check_job_compcache:
            value = compressed_size.split()[0].strip()
            unit = compressed_size.split()[-1].strip()
            value = int(float(value))
            if unit == "KiB":
                size = int(int(size) / 1024)
            elif unit == "MiB":
                size = int(int(size) / 1048576)
            elif unit == "GiB":
                size = int(int(size) / 1073741824)
            if value != size:
                raise error.TestFail("Compression cache does not match "
                                     "the value that was set")
            else:
                return
        else:
            logging.warn("The compressed size was not verified "
                         "during migration.")
    elif not expect_succeed:
        if result.exit_status == 0:
            raise error.TestFail("Expected fail, but succeed with result:\n%s"
                                 % result)
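# domjobinfo reports "Compression cache" scaled to KiB/MiB/GiB, so the check
# above rescales the configured byte size before comparing. The same
# conversion in the opposite direction, as a small sketch (an illustrative
# helper, not part of the test API):
def compcache_to_bytes(value, unit):
    """Convert a scaled domjobinfo value such as ('64.000', 'MiB') to bytes."""
    factors = {"B": 1, "KiB": 1024, "MiB": 1024 ** 2, "GiB": 1024 ** 3}
    return int(float(value) * factors[unit])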
def run_virsh_snapshot_dumpxml(test, params, env):
    """
    Test snapshot-dumpxml command, make sure that the xml you get is correct

    Test scenarios:
    1. live snapshot dump
    2. shutoff snapshot dump
    3. dumpxml with security info
    4. readonly mode
    """

    if not virsh.has_help_command('snapshot-dumpxml'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-dumpxml test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    passwd = params.get("snapshot_passwd")
    secu_opt = params.get("snapshot_secure_option")
    desc_opt = params.get("snapshot_desc_option")
    mem_opt = params.get("snapshot_mem_option")
    disk_opt = params.get("disk_only_snap")
    snap_name = params.get("snapshot_name", "snap_test")
    readonly = params.get("readonly", False)

    try:
        snap_opt = ""
        opt_dict = {}
        # collect all the parameters at one time
        opt_name = locals()
        for opt in ["snap_name", "desc_opt", "mem_opt", "disk_opt"]:
            if opt_name[opt] is not None:
                # Integrate snapshot create options
                snap_opt = snap_opt + " " + opt_name[opt]

        # Do xml backup for final recovery
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Add passwd in guest graphics
        if passwd is not None:
            vm = env.get_vm(vm_name)
            if vm.is_alive():
                vm.destroy()
            vm_xml.VMXML.add_security_info(
                vm_xml.VMXML.new_from_dumpxml(vm_name), passwd)
            vm.start()
            if secu_opt is not None:
                opt_dict['passwd'] = passwd

        logging.debug("snapshot create options are %s", snap_opt)

        # Get state to do snapshot xml state check
        dom_state = virsh.domstate(vm_name).stdout.strip()

        # Create disk snapshot before all to make the origin image clean
        virsh.snapshot_create_as(vm_name, "--disk-only")

        # Create snapshot with options
        snapshot_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   readonly=readonly)
        if snapshot_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to create snapshot. Error:%s."
                                     % snapshot_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Create snapshot failed as expected, Error:%s.",
                             snapshot_result.stderr.strip())
                return

        ctime = get_snap_createtime(vm_name, snap_name)

        # Run virsh command for snapshot-dumpxml
        dumpxml_result = virsh.snapshot_dumpxml(vm_name, snap_name, secu_opt)
        if dumpxml_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to dump snapshot xml. Error:%s."
                                     % dumpxml_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Dumpxml snapshot failed as expected, Error:%s.",
                             dumpxml_result.stderr.strip())
                return

        # Record all the parameters in dict at one time
        check_name = locals()
        for var in ["vm_name", "snap_name", "desc_opt", "dom_state",
                    "ctime", "disk_opt"]:
            if check_name[var] is not None:
                opt_dict[var] = check_name[var]

        logging.debug("opt_dict is %s", opt_dict)
        output = dumpxml_result.stdout.strip()
        snapshot_dumpxml_check(output, opt_dict)

    finally:
        # Recovery
        utils_test.libvirt.clean_up_snapshots(vm_name)
        vmxml_backup.sync("--snapshots-metadata")
def run(test, params, env): """ Test send-key command, include all types of codeset and sysrq For normal sendkey test, we create a file to check the command execute by send-key. For sysrq test, check the /var/log/messages and guest status """ if not virsh.has_help_command('send-key'): test.cancel("This version of libvirt does not support the send-key " "test") vm_name = params.get("main_vm", "avocado-vt-vm1") status_error = ("yes" == params.get("status_error", "no")) options = params.get("sendkey_options", "") sysrq_test = ("yes" == params.get("sendkey_sysrq", "no")) sleep_time = int(params.get("sendkey_sleeptime", 2)) readonly = params.get("readonly", False) username = params.get("username") password = params.get("password") create_file = params.get("create_file_name") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current libvirt " "version.") def send_line(send_str): """ send string to guest with send-key and end with Enter """ for send_ch in list(send_str): virsh.sendkey(vm_name, "KEY_%s" % send_ch.upper(), ignore_status=False) virsh.sendkey(vm_name, "KEY_ENTER", ignore_status=False) vm = env.get_vm(vm_name) session = vm.wait_for_login() if sysrq_test: # Is 'rsyslog' installed on guest? It'll be what writes out # to /var/log/messages if not utils_package.package_install("rsyslog", session): test.fail("Fail to install rsyslog, make sure that you have " "usable repo in guest") # clear messages, restart rsyslog, and make sure it's running session.cmd("echo '' > /var/log/messages") session.cmd("service rsyslog restart") ps_stat = session.cmd_status("ps aux |grep rsyslog") if ps_stat != 0: test.fail("rsyslog is not running in guest") # enable sysrq session.cmd("echo 1 > /proc/sys/kernel/sysrq") # make sure the environment is clear if create_file is not None: session.cmd("rm -rf %s" % create_file) try: # wait for tty started tty_stat = "ps aux|grep tty" timeout = 60 while timeout >= 0 and \ session.get_command_status(tty_stat) != 0: time.sleep(1) timeout = timeout - 1 if timeout < 0: test.fail("Can not wait for tty started in 60s") # send user and passwd to guest to login send_line(username) time.sleep(2) send_line(password) time.sleep(2) output = virsh.sendkey(vm_name, options, readonly=readonly, unprivileged_user=unprivileged_user, uri=uri) time.sleep(sleep_time) if output.exit_status != 0: if status_error: logging.info("Failed to sendkey to guest as expected, Error:" "%s.", output.stderr) return else: test.fail("Failed to send key to guest, Error:%s." 
                      % output.stderr)
        elif status_error:
            test.fail("Expected the command to fail, but it succeeded.")

        if create_file is not None:
            # check if the created file exists
            cmd_ls = "ls %s" % create_file
            sec_status, sec_output = session.get_command_status_output(cmd_ls)
            if sec_status == 0:
                logging.info("Succeeded to create file with send key")
            else:
                test.fail("Failed to create file with send key, Error:%s" %
                          sec_output)
        elif sysrq_test:
            # check /var/log/messages info according to different key
            # Since there's no guarantee when messages will be written
            # we'll do a check and wait loop for up to 60 seconds
            timeout = 60
            while timeout >= 0:
                if "KEY_H" in options:
                    get_status = session.cmd_status("cat /var/log/messages|"
                                                    "grep 'SysRq.*HELP'")
                elif "KEY_M" in options:
                    get_status = session.cmd_status("cat /var/log/messages|"
                                                    "grep 'SysRq.*Show Memory'")
                elif "KEY_T" in options:
                    get_status = session.cmd_status("cat /var/log/messages|"
                                                    "grep 'SysRq.*Show State'")
                elif "KEY_B" in options:
                    client_session = vm.wait_for_login()
                    result = virsh.domstate(vm_name, '--reason',
                                            ignore_status=True)
                    output = result.stdout.strip()
                    logging.debug("The guest state: %s", output)
                    if not output.count("booted"):
                        get_status = 1
                    else:
                        get_status = 0
                    client_session.close()

                if get_status == 0:
                    timeout = -1
                else:
                    session.cmd("echo \"virsh sendkey waiting\" "
                                ">> /var/log/messages")
                    time.sleep(1)
                    timeout = timeout - 1
            if get_status != 0:
                test.fail("SysRq does not take effect in guest, options "
                          "are %s" % options)
            else:
                logging.info("Succeeded to send SysRq command")
        else:
            test.fail("Test cfg file invalid: either sysrq_params or "
                      "create_file_name must be defined")
    finally:
        if create_file is not None:
            session.cmd("rm -rf %s" % create_file)
        session.close()
def run(test, params, env):
    """
    1. Configure kernel cmdline to support kdump
    2. Start kdump service
    3. Inject NMI to the guest
    4. Check NMI times
    """
    for cmd in 'inject-nmi', 'qemu-monitor-command':
        if not virsh.has_help_command(cmd):
            test.cancel("This version of libvirt does not support "
                        "the %s test" % cmd)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    expected_nmi_times = params.get("expected_nmi_times", '0')
    kernel_params = params.get("kernel_params", "")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    try:
        if kernel_params:
            update_boot_option_and_reboot(vm, kernel_params, test)
        if start_vm == "yes":
            # start kdump service in the guest
            cmd = "which kdump"
            try:
                run_cmd_in_guest(vm, cmd, test)
            except Exception:
                try:
                    # try to install kexec-tools on fedoraX/rhelx.y guest
                    run_cmd_in_guest(vm, "yum install -y kexec-tools", test)
                except Exception:
                    test.error("Requires kexec-tools (or the equivalent "
                               "for your distro)")

            # enable kdump service in the guest
            cmd = "service kdump start"
            run_cmd_in_guest(vm, cmd, test, timeout=120)

            # filter original 'NMI' information from /proc/interrupts
            cmd = "grep NMI /proc/interrupts"
            nmi_str = run_cmd_in_guest(vm, cmd, test)

            # filter CPU from /proc/cpuinfo and count the number
            cmd = "grep -E '^process' /proc/cpuinfo | wc -l"
            vcpu_num = run_cmd_in_guest(vm, cmd, test).strip()

            logging.info("Inject NMI to the guest via virsh inject_nmi")
            virsh.inject_nmi(vm_name, debug=True, ignore_status=False)

            logging.info("Inject NMI to the guest via virsh "
                         "qemu_monitor_command")
            virsh.qemu_monitor_command(vm_name, '{"execute":"inject-nmi"}')

            # injects a Non-Maskable Interrupt into the default CPU (x86/s390)
            # or all CPUs (ppc64); as usual, the default CPU index is 0
            cmd = "grep NMI /proc/interrupts | awk '{print $2}'"
            nmi_from_default_vcpu = run_cmd_in_guest(vm, cmd, test)
            real_nmi_times = nmi_from_default_vcpu.splitlines()[0]
            logging.debug("The current Non-Maskable Interrupts: %s",
                          real_nmi_times)

            # check Non-maskable interrupt times
            if real_nmi_times != expected_nmi_times:
                test.fail("NMI times %s do not match the expected %s"
                          % (real_nmi_times, expected_nmi_times))
    finally:
        if kernel_params:
            cmd = ("grubby --update-kernel=`grubby --default-kernel` "
                   "--remove-args='%s'" % kernel_params)
            run_cmd_in_guest(vm, cmd, test)
            vm.reboot()
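# /proc/interrupts carries the NMI row the test greps: one counter column per
# online CPU, followed by a description. A standalone sketch of pulling those
# counters out directly (an illustrative helper, not part of the test API):
def nmi_counts():
    """Return the per-CPU NMI counters from /proc/interrupts."""
    with open("/proc/interrupts") as f:
        for line in f:
            fields = line.split()
            if fields and fields[0] == "NMI:":
                return [int(tok) for tok in fields[1:] if tok.isdigit()]
    return []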
def run(test, params, env): """ Test domfsfreeze command, make sure that all supported options work well Test scenaries: 1. fsfreeze all fs without options 2. fsfreeze a mountpoint with --mountpoint 3. fsfreeze a mountpoint without --mountpoint """ def check_freeze(session): """ Check whether file system has been frozen by touch a test file and see if command will hang. :param session: Guest session to be tested. """ try: output = session.cmd_output('touch freeze_test', timeout=10) test.fail("Failed to freeze file system. " "Create file succeeded:%s\n" % output) except aexpect.ShellTimeoutError: pass if not virsh.has_help_command('domfsfreeze'): test.cancel("This version of libvirt does not support " "the domfsfreeze test") if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current" " libvirt version.") vm_ref = params.get("vm_ref", "name") vm_name = params.get("main_vm", "virt-tests-vm1") mountpoint = params.get("domfsfreeze_mnt", None) options = params.get("domfsfreeze_options", "") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') start_vm = ("yes" == params.get("start_vm", "yes")) has_channel = ("no" == params.get("no_qemu_ga", "no")) start_qemu_ga = ("no" == params.get("no_start_qemu_ga", "no")) status_error = ("yes" == params.get("status_error", "no")) # Do backup for origin xml xml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name) try: vm = env.get_vm(vm_name) if vm.is_alive(): vm.destroy() if has_channel: # Add channel device for qemu-ga vm.prepare_guest_agent(start=start_qemu_ga) else: # Remove qemu-ga channel vm.prepare_guest_agent(channel=False, start=False) if start_vm: if not vm.is_alive(): vm.start() domid = vm.get_id() session = vm.wait_for_login() else: vm.destroy() domuuid = vm.get_uuid() if vm_ref == "id": vm_ref = domid elif vm_ref == "uuid": vm_ref = domuuid elif vm_ref.count("invalid"): vm_ref = uuid.uuid1() elif vm_ref == "none": vm_ref = "" elif vm_ref == "name": vm_ref = vm_name result = virsh.domfsfreeze(vm_ref, mountpoint=mountpoint, options=options, unprivileged_user=unprivileged_user, uri=uri, debug=True) libvirt.check_exit_status(result, status_error) if not result.exit_status: check_freeze(session) finally: # Do domain recovery xml_backup.sync()
def run(test, params, env):
    """
    Test domfstrim command, make sure that all supported options work well

    Test scenarios:
    1. fstrim without options
    2. fstrim with --minimum with large options
    3. fstrim with --minimum with small options

    Note: --mountpoint is still not supported, so it is not tested here
    """

    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest

        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {'dev': disk_path}})
        scsi_disk.target = {'dev': "sdb",
                            'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()

    if not virsh.has_help_command('domfstrim'):
        test.cancel("This version of libvirt does not support "
                    "the domfstrim test")

    try:
        utils_path.find_command("lsscsi")
    except utils_path.CmdNotFoundError:
        test.cancel("Command 'lsscsi' is missing. You must "
                    "install it.")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    minimum = params.get("domfstrim_minimum")
    mountpoint = params.get("domfstrim_mountpoint")
    options = params.get("domfstrim_options", "")
    is_fulltrim = ("yes" == params.get("is_fulltrim", "yes"))
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    has_qemu_ga = not ("yes" == params.get("no_qemu_ga", "no"))
    start_qemu_ga = not ("yes" == params.get("no_start_qemu_ga", "no"))
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Do backup for origin xml
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        bef_list = session.cmd_output("fdisk -l|grep ^/dev|"
                                      "cut -d' ' -f1").split("\n")
        session.close()
        vm.destroy()

        # Load module and get scsi disk name
        linux_modules.load_module("scsi_debug lbpu=1 lbpws=1")
        time.sleep(5)
        scsi_disk = process.run("lsscsi|grep scsi_debug|"
                                "awk '{print $6}'",
                                shell=True).stdout_text.strip()
        # Create partition
        with open("/tmp/fdisk-cmd", "w") as cmd_file:
            cmd_file.write("n\np\n\n\n\nw\n")
        output = process.run("fdisk %s < /tmp/fdisk-cmd" % scsi_disk,
                             shell=True).stdout_text.strip()
        logging.debug("fdisk output %s", output)
        os.remove("/tmp/fdisk-cmd")
        # Format disk
        output = process.run("mkfs.ext3 %s1" % scsi_disk,
                             shell=True).stdout_text.strip()
        logging.debug("output %s", output)
        # Add scsi disk in guest
        recompose_xml(vm_name, scsi_disk)

        # Prepare guest agent and start guest
        if has_qemu_ga:
            vm.prepare_guest_agent(start=start_qemu_ga)
        else:
            # Remove qemu-ga channel
            vm.prepare_guest_agent(channel=has_qemu_ga, start=False)

        guest_session = vm.wait_for_login()
        # Get the newly generated disk
        af_list = guest_session.cmd_output("fdisk -l|grep ^/dev|"
                                           "cut -d' ' -f1").split('\n')
        new_disk = "".join(list(set(bef_list) ^ set(af_list)))
        # Mount disk in guest
        guest_session.cmd("mkdir -p /home/test && mount %s /home/test" %
                          new_disk)

        # Do a first fstrim before all to get the original map for comparison
        cmd_result = virsh.domfstrim(vm_name)
        if cmd_result.exit_status != 0:
            if not status_error:
                test.fail("Failed to do virsh domfstrim, error %s"
                          % cmd_result.stderr)

        def get_diskmap_size():
            """
            Collect size from disk map

            :return: disk size
            """
            map_cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map"
            diskmap = process.run(map_cmd,
                                  shell=True).stdout_text.strip('\n\x00')
            total = 0
            for i in diskmap.split(","):
                total = total + int(i.split("-")[1]) - int(i.split("-")[0])
            logging.debug("disk map (size:%d) is %s", total, diskmap)
            return total

        ori_size = get_diskmap_size()

        # Write data in disk
        dd_cmd = "dd if=/dev/zero of=/home/test/file bs=1048576 count=5; sync"
        guest_session.cmd(dd_cmd)

        def _full_mapped():
            """
            Do full map check

            :return: True or False
            """
            full_size = get_diskmap_size()
            return (ori_size < full_size)

        if not utils_misc.wait_for(_full_mapped, timeout=30):
            test.error("Scsi map is not updated after dd command.")

        full_size = get_diskmap_size()

        # Remove disk content in guest
        guest_session.cmd("rm -rf /home/test/*; sync")
        guest_session.close()

        def _trim_completed():
            """
            Do empty fstrim check

            :return: True or False
            """
            cmd_result = virsh.domfstrim(vm_name, minimum, mountpoint, options,
                                         unprivileged_user=unprivileged_user,
                                         uri=uri)
            if cmd_result.exit_status != 0:
                if not status_error:
                    test.fail("Failed to do virsh domfstrim, error %s"
                              % cmd_result.stderr)
                else:
                    logging.info("Failed to do virsh domfstrim as "
                                 "expected: %s", cmd_result.stderr)
                    return True

            empty_size = get_diskmap_size()
            logging.info("Trimmed disk to %d", empty_size)

            if is_fulltrim:
                return empty_size <= ori_size
            else:
                # For a partial trim, check later
                return False

        if not utils_misc.wait_for(_trim_completed, timeout=30):
            # Get the result again to check a partial fstrim
            empty_size = get_diskmap_size()
            if not is_fulltrim:
                if ori_size < empty_size <= full_size:
                    logging.info("Succeeded to do fstrim partially")
                    return True
            test.fail("Failed to do fstrim. (original size: %s), "
                      "(current size: %s), (full size: %s)"
                      % (ori_size, empty_size, full_size))
        logging.info("Succeeded to do fstrim")

    finally:
        # Do domain recovery
        vm.shutdown()
        xml_backup.sync()
        linux_modules.unload_module("scsi_debug")
def run(test, params, env): """ Test snapshot-create-as command Make sure that the clean repo can be used because qemu-guest-agent need to be installed in guest The command create a snapshot (disk and RAM) from arguments which including the following point * virsh snapshot-create-as --print-xml --diskspec --name --description * virsh snapshot-create-as --print-xml with multi --diskspec * virsh snapshot-create-as --print-xml --memspec * virsh snapshot-create-as --description * virsh snapshot-create-as --no-metadata * virsh snapshot-create-as --no-metadata --print-xml (negative test) * virsh snapshot-create-as --atomic --disk-only * virsh snapshot-create-as --quiesce --disk-only (positive and negative) * virsh snapshot-create-as --reuse-external * virsh snapshot-create-as --disk-only --diskspec * virsh snapshot-create-as --memspec --reuse-external --atomic(negative) * virsh snapshot-create-as --disk-only and --memspec (negative) * Create multi snapshots with snapshot-create-as * Create snapshot with name a--a a--a--snap1 """ if not virsh.has_help_command('snapshot-create-as'): test.cancel("This version of libvirt does not support " "the snapshot-create-as test") vm_name = params.get("main_vm") status_error = params.get("status_error", "no") options = params.get("snap_createas_opts") multi_num = params.get("multi_num", "1") diskspec_num = params.get("diskspec_num", "1") bad_disk = params.get("bad_disk") reuse_external = "yes" == params.get("reuse_external", "no") start_ga = params.get("start_ga", "yes") domain_state = params.get("domain_state") memspec_opts = params.get("memspec_opts") config_format = "yes" == params.get("config_format", "no") snapshot_image_format = params.get("snapshot_image_format") diskspec_opts = params.get("diskspec_opts") create_autodestroy = 'yes' == params.get("create_autodestroy", "no") unix_channel = "yes" == params.get("unix_channel", "yes") dac_denial = "yes" == params.get("dac_denial", "no") check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no") disk_snapshot_attr = params.get('disk_snapshot_attr', 'external') set_snapshot_attr = "yes" == params.get("set_snapshot_attr", "no") # gluster related params replace_vm_disk = "yes" == params.get("replace_vm_disk", "no") disk_src_protocol = params.get("disk_source_protocol") restart_tgtd = params.get("restart_tgtd", "no") vol_name = params.get("vol_name") tmp_dir = data_dir.get_tmp_dir() pool_name = params.get("pool_name", "gluster-pool") brick_path = os.path.join(tmp_dir, pool_name) uri = params.get("virsh_uri") usr = params.get('unprivileged_user') if usr: if usr.count('EXAMPLE'): usr = '******' if disk_src_protocol == 'iscsi': if not libvirt_version.version_compare(1, 0, 4): test.cancel("'iscsi' disk doesn't support in" " current libvirt version.") if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current" " libvirt version.") if not libvirt_version.version_compare(1, 2, 7): # As bug 1017289 closed as WONTFIX, the support only # exist on 1.2.7 and higher if disk_src_protocol == 'gluster': test.cancel("Snapshot on glusterfs not support in " "current version. Check more info with " "https://bugzilla.redhat.com/buglist.cgi?" "bug_id=1017289,1032370") if libvirt_version.version_compare(5, 5, 0): # libvirt-5.5.0-2 commit 68e1a05f starts to allow --no-metadata and # --print-xml to be used together. 
if "--no-metadata" in options and "--print-xml" in options: logging.info("--no-metadata and --print-xml can be used together " "in this libvirt version. Not expecting a failure.") status_error = "no" opt_names = locals() if memspec_opts is not None: mem_options = compose_disk_options(test, params, memspec_opts) # if the parameters have the disk without "file=" then we only need to # add testdir for it. if mem_options is None: mem_options = os.path.join(data_dir.get_tmp_dir(), memspec_opts) options += " --memspec " + mem_options tag_diskspec = 0 dnum = int(diskspec_num) if diskspec_opts is not None: tag_diskspec = 1 opt_names['diskopts_1'] = diskspec_opts # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used if dnum > 1: tag_diskspec = 1 for i in range(1, dnum + 1): opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i) if tag_diskspec == 1: for i in range(1, dnum + 1): disk_options = compose_disk_options(test, params, opt_names["diskopts_%s" % i]) options += " --diskspec " + disk_options logging.debug("options are %s", options) vm = env.get_vm(vm_name) option_dict = {} option_dict = utils_misc.valued_option_dict(options, r' --(?!-)') logging.debug("option_dict is %s", option_dict) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Generate empty image for negative test if bad_disk is not None: bad_disk = os.path.join(data_dir.get_tmp_dir(), bad_disk) with open(bad_disk, 'w') as bad_file: pass # Generate external disk if reuse_external: disk_path = '' for i in range(dnum): external_disk = "external_disk%s" % i if params.get(external_disk): disk_path = os.path.join(data_dir.get_tmp_dir(), params.get(external_disk)) process.run("qemu-img create -f qcow2 %s 1G" % disk_path, shell=True) # Only chmod of the last external disk for negative case if dac_denial: process.run("chmod 500 %s" % disk_path, shell=True) qemu_conf = None libvirtd_conf = None libvirtd_log_path = None libvirtd = utils_libvirtd.Libvirtd() try: # Config "snapshot_image_format" option in qemu.conf if config_format: qemu_conf = utils_config.LibvirtQemuConfig() qemu_conf.snapshot_image_format = snapshot_image_format logging.debug("the qemu config file content is:\n %s" % qemu_conf) libvirtd.restart() if check_json_no_savevm: libvirtd_conf = utils_config.LibvirtdConfig() libvirtd_conf["log_level"] = '1' libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"' libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirtd.log") libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf) libvirtd.restart() if replace_vm_disk: libvirt.set_vm_disk(vm, params, tmp_dir) if set_snapshot_attr: if vm.is_alive(): vm.destroy(gracefully=False) vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = vmxml_backup.get_devices(device_type="disk")[0] vmxml_new.del_device(disk_xml) # set snapshot attribute in disk xml disk_xml.snapshot = disk_snapshot_attr new_disk = disk.Disk(type_name='file') new_disk.xmltreefile = disk_xml.xmltreefile vmxml_new.add_device(new_disk) logging.debug("The vm xml now is: %s" % vmxml_new.xmltreefile) vmxml_new.sync() vm.start() # Start qemu-ga on guest if have --quiesce if unix_channel and options.find("quiesce") >= 0: vm.prepare_guest_agent() session = vm.wait_for_login() if start_ga == "no": # The qemu-ga could be running and should be killed session.cmd("kill -9 `pidof qemu-ga`") # Check if the qemu-ga get killed 
stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: # As managed by systemd and set as autostart, qemu-ga # could be restarted, so use systemctl to stop it. session.cmd("systemctl stop qemu-guest-agent") stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: test.cancel("Fail to stop agent in " "guest") if domain_state == "paused": virsh.suspend(vm_name) else: # Remove channel if exist if vm.is_alive(): vm.destroy(gracefully=False) xml_inst = vm_xml.VMXML.new_from_dumpxml(vm_name) xml_inst.remove_agent_channels() vm.start() # Record the previous snapshot-list snaps_before = virsh.snapshot_list(vm_name) # Attach disk before create snapshot if not print xml and multi disks # specified in cfg if dnum > 1 and "--print-xml" not in options: for i in range(1, dnum): disk_path = os.path.join(data_dir.get_tmp_dir(), 'disk%s.qcow2' % i) process.run("qemu-img create -f qcow2 %s 200M" % disk_path, shell=True) virsh.attach_disk(vm_name, disk_path, 'vd%s' % list(string.ascii_lowercase)[i], debug=True) # Run virsh command # May create several snapshots, according to configuration for count in range(int(multi_num)): if create_autodestroy: # Run virsh command in interactive mode vmxml_backup.undefine() vp = virsh.VirshPersistent() vp.create(vmxml_backup['xml'], '--autodestroy') cmd_result = vp.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) vp.close_session() vmxml_backup.define() else: cmd_result = virsh.snapshot_create_as(vm_name, options, unprivileged_user=usr, uri=uri, ignore_status=True, debug=True) # for multi snapshots without specific snapshot name, the # snapshot name is using time string with 1 second # incremental, to avoid get snapshot failure with same name, # sleep 1 second here. if int(multi_num) > 1: time.sleep(1.1) output = cmd_result.stdout.strip() status = cmd_result.exit_status # check status_error if status_error == "yes": if status == 0: test.fail("Run successfully with wrong command!") else: # Check memspec file should be removed if failed if (options.find("memspec") >= 0 and options.find("atomic") >= 0): if os.path.isfile(option_dict['memspec']): os.remove(option_dict['memspec']) test.fail("Run failed but file %s exist" % option_dict['memspec']) else: logging.info("Run failed as expected and memspec" " file already been removed") # Check domain xml is not updated if reuse external fail elif reuse_external and dac_denial: output = virsh.dumpxml(vm_name).stdout.strip() if "reuse_external" in output: test.fail("Domain xml should not be " "updated with snapshot image") else: logging.info("Run failed as expected") elif status_error == "no": if status != 0: test.fail("Run failed with right command: %s" % output) else: # Check the special options snaps_list = virsh.snapshot_list(vm_name) logging.debug("snaps_list is %s", snaps_list) check_snapslist(test, vm_name, options, option_dict, output, snaps_before, snaps_list) # For cover bug 872292 if check_json_no_savevm: pattern = "The command savevm has not been found" with open(libvirtd_log_path) as f: for line in f: if pattern in line and "error" in line: test.fail("'%s' was found: %s" % (pattern, line)) finally: if vm.is_alive(): vm.destroy() # recover domain xml xml_recover(vmxml_backup) path = "/var/lib/libvirt/qemu/snapshot/" + vm_name if os.path.isfile(path): test.fail("Still can find snapshot metadata") if disk_src_protocol == 'gluster': gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params) libvirtd.restart() if disk_src_protocol == 'iscsi': 
libvirt.setup_or_cleanup_iscsi(False, restart_tgtd=restart_tgtd) # rm bad disks if bad_disk is not None: os.remove(bad_disk) # rm attach disks and reuse external disks if dnum > 1 and "--print-xml" not in options: for i in range(dnum): disk_path = os.path.join(data_dir.get_tmp_dir(), 'disk%s.qcow2' % i) if os.path.exists(disk_path): os.unlink(disk_path) if reuse_external: external_disk = "external_disk%s" % i disk_path = os.path.join(data_dir.get_tmp_dir(), params.get(external_disk)) if os.path.exists(disk_path): os.unlink(disk_path) # restore config if config_format and qemu_conf: qemu_conf.restore() if libvirtd_conf: libvirtd_conf.restore() if libvirtd_conf or (config_format and qemu_conf): libvirtd.restart() if libvirtd_log_path and os.path.exists(libvirtd_log_path): os.unlink(libvirtd_log_path)
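# check_json_no_savevm above scans the libvirtd debug log to prove the legacy
# HMP "savevm" command was never used for the snapshot (bug 872292). A
# standalone sketch of that scan (the pattern mirrors the test above; the log
# path is whatever log_outputs was pointed at):
def found_savevm_error(log_path):
    """Return True if libvirtd logged an error about the savevm command."""
    pattern = "The command savevm has not been found"
    with open(log_path) as f:
        return any(pattern in line and "error" in line for line in f)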
def run_virsh_vcpupin(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    (4) TODO: Right now the testcase covers pinning one cpu at a time;
        this can be improved by using a random number of cpus
    """

    # Initialize the variables
    expected_affinity = []
    total_affinity = []
    actual_affinity = []

    def build_actual_info(domname, vcpu):
        """
        This function returns a list with the vcpu's affinity from
        virsh vcpuinfo output

        @param: domname: VM Name to operate on
        @param: vcpu: vcpu number for which the affinity is required
        """
        output = virsh.vcpuinfo(domname)
        cmd = re.findall('[^Affinity:][-y]+', str(output))
        total_affinity = cmd[vcpu].lstrip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def build_expected_info(vcpu, cpu):
        """
        This function returns the list of the vcpu's expected affinity

        @param: vcpu: vcpu number for which the affinity is required
        @param: cpu: cpu details for the affinity
        """
        expected_affinity = []

        for i in range(int(host_cpu_count)):
            expected_affinity.append('y')

        for i in range(int(host_cpu_count)):
            if cpu != i:
                expected_affinity[i] = '-'

        expected_affinity_proc = int(math.pow(2, cpu))
        return expected_affinity, expected_affinity_proc

    def virsh_check_vcpupin(domname, vcpu, cpu, pid):
        """
        This function checks the actual and the expected affinity of the
        given vcpu and raises an error if they do not match

        @param: domname: VM Name to operate on
        @param: vcpu: vcpu number for which the affinity is required
        @param: cpu: cpu details for the affinity
        """
        expected_output, expected_output_proc = build_expected_info(vcpu, cpu)
        actual_output = build_actual_info(domname, vcpu)

        # Get the vcpus pid
        vcpus_pid = vm.get_vcpus_pid()
        vcpu_pid = vcpus_pid[vcpu]

        # Get the actual cpu affinity value in the proc entry
        output = utils.cpu_affinity_by_task(pid, vcpu_pid)
        actual_output_proc = int(output, 16)

        if expected_output == actual_output:
            logging.info("successfully pinned cpu: %s --> vcpu: %s",
                         cpu, vcpu)
        else:
            raise error.TestFail("Command 'virsh vcpupin %s %s %s' did not "
                                 "succeed; cpu pinning details not updated "
                                 "properly in virsh vcpuinfo command output"
                                 % (vm_name, vcpu, cpu))

        if expected_output_proc == actual_output_proc:
            logging.info("successfully pinned cpu: %s --> vcpu: %s"
                         " in respective proc entry", cpu, vcpu)
        else:
            raise error.TestFail("Command 'virsh vcpupin %s %s %s' did not "
                                 "succeed; cpu pinning details not updated "
                                 "properly in /proc/%s/task/%s/status"
                                 % (vm_name, vcpu, cpu, pid, vcpu_pid))

    if not virsh.has_help_command('vcpucount'):
        raise error.TestNAError("This version of libvirt doesn't support "
                                "this test")

    # Get the vm name, pid of vm and check for alive
    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()

    # Get the host cpu count
    host_cpu_count = utils.count_cpus()

    # Get the guest vcpu count
    guest_vcpu_count = virsh.vcpucount_live(vm_name)

    # Run test case
    for vcpu in range(int(guest_vcpu_count)):
        for cpu in range(int(host_cpu_count)):
            vm.vcpupin(vcpu, cpu)
            virsh_check_vcpupin(vm_name, vcpu, cpu, pid)
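# virsh_check_vcpupin() above compares the same pinning in two encodings: the
# 'y'/'-' per-CPU map printed by virsh vcpuinfo and the hex bitmask read from
# /proc/<pid>/task/<tid>/status. A minimal sketch of generating both forms
# for a single-CPU pin (a hypothetical helper, not part of the test API):
def affinity_forms(cpu, host_cpu_count):
    """Return (vcpuinfo-style map, proc-style bitmask) for pinning to cpu."""
    yn_map = ["y" if i == cpu else "-" for i in range(host_cpu_count)]
    bitmask = 1 << cpu  # same value as int(math.pow(2, cpu)) above
    return yn_map, bitmask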
def run_virsh_domfstrim(test, params, env):
    """
    Test domfstrim command, make sure that all supported options work well

    Test scenarios:
    1. fstrim without options
    2. fstrim with --minimum with large options
    3. fstrim with --minimum with small options

    Note: --mountpoint is not supported yet, so it is not tested here
    """
    if not virsh.has_help_command('domfstrim'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the domfstrim test")
    try:
        utils_misc.find_command("lsscsi")
    except ValueError:
        raise error.TestNAError("Command 'lsscsi' is missing. You must "
                                "install it.")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    minimum = params.get("domfstrim_minimum")
    mountpoint = params.get("domfstrim_mountpoint")
    options = params.get("domfstrim_options", "")
    is_fulltrim = ("yes" == params.get("is_fulltrim", "yes"))

    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest

        :param: vm_name: Name of domain
        :param: scsi_disk: scsi_debug disk name
        """
        # Get disk path of scsi_disk
        path_cmd = "udevadm info --name %s | grep /dev/disk/by-path/ | " \
                   "cut -d' ' -f4" % scsi_disk
        disk_path = utils.run(path_cmd).stdout.strip()
        # Add qemu guest agent in guest xml
        vm_xml.VMXML.set_agent_channel(vm_name)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Add scsi disk xml; use a new name so the scsi_debug disk name
        # passed in is not shadowed
        disk_xml = Disk(type_name="block")
        disk_xml.device = "lun"
        disk_xml.source = disk_xml.new_disk_source(
            **{'attrs': {'dev': disk_path}})
        disk_xml.target = {'dev': "sdb", 'bus': "scsi"}
        vmxml.add_device(disk_xml)
        # Add scsi disk controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)
        # Redefine guest
        vmxml.sync()

    def start_guest_agent(session):
        """
        Start guest agent service in guest

        :param: session: session in guest
        """
        # Check if qemu-ga installed
        check_cmd = "rpm -q qemu-guest-agent||yum install -y qemu-guest-agent"
        session.cmd(check_cmd)
        session.cmd("service qemu-guest-agent start")
        # Check if the qemu-ga really started
        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
        if stat_ps != 0:
            raise error.TestFail("Fail to run qemu-ga in guest")

    # Do backup for origin xml
    xml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    try:
        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        bef_list = session.cmd_output("fdisk -l|grep ^/dev|"
                                      "cut -d' ' -f1").split("\n")
        session.close()
        vm.destroy()

        # Load module and get scsi disk name
        utils.load_module("scsi_debug lbpu=1 lbpws=1")
        scsi_disk = utils.run("lsscsi|grep scsi_debug|"
                              "awk '{print $6}'").stdout.strip()
        # Create partition
        open("/tmp/fdisk-cmd", "w").write("n\np\n\n\n\nw\n")
        output = utils.run("fdisk %s < /tmp/fdisk-cmd" %
                           scsi_disk).stdout.strip()
        logging.debug("fdisk output %s", output)
        # Format disk
        output = utils.run("mkfs.ext3 %s1" % scsi_disk).stdout.strip()
        logging.debug("output %s", output)
        # Add scsi disk and agent channel in guest
        recompose_xml(vm_name, scsi_disk)
        vm.start()
        guest_session = vm.wait_for_login()
        start_guest_agent(guest_session)
        # Get new generated disk
        af_list = guest_session.cmd_output("fdisk -l|grep ^/dev|"
                                           "cut -d' ' -f1").split('\n')
        new_disk = "".join(list(set(bef_list) ^ set(af_list)))
        # Mount disk in guest
        guest_session.cmd("mkdir -p /home/test && mount %s /home/test" %
                          new_disk)
        # Do first fstrim before all to get original map for compare
        cmd_result = virsh.domfstrim(vm_name)
        if cmd_result.exit_status != 0:
            raise error.TestFail("Fail to do virsh domfstrim, error %s" %
                                 cmd_result.stderr)

        def get_diskmap_size():
            """
            Collect size from disk map

            :return: disk size
            """
            map_cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map"
            diskmap = utils.run(map_cmd).stdout.strip('\n\x00')
            logging.debug("disk map is %s", diskmap)
            sum = 0
            for i in diskmap.split(","):
                sum = sum + int(i.split("-")[1]) - int(i.split("-")[0])
            return sum

        ori_size = get_diskmap_size()

        # Write data in disk
        dd_cmd = "dd if=/dev/zero of=/home/test/file bs=1048576 count=5"
        guest_session.cmd(dd_cmd)

        def _full_mapped():
            """
            Do full map check

            :return: True or False
            """
            full_size = get_diskmap_size()
            return (ori_size < full_size)

        if not utils_misc.wait_for(_full_mapped, timeout=30):
            raise error.TestError("Scsi map is not updated after dd command.")

        full_size = get_diskmap_size()

        # Remove disk content in guest
        guest_session.cmd("rm -rf /home/test/*")
        guest_session.close()

        def _trim_completed():
            """
            Do empty fstrim check

            :return: True or False
            """
            cmd_result = virsh.domfstrim(vm_name, minimum, mountpoint,
                                         options)
            if cmd_result.exit_status != 0:
                if not status_error:
                    raise error.TestFail("Fail to do virsh domfstrim, error "
                                         "%s" % cmd_result.stderr)
                else:
                    logging.info("Fail to do virsh domfstrim as expected: %s",
                                 cmd_result.stderr)
                    return True

            empty_size = get_diskmap_size()
            if is_fulltrim:
                return empty_size <= ori_size
            else:
                # A partial trim is checked after the wait loop
                return False

        if not utils_misc.wait_for(_trim_completed, timeout=30):
            if not is_fulltrim:
                # Get result again to check partial fstrim
                empty_size = get_diskmap_size()
                if ori_size < empty_size <= full_size:
                    logging.info("Success to do fstrim partially")
                    return True
            raise error.TestFail("Fail to do fstrim: disk map size was not "
                                 "reduced (original %s, current %s)"
                                 % (ori_size, get_diskmap_size()))
        logging.info("Success to do fstrim")
    finally:
        # Do domain recovery
        xml_backup.sync()
        utils.unload_module("scsi_debug")
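# Illustrative sketch (not part of the original test): get_diskmap_size()
# above sums the allocated ranges that scsi_debug reports in
# /sys/bus/pseudo/drivers/scsi_debug/map, e.g. "0-16,64-80"; a trim is
# judged by whether that sum shrinks back toward its pre-dd value.
# A standalone re-implementation of the parsing with a made-up map string:

def mapped_blocks(diskmap):
    """Sum the sizes of 'start-end' ranges in a scsi_debug map string."""
    total = 0
    for chunk in diskmap.strip('\n\x00').split(','):
        start, end = chunk.split('-')
        total += int(end) - int(start)
    return total

print(mapped_blocks("0-16,64-80"))   # -> 32 blocks currently mapped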
def run(test, params, env): """ Test snapshot-create-as command Make sure that the clean repo can be used because qemu-guest-agent need to be installed in guest The command create a snapshot (disk and RAM) from arguments which including the following point * virsh snapshot-create-as --print-xml --diskspec --name --description * virsh snapshot-create-as --print-xml with multi --diskspec * virsh snapshot-create-as --print-xml --memspec * virsh snapshot-create-as --description * virsh snapshot-create-as --no-metadata * virsh snapshot-create-as --no-metadata --print-xml (negative test) * virsh snapshot-create-as --atomic --disk-only * virsh snapshot-create-as --quiesce --disk-only (positive and negative) * virsh snapshot-create-as --reuse-external * virsh snapshot-create-as --disk-only --diskspec * virsh snapshot-create-as --memspec --reuse-external --atomic(negative) * virsh snapshot-create-as --disk-only and --memspec (negative) * Create multi snapshots with snapshot-create-as * Create snapshot with name a--a a--a--snap1 """ if not virsh.has_help_command('snapshot-create-as'): raise error.TestNAError("This version of libvirt does not support " "the snapshot-create-as test") vm_name = params.get("main_vm") status_error = params.get("status_error", "no") options = params.get("snap_createas_opts") multi_num = params.get("multi_num", "1") diskspec_num = params.get("diskspec_num", "1") bad_disk = params.get("bad_disk") reuse_external = "yes" == params.get("reuse_external", "no") start_ga = params.get("start_ga", "yes") domain_state = params.get("domain_state") memspec_opts = params.get("memspec_opts") config_format = "yes" == params.get("config_format", "no") snapshot_image_format = params.get("snapshot_image_format") diskspec_opts = params.get("diskspec_opts") create_autodestroy = 'yes' == params.get("create_autodestroy", "no") unix_channel = "yes" == params.get("unix_channel", "yes") dac_denial = "yes" == params.get("dac_denial", "no") check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no") uri = params.get("virsh_uri") usr = params.get('unprivileged_user') if usr: if usr.count('EXAMPLE'): usr = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" + " libvirt version.") opt_names = locals() if memspec_opts is not None: mem_options = compose_disk_options(test, params, memspec_opts) # if the parameters have the disk without "file=" then we only need to # add testdir for it. 
if mem_options is None: mem_options = os.path.join(test.tmpdir, memspec_opts) options += " --memspec " + mem_options tag_diskspec = 0 dnum = int(diskspec_num) if diskspec_opts is not None: tag_diskspec = 1 opt_names['diskopts_1'] = diskspec_opts # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used if dnum > 1: tag_diskspec = 1 for i in range(1, dnum + 1): opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i) if tag_diskspec == 1: for i in range(1, dnum + 1): disk_options = compose_disk_options(test, params, opt_names["diskopts_%s" % i]) options += " --diskspec " + disk_options logging.debug("options are %s", options) vm = env.get_vm(vm_name) option_dict = {} option_dict = utils_misc.valued_option_dict(options, r' --(?!-)') logging.debug("option_dict is %s", option_dict) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Generate empty image for negative test if bad_disk is not None: bad_disk = os.path.join(test.tmpdir, bad_disk) os.open(bad_disk, os.O_RDWR | os.O_CREAT) # Generate external disk if reuse_external: disk_path = '' for i in range(dnum): external_disk = "external_disk%s" % i if params.get(external_disk): disk_path = os.path.join(test.tmpdir, params.get(external_disk)) utils.run("qemu-img create -f qcow2 %s 1G" % disk_path) # Only chmod of the last external disk for negative case if dac_denial: utils.run("chmod 500 %s" % disk_path) qemu_conf = None libvirtd_conf = None libvirtd_log_path = None libvirtd = utils_libvirtd.Libvirtd() try: # Config "snapshot_image_format" option in qemu.conf if config_format: qemu_conf = utils_config.LibvirtQemuConfig() qemu_conf.snapshot_image_format = snapshot_image_format logging.debug("the qemu config file content is:\n %s" % qemu_conf) libvirtd.restart() if check_json_no_savevm: libvirtd_conf = utils_config.LibvirtdConfig() libvirtd_conf["log_level"] = '1' libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"' libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log") libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path logging.debug("the libvirtd config file content is:\n %s" % libvirtd_conf) libvirtd.restart() # Start qemu-ga on guest if have --quiesce if unix_channel and options.find("quiesce") >= 0: if vm.is_alive(): vm.destroy() virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh) virt_xml_obj.set_agent_channel(vm_name) vm.start() session = vm.wait_for_login() # Check if qemu-ga already started automatically cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent" stat_install = session.cmd_status(cmd, 300) if stat_install != 0: raise error.TestNAError("Fail to install qemu-guest-agent, " "make sure that you have usable repo " "in guest") # Check if qemu-ga already started stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if stat_ps != 0: if start_ga == "yes": session.cmd("qemu-ga -d") # Check if the qemu-ga really started stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if stat_ps != 0: raise error.TestNAError("Fail to run qemu-ga in guest") else: if start_ga == "no": # The qemu-ga could be running and should be killed session.cmd("kill -9 `pidof qemu-ga`") # Check if the qemu-ga get killed stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: # As managed by systemd and set as autostart, qemu-ga # could be restarted, so use systemctl to stop it. 
session.cmd("systemctl stop qemu-guest-agent") stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: raise error.TestNAError("Fail to stop agent in " "guest") if domain_state == "paused": virsh.suspend(vm_name) # Record the previous snapshot-list snaps_before = virsh.snapshot_list(vm_name) # Attach disk before create snapshot if not print xml and multi disks # specified in cfg if dnum > 1 and "--print-xml" not in options: for i in range(1, dnum): disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i) utils.run("qemu-img create -f qcow2 %s 200M" % disk_path) virsh.attach_disk(vm_name, disk_path, 'vd%s' % list(string.lowercase)[i], debug=True) # Run virsh command # May create several snapshots, according to configuration for count in range(int(multi_num)): if create_autodestroy: # Run virsh command in interactive mode vmxml_backup.undefine() vp = virsh.VirshPersistent() vp.create(vmxml_backup['xml'], '--autodestroy') cmd_result = vp.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) vp.close_session() vmxml_backup.define() else: cmd_result = virsh.snapshot_create_as(vm_name, options, unprivileged_user=usr, uri=uri, ignore_status=True, debug=True) output = cmd_result.stdout.strip() status = cmd_result.exit_status # check status_error if status_error == "yes": if status == 0: raise error.TestFail("Run successfully with wrong command!") else: # Check memspec file should be removed if failed if (options.find("memspec") >= 0 and options.find("atomic") >= 0): if os.path.isfile(option_dict['memspec']): os.remove(option_dict['memspec']) raise error.TestFail("Run failed but file %s exist" % option_dict['memspec']) else: logging.info("Run failed as expected and memspec" " file already been removed") # Check domain xml is not updated if reuse external fail elif reuse_external and dac_denial: output = virsh.dumpxml(vm_name).stdout.strip() if "reuse_external" in output: raise error.TestFail("Domain xml should not be " "updated with snapshot image") else: logging.info("Run failed as expected") elif status_error == "no": if status != 0: raise error.TestFail("Run failed with right command: %s" % output) else: # Check the special options snaps_list = virsh.snapshot_list(vm_name) logging.debug("snaps_list is %s", snaps_list) check_snapslist(vm_name, options, option_dict, output, snaps_before, snaps_list) # For cover bug 872292 if check_json_no_savevm: pattern = "The command savevm has not been found" with open(libvirtd_log_path) as f: for line in f: if pattern in line and "error" in line: raise error.TestFail("'%s' was found: %s" % (pattern, line)) finally: # recover domain xml xml_recover(vmxml_backup) path = "/var/lib/libvirt/qemu/snapshot/" + vm_name if os.path.isfile(path): raise error.TestFail("Still can find snapshot metadata") # rm bad disks if bad_disk is not None: os.remove(bad_disk) # rm attach disks and reuse external disks if dnum > 1 and "--print-xml" not in options: for i in range(dnum): disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i) if os.path.exists(disk_path): os.unlink(disk_path) external_disk = "external_disk%s" % i disk_path = os.path.join(test.tmpdir, params.get(external_disk)) if os.path.exists(disk_path): os.unlink(disk_path) # restore config if config_format and qemu_conf: qemu_conf.restore() if libvirtd_conf: libvirtd_conf.restore() if libvirtd_conf or (config_format and qemu_conf): libvirtd.restart() if libvirtd_log_path and os.path.exists(libvirtd_log_path): os.unlink(libvirtd_log_path)
def run(test, params, env):
    """
    Test the command virsh memtune

    (1) To get the current memtune parameters
    (2) Change the parameter values
    (3) Check the memtune query updated with the values
    (4) Check whether the mounted cgroup path gets the updated value
    (5) Login to guest and use the memory greater than the assigned value
        and check whether it kills the vm.
    (6) TODO: Check more values and robust scenarios.
    """

    def check_limit(path, expected_value, limit_name):
        """
        Matches the expected and actual output
        (1) Match the output of the virsh memtune
        (2) Match the output of the respective cgroup fs value

        :params: path: memory controller path for a domain
        :params: expected_value: the expected limit value
        :params: limit_name: the limit to be checked
                 hard_limit/soft_limit/swap_hard_limit
        :return: True or False based on the checks
        """
        status_value = True
        # Check 1
        actual_value = virsh.memtune_get(domname, limit_name)
        if actual_value == -1:
            raise error.TestFail("the key %s not found in the "
                                 "virsh memtune output" % limit_name)
        if actual_value != int(expected_value):
            status_value = False
            logging.error("%s virsh output:\n\tExpected value:%d"
                          "\n\tActual value: "
                          "%d", limit_name,
                          int(expected_value), int(actual_value))

        # Check 2
        if limit_name == 'hard_limit':
            cg_file_name = '%s/memory.limit_in_bytes' % path
        elif limit_name == 'soft_limit':
            cg_file_name = '%s/memory.soft_limit_in_bytes' % path
        elif limit_name == 'swap_hard_limit':
            cg_file_name = '%s/memory.memsw.limit_in_bytes' % path

        cg_file = None
        try:
            try:
                cg_file = open(cg_file_name)
                output = cg_file.read()
                value = int(output) / 1024
                if int(expected_value) != int(value):
                    status_value = False
                    logging.error("%s cgroup fs:\n\tExpected Value: %d"
                                  "\n\tActual Value: "
                                  "%d", limit_name,
                                  int(expected_value), int(value))
            except IOError:
                status_value = False
                logging.error("Error while reading:\n%s", cg_file_name)
        finally:
            if cg_file is not None:
                cg_file.close()

        return status_value

    # Get the vm name, pid of vm and check for alive
    domname = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()
    logging.info("Verify valid cgroup path for VM pid: %s", pid)

    # Resolve the memory cgroup path for a domain
    path = utils_cgroup.resolve_task_cgroup_path(int(pid), "memory")

    # Set the initial memory starting value for test case
    # By default set 1GB less than the total memory
    # In case of total memory is less than 1GB set to 256MB
    # visit subtests.cfg to change these default values
    Memtotal = utils_memory.read_from_meminfo('MemTotal')
    base_mem = params.get("memtune_base_mem")

    if int(Memtotal) < int(base_mem):
        Mem = int(params.get("memtune_min_mem"))
    else:
        Mem = int(Memtotal) - int(base_mem)

    # Initialize error counter
    error_counter = 0

    # Check for memtune command is available in the libvirt version under test
    if not virsh.has_help_command("memtune"):
        raise error.TestNAError(
            "Memtune not available in this libvirt version")

    # Run test case with 100kB increase in memory value for each iteration
    while (Mem < Memtotal):
        if virsh.has_command_help_match("memtune", "hard-limit"):
            hard_mem = Mem - int(params.get("memtune_hard_base_mem"))
            options = " --hard-limit %d --live" % hard_mem
            virsh.memtune_set(domname, options)
            if not check_limit(path, hard_mem, "hard_limit"):
                error_counter += 1
        else:
            raise error.TestNAError("hard-limit option not available in "
                                    "memtune cmd in this libvirt version")

        if virsh.has_command_help_match("memtune", "soft-limit"):
            soft_mem = Mem - int(params.get("memtune_soft_base_mem"))
            options = " --soft-limit %d --live" % soft_mem
            virsh.memtune_set(domname, options)
            if not check_limit(path, soft_mem, "soft_limit"):
                error_counter += 1
        else:
            raise error.TestNAError("soft-limit option not available in "
                                    "memtune cmd in this libvirt version")

        if virsh.has_command_help_match("memtune", "swap-hard-limit"):
            swaphard = Mem
            options = " --swap-hard-limit %d --live" % swaphard
            virsh.memtune_set(domname, options)
            if not check_limit(path, swaphard, "swap_hard_limit"):
                error_counter += 1
        else:
            raise error.TestNAError("swap-hard-limit option not available "
                                    "in memtune cmd in this libvirt version")

        Mem += int(params.get("memtune_hard_base_mem"))

    # Raise error based on error_counter
    if error_counter > 0:
        raise error.TestFail(
            "Test failed, consult the previous error messages")
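# Illustrative sketch (not part of the original test): check_limit() above
# compares a limit handed to 'virsh memtune' (in KiB) with the cgroup v1
# file, which stores bytes; hence the division by 1024. A worked example
# with invented numbers:

def cgroup_bytes_to_kib(raw):
    """Convert a memory.limit_in_bytes reading to the KiB virsh reports."""
    return int(raw) // 1024

hard_limit_kib = 524288              # value passed to --hard-limit
cgroup_value = "536870912\n"         # hypothetical file content (bytes)
assert cgroup_bytes_to_kib(cgroup_value) == hard_limit_kib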
def run(test, params, env): """ Test snapshot-create-as command Make sure that the clean repo can be used because qemu-guest-agent need to be installed in guest The command create a snapshot (disk and RAM) from arguments which including the following point * virsh snapshot-create-as --print-xml --diskspec --name --description * virsh snapshot-create-as --print-xml with multi --diskspec * virsh snapshot-create-as --print-xml --memspec * virsh snapshot-create-as --description * virsh snapshot-create-as --no-metadata * virsh snapshot-create-as --no-metadata --print-xml (negative test) * virsh snapshot-create-as --atomic --disk-only * virsh snapshot-create-as --quiesce --disk-only (positive and negative) * virsh snapshot-create-as --reuse-external * virsh snapshot-create-as --disk-only --diskspec * virsh snapshot-create-as --memspec --reuse-external --atomic(negative) * virsh snapshot-create-as --disk-only and --memspec (negative) * Create multi snapshots with snapshot-create-as * Create snapshot with name a--a a--a--snap1 """ if not virsh.has_help_command('snapshot-create-as'): raise error.TestNAError("This version of libvirt does not support " "the snapshot-create-as test") vm_name = params.get("main_vm") status_error = params.get("status_error", "no") options = params.get("snap_createas_opts") multi_num = params.get("multi_num", "1") diskspec_num = params.get("diskspec_num", "1") bad_disk = params.get("bad_disk") external_disk = params.get("external_disk") start_ga = params.get("start_ga", "yes") domain_state = params.get("domain_state") memspec_opts = params.get("memspec_opts") diskspec_opts = params.get("diskspec_opts") create_autodestroy = 'yes' == params.get("create_autodestroy", "no") uri = params.get("virsh_uri") usr = params.get('unprivileged_user') if usr: if usr.count('EXAMPLE'): usr = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" + " libvirt version.") opt_names = locals() if memspec_opts is not None: mem_options = compose_disk_options(test, params, memspec_opts) # if the parameters have the disk without "file=" then we only need to # add testdir for it. 
if mem_options is None: mem_options = os.path.join(test.virtdir, memspec_opts) options += " --memspec " + mem_options tag_diskspec = 0 dnum = int(diskspec_num) if diskspec_opts is not None: tag_diskspec = 1 opt_names['diskopts_1'] = diskspec_opts # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used if dnum > 1: tag_diskspec = 1 for i in range(1, dnum + 1): opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i) if tag_diskspec == 1: for i in range(1, dnum + 1): disk_options = compose_disk_options(test, params, opt_names["diskopts_%s" % i]) options += " --diskspec " + disk_options logging.debug("options are %s", options) vm = env.get_vm(vm_name) option_dict = {} option_dict = utils_misc.valued_option_dict(options, r' --(?!-)') logging.debug("option_dict is %s", option_dict) # A backup of original vm vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) logging.debug("original xml is %s", vmxml_backup) # Generate empty image for negative test if bad_disk is not None: bad_disk = os.path.join(test.virtdir, bad_disk) os.open(bad_disk, os.O_RDWR | os.O_CREAT) # Generate external disk if external_disk is not None: external_disk = os.path.join(test.virtdir, external_disk) commands.getoutput("qemu-img create -f qcow2 %s 1G" % external_disk) try: # Start qemu-ga on guest if have --quiesce if options.find("quiesce") >= 0: if vm.is_alive(): vm.destroy() virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh) virt_xml_obj.set_agent_channel(vm_name) vm.start() session = vm.wait_for_login() # Check if qemu-ga already started automatically cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent" stat_install = session.cmd_status(cmd, 300) if stat_install != 0: raise error.TestNAError("Fail to install qemu-guest-agent, " "make sure that you have usable repo " "in guest") # Check if qemu-ga already started stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if stat_ps != 0: if start_ga == "yes": session.cmd("qemu-ga -d") # Check if the qemu-ga really started stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if stat_ps != 0: raise error.TestNAError("Fail to run qemu-ga in guest") else: if start_ga == "no": # The qemu-ga could be running and should be killed session.cmd("kill -9 `pidof qemu-ga`") # Check if the qemu-ga get killed stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: # As managed by systemd and set as autostart, qemu-ga # could be restarted, so use systemctl to stop it. 
session.cmd("systemctl stop qemu-guest-agent") stat_ps = session.cmd_status("ps aux |grep [q]emu-ga") if not stat_ps: raise error.TestNAError("Fail to stop agent in " "guest") if domain_state == "paused": virsh.suspend(vm_name) # Record the previous snapshot-list snaps_before = virsh.snapshot_list(vm_name) # Run virsh command # May create several snapshots, according to configuration for count in range(int(multi_num)): if create_autodestroy: # Run virsh command in interactive mode vmxml_backup.undefine() vp = virsh.VirshPersistent() vp.create(vmxml_backup['xml'], '--autodestroy') cmd_result = vp.snapshot_create_as(vm_name, options, ignore_status=True, debug=True) vp.close_session() vmxml_backup.define() else: cmd_result = virsh.snapshot_create_as(vm_name, options, unprivileged_user=usr, uri=uri, ignore_status=True, debug=True) output = cmd_result.stdout.strip() status = cmd_result.exit_status # check status_error if status_error == "yes": if status == 0: raise error.TestFail( "Run successfully with wrong command!") else: # Check memspec file should be removed if failed if (options.find("memspec") >= 0 and options.find("atomic") >= 0): if os.path.isfile(option_dict['memspec']): os.remove(option_dict['memspec']) raise error.TestFail( "Run failed but file %s exist" % option_dict['memspec']) else: logging.info( "Run failed as expected and memspec file" " already beed removed") else: logging.info("Run failed as expected") elif status_error == "no": if status != 0: raise error.TestFail("Run failed with right command: %s" % output) else: # Check the special options snaps_list = virsh.snapshot_list(vm_name) logging.debug("snaps_list is %s", snaps_list) check_snapslist(vm_name, options, option_dict, output, snaps_before, snaps_list) finally: # Environment clean if options.find("quiesce") >= 0 and start_ga == "yes": session.cmd("rpm -e qemu-guest-agent") # recover domain xml xml_recover(vmxml_backup) path = "/var/lib/libvirt/qemu/snapshot/" + vm_name if os.path.isfile(path): raise error.TestFail("Still can find snapshot metadata") # rm bad disks if bad_disk is not None: os.remove(bad_disk)
def run(test, params, env): """ Test send-key command, include all types of codeset and sysrq For normal sendkey test, we create a file to check the command execute by send-key. For sysrq test, check the /var/log/messages and guest status """ if not virsh.has_help_command('send-key'): raise error.TestNAError("This version of libvirt does not support " "the send-key test") vm_name = params.get("main_vm", "avocado-vt-vm1") status_error = ("yes" == params.get("status_error", "no")) options = params.get("sendkey_options", "") sysrq_test = ("yes" == params.get("sendkey_sysrq", "no")) sleep_time = int(params.get("sendkey_sleeptime", 2)) readonly = params.get("readonly", False) username = params.get("username") password = params.get("password") create_file = params.get("create_file_name") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") def send_line(send_str): """ send string to guest with send-key and end with Enter """ for send_ch in list(send_str): virsh.sendkey(vm_name, "KEY_%s" % send_ch.upper(), ignore_status=False) virsh.sendkey(vm_name, "KEY_ENTER", ignore_status=False) vm = env.get_vm(vm_name) session = vm.wait_for_login() if sysrq_test: # Is 'rsyslog' installed on guest? It'll be what writes out # to /var/log/messages rpm_stat = session.cmd_status("rpm -q rsyslog") if rpm_stat != 0: logging.debug("rsyslog not found in guest installing") stat_install = session.cmd_status("yum install -y rsyslog", 300) if stat_install != 0: raise error.TestFail("Fail to install rsyslog, make" "sure that you have usable repo in guest") # clear messages, restart rsyslog, and make sure it's running session.cmd("echo '' > /var/log/messages") session.cmd("service rsyslog restart") ps_stat = session.cmd_status("ps aux |grep rsyslog") if ps_stat != 0: raise error.TestFail("rsyslog is not running in guest") # enable sysrq session.cmd("echo 1 > /proc/sys/kernel/sysrq") # make sure the environment is clear if create_file is not None: session.cmd("rm -rf %s" % create_file) try: # wait for tty1 started tty1_stat = "ps aux|grep tty[1]" timeout = 60 while timeout >= 0 and \ session.get_command_status(tty1_stat) != 0: time.sleep(1) timeout = timeout - 1 if timeout < 0: raise error.TestFail("Can not wait for tty1 started in 60s") # send user and passwd to guest to login send_line(username) time.sleep(2) send_line(password) time.sleep(2) output = virsh.sendkey(vm_name, options, readonly=readonly, unprivileged_user=unprivileged_user, uri=uri) time.sleep(sleep_time) if output.exit_status != 0: if status_error: logging.info( "Failed to sendkey to guest as expected, Error:" "%s.", output.stderr) return else: raise error.TestFail("Failed to send key to guest, Error:%s." 
% output.stderr) elif status_error: raise error.TestFail("Expect fail, but succeed indeed.") if create_file is not None: # check if created file exist cmd_ls = "ls %s" % create_file sec_status, sec_output = session.get_command_status_output(cmd_ls) if sec_status == 0: logging.info("Succeed to create file with send key") else: raise error.TestFail("Fail to create file with send key, " "Error:%s" % sec_output) elif sysrq_test: # check /var/log/message info according to different key # Since there's no guarantee when messages will be written # we'll do a check and wait loop for up to 60 seconds timeout = 60 while timeout >= 0: if "KEY_H" in options: get_status = session.cmd_status("cat /var/log/messages|" "grep 'SysRq.*HELP'") elif "KEY_M" in options: get_status = session.cmd_status( "cat /var/log/messages|" "grep 'SysRq.*Show Memory'") elif "KEY_T" in options: get_status = session.cmd_status("cat /var/log/messages|" "grep 'SysRq.*Show State'") elif "KEY_B" in options: client_session = vm.wait_for_login() result = virsh.domstate(vm_name, '--reason', ignore_status=True) output = result.stdout.strip() logging.debug("The guest state: %s", output) if not output.count("booted"): get_status = 1 else: get_status = 0 client_session.close() if get_status == 0: timeout = -1 else: session.cmd( "echo \"virsh sendkey waiting\" >> /var/log/messages") time.sleep(1) timeout = timeout - 1 if get_status != 0: raise error.TestFail("SysRq does not take effect in guest, " "options is %s" % options) else: logging.info("Succeed to send SysRq command") else: raise error.TestFail("Test cfg file invalid: either sysrq_params " "or create_file_name must be defined") finally: if create_file is not None: session.cmd("rm -rf %s" % create_file) session.close()
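# Illustrative sketch (not part of the original test): send_line() above
# types a string into the guest one keypress at a time by translating each
# character to a KEY_* keycode name and finishing with KEY_ENTER. A
# standalone version that only prints the virsh invocations it would issue
# ('demo-guest' is an invented domain name):

def keycodes_for_line(send_str):
    """Yield KEY_* names for a string of letters, ending with KEY_ENTER."""
    for ch in send_str:
        yield "KEY_%s" % ch.upper()
    yield "KEY_ENTER"

for key in keycodes_for_line("root"):
    print("virsh send-key demo-guest %s" % key)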
def run(test, params, env): """ Test virsh domblkerror in 2 types error 1. unspecified error 2. no space """ if not virsh.has_help_command('domblkerror'): raise error.TestNAError("This version of libvirt does not support " "domblkerror test") vm_name = params.get("main_vm", "virt-tests-vm1") error_type = params.get("domblkerror_error_type") timeout = params.get("domblkerror_timeout", 240) mnt_dir = params.get("domblkerror_mnt_dir", "/home/test") tmp_file = params.get("domblkerror_tmp_file", "/tmp/fdisk-cmd") export_file = params.get("nfs_export_file", "/etc/exports") img_name = params.get("domblkerror_img_name", "libvirt-disk") img_size = params.get("domblkerror_img_size") target_dev = params.get("domblkerror_target_dev", "vdb") pool_name = params.get("domblkerror_pool_name", "fs_pool") vol_name = params.get("domblkerror_vol_name", "vol1") vm = env.get_vm(vm_name) # backup /etc/exports shutil.copyfile(export_file, "%s.bak" % export_file) selinux_bak = "" # backup xml vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Gerenate tmp dir tmp_dir = data_dir.get_tmp_dir() img_dir = os.path.join(tmp_dir, 'images') if not os.path.exists(img_dir): os.mkdir(img_dir) # Generate attached disk utils.run("qemu-img create %s %s" % (os.path.join(img_dir, img_name), img_size)) # Get unspecified error if error_type == "unspecified error": # In this situation, guest will attach a disk on nfs, stop nfs # service will cause guest paused and get unspecified error nfs_dir = os.path.join(tmp_dir, 'mnt') if not os.path.exists(nfs_dir): os.mkdir(nfs_dir) mount_opt = "rw,no_root_squash,async" res = utils_test.libvirt.setup_or_cleanup_nfs( is_setup=True, mount_dir=nfs_dir, is_mount=False, export_options=mount_opt, export_dir=img_dir) selinux_bak = res["selinux_status_bak"] utils.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 " "127.0.0.1:%s %s" % (img_dir, nfs_dir)) img_path = os.path.join(nfs_dir, img_name) nfs_service = Factory.create_service("nfs") elif error_type == "no space": # Steps to generate no space block error: # 1. Prepare a iscsi disk and build fs pool with it # 2. Create vol with larger capacity and 0 allocation # 3. Attach this disk in guest # 4. 
In guest, create large image in the vol, which may cause # guest paused pool_target = os.path.join(tmp_dir, pool_name) _pool_vol = utils_test.libvirt.PoolVolumeTest(test, params) _pool_vol.pre_pool(pool_name, "fs", pool_target, img_name, image_size=img_size) _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name) img_path = os.path.join(pool_target, vol_name) # Generate disk xml # Guest will attach a disk with cache=none and error_policy=stop img_disk = Disk(type_name="file") img_disk.device = "disk" img_disk.source = img_disk.new_disk_source( **{'attrs': {'file': img_path}}) img_disk.driver = {'name': "qemu", 'type': "raw", 'cache': "none", 'error_policy': "stop"} img_disk.target = {'dev': target_dev, 'bus': "virtio"} logging.debug("disk xml is %s", img_disk.xml) # Start guest and get session if not vm.is_alive(): vm.start() session = vm.wait_for_login() # Get disk list before operation get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2" bef_list = session.cmd_output(get_disks_cmd).split("\n") # Attach disk to guest ret = virsh.attach_device(domain_opt=vm_name, file_opt=img_disk.xml) if ret.exit_status != 0: raise error.TestFail("Fail to attach device %s" % ret.stderr) time.sleep(2) logging.debug("domain xml is %s", virsh.dumpxml(vm_name)) # get disk list after attach aft_list = session.cmd_output(get_disks_cmd).split("\n") # Find new disk after attach new_disk = "".join(list(set(bef_list) ^ set(aft_list))) logging.debug("new disk is %s", new_disk) def create_large_image(): """ Create large image in guest """ # create partition and file system session.cmd("echo 'n\np\n\n\n\nw\n' > %s" % tmp_file) # mount disk and write file in it try: # The following steps may cause guest paused before it return session.cmd("fdisk %s < %s" % (new_disk, tmp_file)) session.cmd("mkfs.ext3 %s1" % new_disk) session.cmd("mkdir -p %s && mount %s1 %s" % (mnt_dir, new_disk, mnt_dir)) session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 " "count=51200 && sync" % mnt_dir) except Exception, err: logging.debug("Expected Fail %s" % err) session.close() create_large_image() if error_type == "unspecified error": # umount nfs to trigger error after create large image nfs_service.stop() logging.debug("nfs status is %s", nfs_service.status()) # wait and check the guest status with timeout def _check_state(): """ Check domain state """ return (vm.state() == "paused") if not utils_misc.wait_for(_check_state, timeout): raise error.TestFail("Guest does not paused, it is %s now" % vm.state()) else: logging.info("Now domain state changed to paused status") output = virsh.domblkerror(vm_name) if output.exit_status == 0: expect_result = "%s: %s" % (img_disk.target['dev'], error_type) if output.stdout.strip() == expect_result: logging.info("Get expect result: %s", expect_result) else: raise error.TestFail("Failed to get expect result, get %s" % output.stdout.strip()) else: raise error.TestFail("Fail to get domblkerror info:%s" % output.stderr)
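# Illustrative sketch (not part of the original test): the test above leans
# on utils_misc.wait_for() to poll until the domain pauses on the injected
# I/O error. A minimal stand-in for that polling pattern; the counter-based
# check function is invented for demonstration:

import time

def wait_for(func, timeout, step=1.0):
    """Poll func() until it returns a truthy value or the timeout expires."""
    end = time.time() + timeout
    while time.time() < end:
        result = func()
        if result:
            return result
        time.sleep(step)
    return None

state = {'polls': 0}

def _check_state():
    state['polls'] += 1
    return state['polls'] >= 3       # pretend "paused" on the third poll

assert wait_for(_check_state, timeout=10, step=0.01)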
def run(test, params, env):
    """
    Test virsh domdisplay command, return the graphic url

    This test covers vnc and spice types, in both readonly and readwrite
    mode. If the --include-passwd option is given, the password in the
    result also needs to be checked.
    """

    if not virsh.has_help_command('domdisplay'):
        raise error.TestNAError("This version of libvirt doesn't support "
                                "domdisplay test")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("domdisplay_options", "")
    graphic = params.get("domdisplay_graphic", "vnc")
    readonly = ("yes" == params.get("readonly", "no"))
    passwd = params.get("domdisplay_passwd")
    is_ssl = ("yes" == params.get("domdisplay_ssl", "no"))
    is_domid = ("yes" == params.get("domdisplay_domid", "no"))
    is_domuuid = ("yes" == params.get("domdisplay_domuuid", "no"))
    qemu_conf = params.get("qemu_conf_file", "/etc/libvirt/qemu.conf")

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_file = os.path.join(test.tmpdir, "qemu.conf.bk")

    def prepare_ssl_env():
        """
        Do prepare for ssl spice connection
        """
        # modify qemu.conf
        f_obj = open(qemu_conf, "r")
        cont = f_obj.read()
        f_obj.close()

        # remove the existing setting
        left_cont = re.sub(r'\s*spice_tls\s*=.*', '', cont)
        left_cont = re.sub(r'\s*spice_tls_x509_cert_dir\s*=.*', '',
                           left_cont)

        # write back to origin file with cut left content
        f_obj = open(qemu_conf, "w")
        f_obj.write(left_cont)
        f_obj.write("spice_tls = 1\n")
        f_obj.write("spice_tls_x509_cert_dir = \"/etc/pki/libvirt-spice\"")
        f_obj.close()

        # make the modification take effect
        utils_libvirtd.libvirtd_restart()

        # Generate CA cert
        utils_misc.create_x509_dir("/etc/pki/libvirt-spice",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my CA",
                                   "/C=IL/L=Raanana/O=Red Hat/CN=my server",
                                   passwd)

    try:
        graphic_count = len(vmxml_backup.get_graphics_devices())
        if is_ssl:
            # Do backup for qemu.conf in tmp_file
            shutil.copyfile(qemu_conf, tmp_file)
            prepare_ssl_env()
            if graphic_count:
                Graphics.del_graphic(vm_name)
            Graphics.add_graphic(vm_name, passwd, "spice", True)
        else:
            if not graphic_count:
                Graphics.add_graphic(vm_name, passwd, graphic)
            # Only change graphic type and passwd
            Graphics.change_graphic_type_passwd(vm_name, graphic, passwd)

        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()

        dom_id = virsh.domid(vm_name).stdout.strip()
        dom_uuid = virsh.domuuid(vm_name).stdout.strip()

        if is_domid:
            vm_name = dom_id
        if is_domuuid:
            vm_name = dom_uuid

        # Do test
        result = virsh.domdisplay(vm_name, options, readonly=readonly,
                                  debug=True)
        logging.debug("result is %s", result)
        if result.exit_status:
            if not status_error:
                raise error.TestFail("Fail to get domain display info. "
                                     "Error:%s." % result.stderr.strip())
            else:
                logging.info("Get domain display info failed as expected. "
                             "Error:%s.", result.stderr.strip())
                return
        elif status_error:
            raise error.TestFail("Expected failure, but the command "
                                 "succeeded!")

        output = result.stdout.strip()
        # Different result depends on the domain xml listen address
        if output.find("localhost:") >= 0:
            expect_addr = "localhost"
        else:
            expect_addr = "127.0.0.1"

        # Get active domain xml info
        vmxml_act = vm_xml.VMXML.new_from_dumpxml(vm_name, "--security-info")
        logging.debug("xml is %s", vmxml_act.get_xmltreefile())
        graphic_act = vmxml_act.devices.by_device_tag('graphics')[0]
        port = graphic_act.port

        # Do judgement for result
        if graphic == "vnc":
            expect = "vnc://%s:%s" % (expect_addr, str(int(port) - 5900))
        elif graphic == "spice" and is_ssl:
            tlsport = graphic_act.tlsPort
            expect = "spice://%s:%s?tls-port=%s" % \
                     (expect_addr, port, tlsport)
        elif graphic == "spice":
            expect = "spice://%s:%s" % (expect_addr, port)

        if options != "" and passwd is not None:
            # have --include-passwd and have passwd in xml
            if graphic == "vnc":
                expect = "vnc://:%s@%s:%s" % \
                         (passwd, expect_addr, str(int(port) - 5900))
            elif graphic == "spice" and is_ssl:
                expect = expect + "&password=" + passwd
            elif graphic == "spice":
                expect = expect + "?password=" + passwd

        if output == expect:
            logging.info("Get correct display:%s", output)
        else:
            raise error.TestFail("Expect %s, but get %s" % (expect, output))

    finally:
        # Domain xml recovery
        vmxml_backup.sync()
        if is_ssl:
            # qemu.conf recovery
            shutil.move(tmp_file, qemu_conf)
            utils_libvirtd.libvirtd_restart()
def run(test, params, env): """ Test virsh cpu-stats command. The command can display domain per-CPU and total statistics. 1. Call virsh cpu-stats [domain] 2. Call virsh cpu-stats [domain] with valid options 3. Call virsh cpu-stats [domain] with invalid options """ def get_cpuacct_info(suffix): """ Get the CPU accounting info within the vm :param suffix: str, suffix of the CPU accounting.(stat/usage/usage_percpu) :return: list, the list of CPU accounting info """ if 'cg_obj' not in locals(): return # On cgroup v2 use cpu.stat as a substitute if cg_obj.is_cgroup_v2_enabled(): cg_path = cg_obj.get_cgroup_path("cpu") para = ('cpu.%s' % suffix) else: cg_path = cg_obj.get_cgroup_path("cpuacct") para = ('cpuacct.%s' % suffix) # We only need the info in file which "emulator" is not in path if os.path.basename(cg_path) == "emulator": cg_path = os.path.dirname(cg_path) usage_file = os.path.join(cg_path, para) with open(usage_file, 'r') as f: cpuacct_info = f.read().strip().split() logging.debug("cpuacct info %s", cpuacct_info) return cpuacct_info def check_user_and_system_time(total_list): user_time = float(total_list[4]) system_time = float(total_list[7]) # Check libvirt user and system time between pre and next cgroup time # Unit conversion (Unit: second) # Default time unit is microseconds on cgroup v2 while 1/100 second on v1 if cg_obj.is_cgroup_v2_enabled(): pre_user_time = float(cpuacct_res_pre[3]) / 1000000 pre_sys_time = float(cpuacct_res_pre[5]) / 1000000 next_user_time = float(cpuacct_res_next[3]) / 1000000 next_sys_time = float(cpuacct_res_next[5]) / 1000000 else: pre_user_time = float(cpuacct_res_pre[1]) / 100 pre_sys_time = float(cpuacct_res_pre[3]) / 100 next_user_time = float(cpuacct_res_next[1]) / 100 next_sys_time = float(cpuacct_res_next[3]) / 100 # check user_time if next_user_time >= user_time >= pre_user_time: logging.debug("Got the expected user_time: %s", user_time) else: test.fail("Got unexpected user_time: %s, " % user_time + "should between pre_user_time:%s " % pre_user_time + "and next_user_time:%s" % next_user_time) # check system_time if next_sys_time >= system_time >= pre_sys_time: logging.debug("Got the expected system_time: %s", system_time) else: test.fail("Got unexpected system_time: %s, " % system_time + "should between pre_sys_time:%s " % pre_sys_time + "and next_sys_time:%s" % next_sys_time) if not virsh.has_help_command('cpu-stats'): test.cancel("This version of libvirt does not support " "the cpu-stats test") vm_name = params.get("main_vm", "vm1") vm_ref = params.get("cpu_stats_vm_ref") status_error = params.get("status_error", "no") options = params.get("cpu_stats_options") error_msg = params.get("error_msg", "") logging.debug("options are %s", options) if vm_ref == "name": vm_ref = vm_name vm = env.get_vm(vm_ref) if vm and vm.get_pid(): cg_obj = libvirt_cgroup.CgroupTest(vm.get_pid()) # get host cpus num cpus = cpu.online_cpus_count() logging.debug("host online cpu num is %s", cpus) # get options and put into a dict get_total = re.search('total', options) get_start = re.search('start', options) get_count = re.search('count', options) # command without options get_noopt = 0 if not get_total and not get_start and not get_count: get_noopt = 1 # command with only --total option get_totalonly = 0 if not get_start and not get_count and get_total: get_totalonly = 1 option_dict = {} if options.strip(): option_list = options.split('--') logging.debug("option_list is %s", option_list) for match in option_list[1:]: if get_start or get_count: option_dict[match.split(' 
')[0]] = match.split(' ')[1] # check if cpu is enough,if not cancel the test if (status_error == "no"): cpu_start = int(option_dict.get("start", "0")) if cpu_start == 32: cpus = cpu.total_cpus_count() logging.debug("Host total cpu num: %s", cpus) if (cpu_start >= cpus): test.cancel("Host cpus are not enough") # get CPU accounting info twice to compare with user_time and system_time cpuacct_res_pre = get_cpuacct_info('stat') # Run virsh command cmd_result = virsh.cpu_stats(vm_ref, options, ignore_status=True, debug=True) output = cmd_result.stdout.strip() status = cmd_result.exit_status cpuacct_res_next = get_cpuacct_info('stat') # check status_error if status_error == "yes": if status == 0: test.fail("Run successfully with wrong command! Output: {}".format( output)) else: # Check error message is expected if not re.search(error_msg, cmd_result.stderr.strip()): test.fail("Error message is not expected! " "Expected: {} Actual: {}".format( error_msg, cmd_result.stderr.strip())) elif status_error == "no": if status != 0: test.fail("Run failed with right command! Error: {}".format( cmd_result.stderr.strip())) else: # Get cgroup cpu_time if not get_totalonly: cgtime = get_cpuacct_info('usage_percpu') # Cut CPUs from output and format to list if get_total: mt_start = re.search('Total', output).start() else: mt_start = len(output) output_cpus = " ".join(output[:mt_start].split()) cpus_list = re.compile(r'CPU\d+:').split(output_cpus) # conditions that list total time info if get_noopt or get_total: mt_end = re.search('Total', output).end() total_list = output[mt_end + 1:].split() total_time = float(total_list[1]) check_user_and_system_time(total_list) start_num = 0 if get_start: start_num = int(option_dict["start"]) end_num = int(cpus) if get_count: count_num = int(option_dict["count"]) if end_num > start_num + count_num: end_num = start_num + count_num # for only give --total option it only shows "Total" cpu info if get_totalonly: end_num = -1 # find CPU[N] in output and sum the cpu_time and cgroup cpu_time sum_cputime = 0 sum_cgtime = 0 logging.debug("start_num %d, end_num %d", start_num, end_num) for i in range(start_num, end_num): logging.debug("Check CPU" + "%i" % i + " exist") sum_cputime += float(cpus_list[i - start_num + 1].split()[1]) sum_cgtime += float(cgtime[i]) if not re.search('CPU' + "%i" % i, output): test.fail("Fail to find CPU" + "%i" % i + "in " "result") # check cgroup cpu_time > sum of cpu_time if end_num >= 0: logging.debug("Check sum of cgroup cpu_time %d >= cpu_time %d", sum_cgtime, sum_cputime) if sum_cputime > sum_cgtime: test.fail("Check sum of cgroup cpu_time < sum " "of output cpu_time") # check Total cpu_time >= sum of cpu_time when no options if get_noopt: logging.debug( "Check total time %d >= sum of output cpu_time" " %d", total_time, sum_cputime) if total_time < sum_cputime: test.fail("total time < sum of output cpu_time")
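# Illustrative sketch (not part of the original test): the final checks
# above assert that the per-CPU times printed by 'virsh cpu-stats' never
# sum to more than the cgroup's own accounting (the cgroup also counts
# time spent outside vcpu threads). cpuacct.usage_percpu is reported in
# nanoseconds, so a strict comparison needs both sides in one unit; all
# numbers below are invented:

cpu_stats_times = [12.5, 11.9, 0.0, 0.3]           # seconds, from virsh
cgroup_percpu_ns = [13.1e9, 12.2e9, 0.1e9, 0.4e9]  # cpuacct.usage_percpu

sum_cputime = sum(cpu_stats_times)
sum_cgtime = sum(t / 1e9 for t in cgroup_percpu_ns)  # ns -> s
assert sum_cputime <= sum_cgtime, "virsh reports more time than the cgroup"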
def run(test, params, env): """ Test the command virsh memtune 1) To get the current memtune parameters 2) Change the parameter values 3) Check the memtune query updated with the values 4) Check whether the mounted cgroup path gets the updated value 5) Check the output of virsh dumpxml 6) Check vm is alive """ # Check for memtune command is available in the libvirt version under test if not virsh.has_help_command("memtune"): test.cancel("Memtune not available in this libvirt version") # Check if memtune options are supported for option in memtune_types: option = re.sub('_', '-', option) if not virsh.has_command_help_match("memtune", option): test.cancel("%s option not available in memtune " "cmd in this libvirt version" % option) # Get common parameters acceptable_minus = int(utils_memory.getpagesize() - 1) step_mem = params.get("mt_step_mem", "no") == "yes" expect_error = params.get("expect_error", "no") == "yes" restart_libvirtd = params.get("restart_libvirtd", "no") == "yes" set_one_line = params.get("set_in_one_command", "no") == "yes" mt_hard_limit = params.get("mt_hard_limit", None) mt_soft_limit = params.get("mt_soft_limit", None) mt_swap_hard_limit = params.get("mt_swap_hard_limit", None) # if restart_libvirtd is True, set set_one_line is True set_one_line = True if restart_libvirtd else set_one_line # Get the vm name, pid of vm and check for alive vm = env.get_vm(params["main_vm"]) vm.verify_alive() pid = vm.get_pid() # Resolve the memory cgroup path for a domain cgtest = libvirt_cgroup.CgroupTest(pid) path = cgtest.get_cgroup_path("memory") logging.debug("cgroup path is %s", path) global mem_cgroup_info mem_cgroup_info = cgtest.get_cgroup_file_mapping(virsh_cmd='memtune') logging.debug("memtune cgroup info is %s", mem_cgroup_info) # step_mem is used to do step increment limit testing if step_mem: mem_step(params, path, vm, test, acceptable_minus) return if not set_one_line: # Set one type memtune limit in one command if mt_hard_limit: index = 0 mt_limit = mt_hard_limit elif mt_soft_limit: index = 1 mt_limit = mt_soft_limit elif mt_swap_hard_limit: index = 2 mt_limit = mt_swap_hard_limit mt_type = memtune_types[index] mt_cgname = mem_cgroup_info[mt_type] options = " --%s %s --live" % (re.sub('_', '-', mt_type), mt_limit) result = virsh.memtune_set(vm.name, options, debug=True) if expect_error: fail_patts = [params.get("error_info")] libvirt.check_result(result, fail_patts, []) else: # If limit value is negative, means no memtune limit mt_expected = mt_limit if int(mt_limit) > 0 else -1 check_limit(path, mt_expected, mt_type, mt_cgname, vm, test, acceptable_minus) else: # Set 3 limits in one command line mt_limits = [mt_hard_limit, mt_soft_limit, mt_swap_hard_limit] options = " %s --live" % ' '.join(mt_limits) result = virsh.memtune_set(vm.name, options, debug=True) if expect_error: fail_patts = [params.get("error_info")] libvirt.check_result(result, fail_patts, []) else: check_limits(path, mt_limits, vm, test, acceptable_minus) if restart_libvirtd: libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() if not expect_error: # After libvirtd restared, check memtune values again check_limits(path, mt_limits, vm, test, acceptable_minus)
def run(test, params, env):
    """
    Test the command virsh memtune

    (1) To get the current memtune parameters
    (2) Change the parameter values
    (3) Check the memtune query updated with the values
    (4) Check whether the mounted cgroup path gets the updated value
    (5) Login to guest and use the memory greater than the assigned value
        and check whether it kills the vm.
    (6) TODO: Check more values and robust scenarios.
    """

    def check_limit(path, expected_value, limit_name):
        """
        Matches the expected and actual output
        (1) Match the output of the virsh memtune
        (2) Match the output of the respective cgroup fs value

        :params: path: memory controller path for a domain
        :params: expected_value: the expected limit value
        :params: limit_name: the limit to be checked
                 hard_limit/soft_limit/swap_hard_limit
        :return: True or False based on the checks
        """
        status_value = True
        # Check 1
        actual_value = virsh.memtune_get(domname, limit_name)
        if actual_value == -1:
            test.fail("the key %s not found in the "
                      "virsh memtune output" % limit_name)
        if actual_value != int(expected_value):
            status_value = False
            logging.error(
                "%s virsh output:\n\tExpected value:%d"
                "\n\tActual value: "
                "%d", limit_name, int(expected_value), int(actual_value))

        # Check 2
        if limit_name == 'hard_limit':
            cg_file_name = '%s/memory.limit_in_bytes' % path
        elif limit_name == 'soft_limit':
            cg_file_name = '%s/memory.soft_limit_in_bytes' % path
        elif limit_name == 'swap_hard_limit':
            cg_file_name = '%s/memory.memsw.limit_in_bytes' % path

        cg_file = None
        try:
            with open(cg_file_name) as cg_file:
                output = cg_file.read()
                value = int(output) / 1024
                if int(expected_value) != int(value):
                    status_value = False
                    logging.error(
                        "%s cgroup fs:\n\tExpected Value: %d"
                        "\n\tActual Value: "
                        "%d", limit_name, int(expected_value), int(value))
        except IOError:
            status_value = False
            logging.error("Error while reading:\n%s", cg_file_name)

        return status_value

    # Get the vm name, pid of vm and check for alive
    domname = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()
    logging.info("Verify valid cgroup path for VM pid: %s", pid)

    # Resolve the memory cgroup path for a domain
    path = utils_cgroup.resolve_task_cgroup_path(int(pid), "memory")

    # Set the initial memory starting value for test case
    # By default set 1GB less than the total memory
    # In case of total memory is less than 1GB set to 256MB
    # visit subtests.cfg to change these default values
    Memtotal = utils_memory.read_from_meminfo('MemTotal')
    base_mem = params.get("memtune_base_mem")

    if int(Memtotal) < int(base_mem):
        Mem = int(params.get("memtune_min_mem"))
    else:
        Mem = int(Memtotal) - int(base_mem)

    # Initialize error counter
    error_counter = 0

    # Check for memtune command is available in the libvirt version under test
    if not virsh.has_help_command("memtune"):
        test.cancel("Memtune not available in this libvirt version")

    # Run test case with 100kB increase in memory value for each iteration
    while (Mem < Memtotal):
        if virsh.has_command_help_match("memtune", "hard-limit"):
            hard_mem = Mem - int(params.get("memtune_hard_base_mem"))
            options = " --hard-limit %d --live" % hard_mem
            virsh.memtune_set(domname, options)
            if not check_limit(path, hard_mem, "hard_limit"):
                error_counter += 1
        else:
            test.cancel("hard-limit option not available in memtune "
                        "cmd in this libvirt version")

        if virsh.has_command_help_match("memtune", "soft-limit"):
            soft_mem = Mem - int(params.get("memtune_soft_base_mem"))
            options = " --soft-limit %d --live" % soft_mem
            virsh.memtune_set(domname, options)
            if not check_limit(path, soft_mem, "soft_limit"):
                error_counter += 1
        else:
            test.cancel("soft-limit option not available in memtune "
                        "cmd in this libvirt version")

        if virsh.has_command_help_match("memtune", "swap-hard-limit"):
            swaphard = Mem
            options = " --swap-hard-limit %d --live" % swaphard
            virsh.memtune_set(domname, options)
            if not check_limit(path, swaphard, "swap_hard_limit"):
                error_counter += 1
        else:
            test.cancel("swap-hard-limit option not available in memtune "
                        "cmd in this libvirt version")

        Mem += int(params.get("memtune_hard_base_mem"))

    # Raise error based on error_counter
    if error_counter > 0:
        test.fail("Test failed, consult the previous error messages")
def run(test, params, env): """ Test send-key command, include all types of codeset and sysrq For normal sendkey test, we create a file to check the command execute by send-key. For sysrq test, check the /var/log/messages in RHEL or /var/log/syslog in Ubuntu and guest status """ if not virsh.has_help_command('send-key'): test.cancel("This version of libvirt does not support the send-key " "test") vm_name = params.get("main_vm", "avocado-vt-vm1") status_error = ("yes" == params.get("status_error", "no")) keystrokes = params.get("sendkey", "") codeset = params.get("codeset", "") holdtime = params.get("holdtime", "") hold_timeout = eval(params.get("hold_timeout", "1")) sysrq_test = ("yes" == params.get("sendkey_sysrq", "no")) sleep_time = int(params.get("sendkey_sleeptime", 5)) readonly = params.get("readonly", False) username = params.get("username") password = params.get("password") create_file = params.get("create_file_name") uri = params.get("virsh_uri") simultaneous = params.get("sendkey_simultaneous", "yes") == "yes" unprivileged_user = params.get('unprivileged_user') is_crash = ("yes" == params.get("is_crash", "no")) add_panic_device = ("yes" == params.get("add_panic_device", "yes")) need_keyboard_device = ("yes" == params.get("need_keyboard_device", "yes")) panic_model = params.get('panic_model', 'isa') force_vm_boot_text_mode = ("yes" == params.get("force_vm_boot_text_mode", "yes")) crash_dir = "/var/crash" if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current libvirt " "version.") def send_line(send_str): """ send string to guest with send-key and end with Enter """ for send_ch in list(send_str): virsh.sendkey(vm_name, "KEY_%s" % send_ch.upper(), ignore_status=False) virsh.sendkey(vm_name, "KEY_ENTER", ignore_status=False) def add_keyboard_device(vm_name): """ Add keyboard to guest if guest doesn't have :params: vm_name: the guest name """ inputs = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\ .get_devices(device_type="input") for input_device in inputs: if input_device.type_name == "keyboard": logging.debug("Guest already has a keyboard device") return kbd = Input("keyboard") kbd.input_bus = "virtio" logging.debug("Add keyboard device %s" % kbd) result = virsh.attach_device(vm_name, kbd.xml) if result.exit_status: test.error("Failed to add keyboard device") vm = env.get_vm(vm_name) # Part of sysrq tests need keyboard device otherwise the sysrq cmd doesn't # work. 
Refer to BZ#1526862 if need_keyboard_device: add_keyboard_device(vm_name) vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vm.wait_for_login().close() if force_vm_boot_text_mode: # Boot the guest in text only mode so that send-key commands would succeed # in creating a file try: utils_test.update_boot_option( vm, args_added="3", guest_arch_name=params.get('vm_arch_name')) except Exception as info: test.error(info) session = vm.wait_for_login() if sysrq_test: # In postprocess of previous testcase would pause and resume the VM # that would change the domstate to running (unpaused) and cause # sysrq reboot testcase to fail as the domstate persist across reboot # so it is better to destroy and start VM before the test starts if "KEY_B" in keystrokes: cmd_result = virsh.domstate(vm_name, '--reason', ignore_status=True) if "unpaused" in cmd_result.stdout.strip(): vm.destroy() vm.start() session = vm.wait_for_login() if is_crash: session.cmd("rm -rf {0}; mkdir {0}".format(crash_dir)) libvirt.update_on_crash(vm_name, "destroy") if add_panic_device: libvirt.add_panic_device(vm_name, model=panic_model) if not vm.is_alive(): vm.start() session = vm.wait_for_login() LOG_FILE = "/var/log/messages" if "ubuntu" in vm.get_distro().lower(): LOG_FILE = "/var/log/syslog" # Is 'rsyslog' installed on guest? It'll be what writes out # to LOG_FILE if not utils_package.package_install("rsyslog", session): test.fail("Fail to install rsyslog, make sure that you have " "usable repo in guest") # clear messages, restart rsyslog, and make sure it's running session.cmd("echo '' > %s" % LOG_FILE) # check the result of restart rsyslog status, output = session.cmd_status_output("service rsyslog restart") if status: # To avoid 'Exec format error' utils_package.package_remove("rsyslog", session) utils_package.package_install("rsyslog", session) # if rsyslog.service is masked, need to unmask rsyslog if "Unit rsyslog.service is masked" in output: session.cmd("systemctl unmask rsyslog") session.cmd("echo '' > %s" % LOG_FILE) session.cmd("service rsyslog restart") ps_stat = session.cmd_status("ps aux |grep rsyslog") if ps_stat != 0: test.fail("rsyslog is not running in guest") # enable sysrq session.cmd("echo 1 > /proc/sys/kernel/sysrq") # make sure the environment is clear if create_file is not None: session.cmd("rm -rf %s" % create_file) try: # wait for tty started tty_stat = "ps aux|grep tty" timeout = 60 while timeout >= 0 and \ session.get_command_status(tty_stat) != 0: time.sleep(1) timeout = timeout - 1 if timeout < 0: test.fail("Can not wait for tty started in 60s") # send user and passwd to guest to login send_line(username) time.sleep(2) send_line(password) time.sleep(2) if sysrq_test or simultaneous: output = virsh.sendkey(vm_name, keystrokes, codeset=codeset, holdtime=holdtime, readonly=readonly, unprivileged_user=unprivileged_user, uri=uri) else: # If multiple keycodes are specified, they are all sent # simultaneously to the guest, and they may be received # in random order. If you need distinct keypresses, you # must use multiple send-key invocations. 
for keystroke in keystrokes.split(): output = virsh.sendkey(vm_name, keystroke, codeset=codeset, holdtime=holdtime, readonly=readonly, unprivileged_user=unprivileged_user, uri=uri) if output.exit_status: test.fail("Failed to send key %s to guest: %s" % (keystroke, output.stderr)) time.sleep(sleep_time) if output.exit_status != 0: if status_error: logging.info( "Failed to sendkey to guest as expected, Error:" "%s.", output.stderr) return else: test.fail("Failed to send key to guest, Error:%s." % output.stderr) elif status_error: test.fail("Expect fail, but succeed indeed.") if create_file is not None: # check if created file exist cmd_ls = "ls %s" % create_file if not wait.wait_for( lambda: session.get_command_status_output(cmd_ls), hold_timeout, step=5): test.fail("Fail to create file with send key") logging.info("Succeed to create file with send key") elif sysrq_test: # check LOG_FILE info according to different key # Since there's no guarantee when messages will be written # we'll do a check and wait loop for up to 60 seconds timeout = 60 while timeout >= 0: if "KEY_H" in keystrokes: cmd = "cat %s | grep -i 'SysRq.*HELP'" % LOG_FILE get_status = session.cmd_status(cmd) elif "KEY_M" in keystrokes: cmd = "cat %s | grep -i 'SysRq.*Show Memory'" % LOG_FILE get_status = session.cmd_status(cmd) elif "KEY_T" in keystrokes: cmd = "cat %s | grep -i 'SysRq.*Show State'" % LOG_FILE get_status = session.cmd_status(cmd) # Sometimes SysRq.*Show State string missed in LOG_FILE # as a fall back check for runnable tasks logged if get_status != 0: cmd = "cat %s | grep 'runnable tasks:'" % LOG_FILE get_status = session.cmd_status(cmd) elif "KEY_B" in keystrokes: session = vm.wait_for_login() result = virsh.domstate(vm_name, '--reason', ignore_status=True) output = result.stdout.strip() logging.debug("The guest state: %s", output) if not output.count("booted"): get_status = 1 else: get_status = 0 session.close() # crash elif is_crash: dom_state = virsh.domstate(vm_name, "--reason").stdout.strip() logging.debug("domain state is %s" % dom_state) if "crashed" in dom_state: get_status = 0 else: get_status = 1 if get_status == 0: timeout = -1 else: if not is_crash: session.cmd("echo \"virsh sendkey waiting\" >> %s" % LOG_FILE) time.sleep(1) timeout = timeout - 1 if get_status != 0: test.fail("SysRq does not take effect in guest, keystrokes is " "%s" % keystrokes) else: logging.info("Succeed to send SysRq command") else: test.fail("Test cfg file invalid: either sysrq_params or " "create_file_name must be defined") finally: if create_file is not None: session = vm.wait_for_login() session.cmd("rm -rf %s" % create_file) session.close() vmxml_backup.sync()
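# The sendkey tests above log in on the guest's text console by replaying a
# string one keycode at a time, since keycodes passed in a single send-key
# call land simultaneously and in arbitrary order. A minimal standalone
# sketch of that technique, outside the avocado-vt framework; "demo-vm" is a
# placeholder domain name and the virsh CLI is assumed to be on PATH.
import subprocess
import time


def type_string_via_sendkey(domain, text, delay=0.5):
    """Send each character of `text` as a separate virsh send-key call."""
    for ch in text:
        # Only KEY_A..KEY_Z / KEY_0..KEY_9 style names are covered here;
        # punctuation would need an explicit keycode map.
        subprocess.run(["virsh", "send-key", domain, "KEY_%s" % ch.upper()],
                       check=True)
        time.sleep(delay)  # give the guest time to process each keypress
    subprocess.run(["virsh", "send-key", domain, "KEY_ENTER"], check=True)

# Example: type a username at the guest login prompt.
# type_string_via_sendkey("demo-vm", "root")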
def run(test, params, env): """ This test virsh domtime command and its options. 1) Start a guest with/without guest agent configured; 2) Record guest times; 3) Do some operation to stop VM; 4) Run virsh domtime command with different options; 5) Check the command result; 6) Check the guest times against expectation; 7) Cleanup test environment. """ epoch = datetime.datetime(1970, 1, 1, 0, 0, 0) # Max time can be set with domtime successfully in newer qemu-ga time_max_1 = 3155731199 # Max time can be set with domtime successfully in older qemu-ga time_max_2 = 3155759999 # Max time can be set with domtime bug failed to set RTC in older qemu-ga time_max_3 = 9223372035 def init_time(session): """ Initialize guest RTC time to epoch + 1234567890 and system time one day latter. :param session: Session from which to access guest """ res = virsh.domtime(vm_name, time=1234567890) if res.exit_status: logging.debug("Failed to init time to 1234567890:\n%s", res) status, output = session.cmd_status_output('date -s "1 day"') if status: test.error("Failed to set guest time:\n%s" % output) def get_host_utc_time(): """ Get host UTC time from date command. """ res = process.run("date -u", shell=True) # Strip timezone info from output # e.g. 'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009' time_str = re.sub(r'\S+ (?=\S+$)', '', res.stdout_text.strip()) return datetime.datetime.strptime(time_str, r"%a %b %d %H:%M:%S %Y") def run_cmd(session, cmd): """ Run a command in a session and record duration of call. """ start = time.time() output = session.cmd_output(cmd) duration = time.time() - start logging.info('Result of command "%s". Duration: %s. Output:%s', cmd, duration, output.strip()) return output, duration def get_guest_times(session): """ Retrieve different guest time as a dict for checking. Keys: local_hw: Guest RTC time in local timezone local_sys: Guest system time in local timezone utc_sys: Guest system time in UTC domtime: Guest system time in UTC got from virsh domtime command :param session: Session from which to access guest """ times = {} get_begin = time.time() # Guest RTC local timezone time output, _ = run_cmd(session, 'hwclock') try: time_str, _ = re.search(r"(.+) (\S+ seconds)", output).groups() try: # output format 1: Tue 01 Mar 2016 01:53:46 PM CST # Remove timezone info from output new_str = re.sub(r'\s+\S+$', '', time_str) times['local_hw'] = datetime.datetime.strptime( new_str, r"%a %d %b %Y %I:%M:%S %p") except ValueError: # There are known three possible output format for `hwclock` # output format 2: Sat Feb 14 07:31:33 2009 times['local_hw'] = datetime.datetime.strptime( time_str, r"%a %b %d %H:%M:%S %Y") except AttributeError: try: # output format 3: 2019-03-22 05:16:18.224511-04:00 time_str = output.split(".")[0] times['local_hw'] = datetime.datetime.strptime( time_str, r"%Y-%m-%d %H:%M:%S") except ValueError: test.fail("Unknown hwclock output format in guest: %s", output) delta = time.time() - get_begin times['local_hw'] -= datetime.timedelta(seconds=delta) # Guest system local timezone time output, _ = run_cmd(session, 'date') # Strip timezone info from output # e.g. 'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009' time_str = re.sub(r'\S+ (?=\S+$)', '', output.strip()) times['local_sys'] = datetime.datetime.strptime( time_str, r"%a %b %d %H:%M:%S %Y") delta = time.time() - get_begin times['local_sys'] -= datetime.timedelta(seconds=delta) # Guest system UTC timezone time output, _ = run_cmd(session, 'date -u') # Strip timezone info from output # e.g. 
'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009' time_str = re.sub(r'\S+ (?=\S+$)', '', output.strip()) times['utc_sys'] = datetime.datetime.strptime( time_str, r"%a %b %d %H:%M:%S %Y") delta = time.time() - get_begin times['utc_sys'] -= datetime.timedelta(seconds=delta) # Guest UTC time from virsh domtime res = virsh.domtime(vm_name, pretty=True, ignore_status=True) if not res.exit_status: logging.info('Result of "domtime". Duration: %s. Output:%s', res.duration, res.stdout.strip()) _, time_str = res.stdout.split(" ", 1) times['domtime'] = datetime.datetime.strptime( time_str.strip(), r"%Y-%m-%d %H:%M:%S") delta = time.time() - get_begin times['domtime'] -= datetime.timedelta(seconds=delta) else: logging.debug("Unable to get domain time:\n%s", res) times['domtime'] = None return times, time.time() - get_begin def check_get_success(expected_times): """ Check virsh command get result against expected times :param expected_times: Expected time for checking """ _, time_str = res.stdout.split(" ", 1) if pretty: # Time: 2015-01-13 06:29:18 domtime = datetime.datetime.strptime(time_str.strip(), r"%Y-%m-%d %H:%M:%S") else: # Time: 1421130740 domtime = epoch + datetime.timedelta(seconds=int(time_str)) time_shift = time.time() - start logging.debug("Time shift is %s", time_shift) result_diff = (domtime - expected_times['domtime']).total_seconds() if abs(result_diff) > 2.0: test.fail("Expect get time %s, but got %s, time " "diff: %s" % (org_times['domtime'], domtime, result_diff)) def check_guest_times(expected_times, cur_times): """ Check guest times after test against expected times :param expected_times: Expected time for checking """ time_shift = time.time() - start logging.debug("Time shift is %s", time_shift) error_msgs = [] for key in cur_times: if cur_times[key] is not None: cur = cur_times[key] expect = expected_times[key] diff = (cur - expect).total_seconds() msg = "For %s, expect get time %s, got %s, time diff: %s" % ( key, expect, cur, diff) logging.debug(msg) if abs(diff) > 2.0: error_msgs.append(msg) if error_msgs: test.fail('\n'.join(error_msgs)) def check_time(result, org_times, cur_times): """ Check whether domain time has been change accordingly. :param result: virsh domtime CmdResult instance :param org_times: Original guest times """ action = "get" if now or sync or (set_time is not None): action = "set" host_tz_diff = org_host_loc_time - org_host_time logging.debug("Timezone diff on host is %d hours.", (host_tz_diff.total_seconds() // 3600)) # Hardware time will never stop logging.info('Add %ss to expected guest time', interval) if action == 'get': expected_times = org_times elif action == 'set': if result.exit_status: # Time not change if domtime fails expected_times = org_times else: # Time change accordingly if succeed. 
if now: utc_time = org_host_time local_time = utc_time + guest_tz_diff elif sync: local_time = org_times["local_hw"] utc_time = local_time - guest_tz_diff elif set_time is not None: utc_time = epoch + datetime.timedelta( seconds=(int(set_time) - guest_duration)) local_time = utc_time + guest_tz_diff expected_times = {} expected_times['local_hw'] = local_time expected_times['local_sys'] = local_time expected_times["utc_sys"] = utc_time expected_times["domtime"] = utc_time # Add interval between two checks of guest time for key in expected_times: if expected_times[key] is not None: expected_times[key] += interval # Hardware time will never stop # Software time will stop if suspended or managed-saved if suspend or managedsave: logging.info('Remove %ss from expected guest software time', stop_time) expected_times["domtime"] -= stop_time expected_times["local_sys"] -= stop_time expected_times["utc_sys"] -= stop_time # Check guest time if domtime succeeded check_guest_times(expected_times, cur_times) # Check if output of domtime is correct if action == 'get' and not result.exit_status: check_get_success(expected_times) def prepare_fail_patts(): """ Predict fail pattern from test parameters. """ fail_patts = [] if not channel: fail_patts.append(r"QEMU guest agent is not configured") if not agent: # For older version fail_patts.append(r"Guest agent not available for now") # For newer version fail_patts.append(r"Guest agent is not responding") if int(now) + int(sync) + int(bool(set_time)) > 1: fail_patts.append(r"Options \S+ and \S+ are mutually exclusive") if shutdown: fail_patts.append(r"domain is not running") if set_time is not None: if int(set_time) < 0: fail_patts.append(r"Invalid argument") elif time_max_1 < int(set_time) <= time_max_2: fail_patts.append(r"Invalid time") elif time_max_2 < int(set_time) <= time_max_3: fail_patts.append(r"Invalid time") elif time_max_3 < int(set_time): fail_patts.append(r"too big for guest agent") if readonly: fail_patts.append(r"operation forbidden") return fail_patts def stop_vm(): """ Suspend, managedsave, pmsuspend or shutdown a VM for a period of time """ stop_start = time.time() vmlogin_dur = 0.0 if suspend: vm.pause() time.sleep(vm_stop_duration) vm.resume() elif managedsave: vm.managedsave() time.sleep(vm_stop_duration) vm.start() start_dur = time.time() vm.wait_for_login() vmlogin_dur = time.time() - start_dur elif pmsuspend: vm.pmsuspend() time.sleep(vm_stop_duration) vm.pmwakeup() start_dur = time.time() vm.wait_for_login() vmlogin_dur = time.time() - start_dur elif shutdown: vm.destroy() # Check real guest stop time stop_seconds = time.time() - stop_start - vmlogin_dur stop_time = datetime.timedelta(seconds=stop_seconds) logging.debug("Guest stopped: %s", stop_time) return stop_time # Check availability of virsh command domtime if not virsh.has_help_command('domtime'): test.cancel("This version of libvirt does not support " "the domtime test") channel = (params.get("prepare_channel", "yes") == 'yes') agent = (params.get("start_agent", "yes") == 'yes') pretty = (params.get("domtime_pretty", "no") == 'yes') now = (params.get("domtime_now", "no") == 'yes') sync = (params.get("domtime_sync", "no") == 'yes') set_time = params.get("domtime_time", None) shutdown = (params.get("shutdown_vm", "no") == 'yes') suspend = (params.get("suspend_vm", "no") == 'yes') managedsave = (params.get("managedsave_vm", "no") == 'yes') pmsuspend = (params.get("pmsuspend_vm", "no") == 'yes') vm_stop_duration = int(params.get("vm_stop_duration", "10")) vm_name = 
params.get("main_vm") vm = env.get_vm(vm_name) readonly = (params.get("readonly_test", "no") == "yes") # Backup domain XML xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: if pmsuspend: vm_xml.VMXML.set_pm_suspend(vm_name) # Add or remove qemu-agent from guest before test vm.prepare_guest_agent(channel=channel, start=agent) session = vm.wait_for_login() # Let's set guest timezone to region where we do not # have day light savings, to affect the time session.cmd("timedatectl set-timezone Asia/Kolkata") try: init_guest_times, _ = get_guest_times(session) guest_tz_diff = init_guest_times['local_sys'] - init_guest_times['utc_sys'] logging.debug("Timezone diff on guest is %d hours.", (guest_tz_diff.total_seconds() // 3600)) if channel and agent: init_time(session) # Expected fail message patterns fail_patts = prepare_fail_patts() # Message patterns test should skip when met skip_patts = [ r'The command \S+ has not been found', ] # Record start time start = time.time() # Record host utc time before testing org_host_time = get_host_utc_time() # Record host local time before testing outp = process.run('date', shell=True) time_st = re.sub(r'\S+ (?=\S+$)', '', outp.stdout_text.strip()) org_host_loc_time = datetime.datetime.strptime(time_st, r"%a %b %d %H:%M:%S %Y") # Get original guest times org_times, guest_duration = get_guest_times(session) # Run some operations to stop guest system stop_time = stop_vm() # Run command with specified options. res = virsh.domtime(vm_name, now=now, pretty=pretty, sync=sync, time=set_time, readonly=readonly, debug=True) libvirt.check_result(res, fail_patts, skip_patts) # Check interval between two check of guest time interval = datetime.timedelta( seconds=(time.time() - start)) logging.debug("Interval between guest checking: %s", interval) if not shutdown: # Get current guest times cur_times, _ = get_guest_times(session) check_time(res, org_times, cur_times) finally: if shutdown: vm.start() # sync guest timezone utils_time.sync_timezone_linux(vm) # Sync guest time with host if channel and agent and not shutdown: res = virsh.domtime(vm_name, now=True) if res.exit_status: session.close() test.error("Failed to recover guest time:\n%s" % res) session.close() finally: # Restore VM XML xml_backup.sync()
def run(test, params, env):
    """
    Test the virsh inject-nmi command:

    1. Configure kernel cmdline to support kdump
    2. Start kdump service
    3. Inject NMI to the guest
    4. Check NMI times
    """
    for cmd in 'inject-nmi', 'qemu-monitor-command':
        if not virsh.has_help_command(cmd):
            raise error.TestNAError("This version of libvirt does not "
                                    "support the %s test" % cmd)

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    expected_nmi_times = params.get("expected_nmi_times", '0')
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    if start_vm == "yes":
        # Start kdump service in the guest
        cmd = "which kdump"
        try:
            run_cmd_in_guest(vm, cmd)
        except Exception:
            try:
                # Try to install kexec-tools on fedoraX/rhelx.y guest
                run_cmd_in_guest(vm, "yum install -y kexec-tools")
            except Exception:
                raise error.TestNAError("Requires kexec-tools (or the "
                                        "equivalent for your distro)")

        # Enable kdump service in the guest
        cmd = "service kdump start"
        run_cmd_in_guest(vm, cmd)

        # Record the original 'NMI' information from /proc/interrupts
        cmd = "grep NMI /proc/interrupts"
        nmi_str = run_cmd_in_guest(vm, cmd)

        # Count the CPUs listed in /proc/cpuinfo
        cmd = "grep -E '^process' /proc/cpuinfo | wc -l"
        vcpu_num = run_cmd_in_guest(vm, cmd).strip()

        logging.info("Inject NMI to the guest via virsh inject_nmi")
        virsh.inject_nmi(vm_name, debug=True, ignore_status=False)

        logging.info("Inject NMI to the guest via virsh qemu_monitor_command")
        virsh.qemu_monitor_command(vm_name, '{"execute":"inject-nmi"}')

        # inject-nmi injects a Non-Maskable Interrupt into the default CPU
        # (x86/s390) or all CPUs (ppc64); the default CPU index is 0
        cmd = "grep NMI /proc/interrupts | awk '{print $2}'"
        nmi_from_default_vcpu = run_cmd_in_guest(vm, cmd)
        real_nmi_times = nmi_from_default_vcpu.splitlines()[0]
        logging.debug("The current Non-Maskable Interrupts: %s",
                      real_nmi_times)

        # Check the Non-Maskable Interrupt count
        if real_nmi_times != expected_nmi_times:
            raise error.TestFail("Actual NMI count %s does not match the "
                                 "expected count %s"
                                 % (real_nmi_times, expected_nmi_times))
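# A sketch of the /proc/interrupts parsing the NMI check above depends on:
# the "NMI:" row carries one counter column per CPU, and the test compares
# the default CPU's counter (column 0) with the expected value. The sample
# line is illustrative.
def nmi_counts_per_cpu(interrupts_text):
    """Return the per-CPU NMI counters from /proc/interrupts content."""
    for line in interrupts_text.splitlines():
        fields = line.split()
        if fields and fields[0] == "NMI:":
            counts = []
            for field in fields[1:]:
                if not field.isdigit():
                    break  # the textual description follows the counters
                counts.append(int(field))
            return counts
    return []


assert nmi_counts_per_cpu(
    "NMI:          2          2   Non-maskable interrupts") == [2, 2]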
def run(test, params, env):
    """
    Test domfsthaw command, make sure that all supported options work well

    Test scenarios:
    1. fsthaw fs which has been frozen
    2. fsthaw fs which has not been frozen

    Note: --mountpoint is still not supported, so it is not tested here
    """

    if not virsh.has_help_command('domfsthaw'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the domfsthaw test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    start_vm = ("yes" == params.get("start_vm", "no"))
    no_freeze = ("yes" == params.get("no_freeze", "yes"))
    has_qemu_ga = not ("yes" == params.get("no_qemu_ga", "no"))
    start_qemu_ga = not ("yes" == params.get("no_start_qemu_ga", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("domfsthaw_options", "")
    vm_ref = params.get("vm_ref", "")

    # Back up the original guest XML
    xml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)

    try:
        vm = env.get_vm(vm_name)
        vm.destroy()
        if not vm.is_alive():
            vm.start()

        # First freeze all guest file systems
        if not no_freeze:
            # Add channel device for qemu-ga
            vm.prepare_guest_agent()
            cmd_result = virsh.domfsfreeze(vm_name, debug=True)
            if cmd_result.exit_status != 0:
                raise error.TestFail("Fail to do virsh domfsfreeze, error %s"
                                     % cmd_result.stderr)

        if has_qemu_ga:
            vm.prepare_guest_agent(start=start_qemu_ga)
        else:
            # Remove qemu-ga channel
            vm.prepare_guest_agent(channel=has_qemu_ga, start=False)

        if start_vm:
            if not vm.is_alive():
                vm.start()
        else:
            vm.destroy()

        if vm_ref == "none":
            vm_name = " "

        cmd_result = virsh.domfsthaw(vm_name, options=options, debug=True)
        if not status_error:
            if cmd_result.exit_status != 0:
                raise error.TestFail("Fail to do virsh domfsthaw, error %s"
                                     % cmd_result.stderr)
        else:
            if cmd_result.exit_status == 0:
                raise error.TestFail("Command 'virsh domfsthaw' succeeded "
                                     "unexpectedly in a negative test")

    finally:
        # Restore the original guest XML
        xml_backup.sync()
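# Most tests in this suite, the domfsthaw one above included, reduce to the
# same positive/negative verdict on a command result. A generic sketch of
# that convention; `result` only needs exit_status/stderr attributes, and
# the helper name is illustrative.
def assert_expected_status(result, status_error):
    """Fail when the command outcome contradicts the status_error flag."""
    if status_error and result.exit_status == 0:
        raise AssertionError("command succeeded in a negative test")
    if not status_error and result.exit_status != 0:
        raise AssertionError("command failed in a positive test: %s"
                             % result.stderr)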
def run_virsh_vcpupin(test, params, env): """ Test the command virsh vcpupin (1) Get the host and guest cpu count (2) Call virsh vcpupin for each vcpu with pinning of each cpu (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu (4) TODO: Right now the testcase covers the pinning one cpu at a time this can be improved by a random number of cpus """ def build_actual_info(domname, vcpu): """ This function returns list of the vcpu's affinity from virsh vcpuinfo output @param: domname: VM Name to operate on @param: vcpu: vcpu number for which the affinity is required """ output = virsh.vcpuinfo(domname) cmd = re.findall('[^Affinity:][-y]+', str(output)) total_affinity = cmd[vcpu].lstrip() actual_affinity = list(total_affinity) return actual_affinity def build_expected_info(vcpu, cpu): """ This function returns the list of vcpu's expected affinity build @param: vcpu: vcpu number for which the affinity is required @param: cpu: cpu details for the affinity """ expected_affinity = [] for i in range(int(host_cpu_count)): expected_affinity.append('y') for i in range(int(host_cpu_count)): if cpu != i: expected_affinity[i] = '-' expected_affinity_proc = int(math.pow(2, cpu)) return expected_affinity, expected_affinity_proc def virsh_check_vcpupin(domname, vcpu, cpu, pid): """ This function checks the actual and the expected affinity of given vcpu and raises error if not matchs @param: domname: VM Name to operate on @param: vcpu: vcpu number for which the affinity is required @param: cpu: cpu details for the affinity """ expected_output, expected_output_proc = build_expected_info(vcpu, cpu) actual_output = build_actual_info(domname, vcpu) # Get the vcpus pid vcpus_pid = vm.get_vcpus_pid() vcpu_pid = vcpus_pid[vcpu] # Get the actual cpu affinity value in the proc entry output = utils.cpu_affinity_by_task(pid, vcpu_pid) actual_output_proc = int(output, 16) if expected_output == actual_output: logging.info("successfully pinned cpu: %s --> vcpu: %s", cpu, vcpu) else: raise error.TestFail( "Command 'virsh vcpupin %s %s %s'not succeeded" ", cpu pinning details not updated properly in" " virsh vcpuinfo command output" % (vm_name, vcpu, cpu)) if expected_output_proc == actual_output_proc: logging.info( "successfully pinned cpu: %s --> vcpu: %s" " in respective proc entry", cpu, vcpu) else: raise error.TestFail( "Command 'virsh vcpupin %s %s %s'not succeeded" " cpu pinning details not updated properly in" " /proc/%s/task/%s/status" % (vm_name, vcpu, cpu, pid, vcpu_pid)) if not virsh.has_help_command('vcpucount'): raise error.TestNAError( "This version of libvirt doesn't support this test") # Get the vm name, pid of vm and check for alive vm_name = params.get("main_vm") vm = env.get_vm(params["main_vm"]) vm.verify_alive() pid = vm.get_pid() # Get the host cpu count host_cpu_count = utils.count_cpus() # Get the guest vcpu count guest_vcpu_count = virsh.vcpucount_live(vm_name) # Run test case for vcpu in range(int(guest_vcpu_count)): for cpu in range(int(host_cpu_count)): vm.vcpupin(vcpu, cpu) virsh_check_vcpupin(vm_name, vcpu, cpu, pid)
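# A sketch of the bitmask arithmetic behind build_expected_info() above:
# pinning a vcpu to host CPU n should leave exactly bit n set in the task's
# Cpus_allowed mask, so the expected integer value is 2**n. The hex string
# stands in for what the kernel reports under /proc.
def mask_matches_cpu(cpus_allowed_hex, cpu):
    """True if the hex affinity mask has only bit `cpu` set."""
    return int(cpus_allowed_hex, 16) == 1 << cpu


assert mask_matches_cpu("8", 3)      # CPU 3 -> mask 0b1000
assert not mask_matches_cpu("3", 0)  # two bits set -> not a single-CPU pin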
def run_virsh_vcpupin(test, params, env): """ Test the command virsh vcpupin (1) Get the host and guest cpu count (2) Call virsh vcpupin for each vcpu with pinning of each cpu (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu """ def affinity_from_vcpuinfo(domname, vcpu): """ This function returns list of the vcpu's affinity from virsh vcpuinfo output :param domname: VM Name to operate on :param vcpu: vcpu number for which the affinity is required """ output = virsh.vcpuinfo(domname).stdout.rstrip() affinity = re.findall('CPU Affinity: +[-y]+', output) total_affinity = affinity[int(vcpu)].split()[-1].strip() actual_affinity = list(total_affinity) return actual_affinity def check_vcpupin(domname, vcpu, cpu_list, pid): """ This function checks the actual and the expected affinity of given vcpu and raises error if not matchs :param domname: VM Name to operate on :param vcpu: vcpu number for which the affinity is required :param cpu: cpu details for the affinity """ expected_output = utils_test.libvirt.cpus_string_to_affinity_list( cpu_list, host_cpu_count) actual_output = affinity_from_vcpuinfo(domname, vcpu) if expected_output == actual_output: logging.info("successfully pinned cpu_list: %s --> vcpu: %s", cpu_list, vcpu) else: raise error.TestFail("Command 'virsh vcpupin %s %s %s'not " "succeeded, cpu pinning details not " "updated properly in virsh vcpuinfo " "command output" % (vm_name, vcpu, cpu_list)) if pid is None: return # Get the vcpus pid vcpus_pid = vm.get_vcpus_pid() vcpu_pid = vcpus_pid[vcpu] # Get the actual cpu affinity value in the proc entry output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid) actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list( output, host_cpu_count) if expected_output == actual_output_proc: logging.info( "successfully pinned cpu: %s --> vcpu: %s" " in respective proc entry", cpu_list, vcpu) else: raise error.TestFail( "Command 'virsh vcpupin %s %s %s'not " "succeeded cpu pinning details not " "updated properly in /proc/%s/task/%s/status" % (vm_name, vcpu, cpu_list, pid, vcpu_pid)) def run_and_check_vcpupin(vm_name, vcpu, cpu_list, options, pid): """ Run the vcpupin command and then check the result. """ # Execute virsh vcpupin command. cmdResult = virsh.vcpupin(vm_name, vcpu, cpu_list, options) if cmdResult.exit_status: if not status_error: # Command fail and it is in positive case. raise error.TestFail(cmdResult) else: # Command fail and it is in negative case. return else: if status_error: # Command success and it is in negative case. raise error.TestFail(cmdResult) else: # Command success and it is in positive case. # "--config" will take effect after VM destroyed. if options == "--config": virsh.destroy(vm_name) pid = None # Check the result of vcpupin command. check_vcpupin(vm_name, vcpu, cpu_list, pid) if not virsh.has_help_command('vcpucount'): raise error.TestNAError("This version of libvirt doesn't" " support this test") # Get the vm name, pid of vm and check for alive vm_name = params.get("main_vm", "virt-tests-vm1") vm = env.get_vm(vm_name) pid = vm.get_pid() # Get the variables for vcpupin command. args = params.get("vcpupin_args", "dom_name") if args == "dom_name": args = vm_name options = params.get("vcpupin_options", "--current") cpu_list = params.get("vcpupin_cpu_list", "x") # Get status of this case. status_error = ("yes" == params.get("status_error", "no")) # Run cases when guest is shutoff. 
if vm.is_dead() and (params.get("start_vm") == "no"): run_and_check_vcpupin(args, 0, 0, "", 0) return # Get the host cpu count host_cpu_count = utils.count_cpus() if (int(host_cpu_count) < 2) and (not cpu_list == "x"): raise error.TestNAError("We need more cpus on host in this case " "for the cpu_list=%s. But current number of " "cpu on host is %s." % (cpu_list, host_cpu_count)) # Get the guest vcpu count guest_vcpu_count = virsh.vcpucount(vm_name, "--live --active").stdout.strip() # Run test case for vcpu in range(int(guest_vcpu_count)): if cpu_list == "x": for cpu in range(int(host_cpu_count)): run_and_check_vcpupin(args, vcpu, str(cpu), options, pid) else: cpu_max = int(host_cpu_count) - 1 if cpu_list == "x-y": cpus = "0-%s" % cpu_max elif cpu_list == "x,y": cpus = "0,%s" % cpu_max elif cpu_list == "x-y,^z": cpus = "0-%s,^%s" % (cpu_max, cpu_max) elif cpu_list == "r": cpus = "r" elif cpu_list == "-1": cpus = "-1" elif cpu_list == "out_of_max": cpus = str(cpu_max + 1) else: raise error.TestNAError("Cpu_list=%s is not recognized." % cpu_list) run_and_check_vcpupin(args, vcpu, cpus, options, pid)
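# A sketch of what utils_test.libvirt.cpus_string_to_affinity_list() is
# assumed to do with the cpulist notations exercised above ("x-y", "x,y",
# "x-y,^z"): expand the spec into a per-CPU 'y'/'-' list matching vcpuinfo's
# affinity column. Malformed specs such as "r" or "-1" simply raise here.
def cpus_string_to_affinity(spec, cpu_count):
    """Expand a cpulist such as '0-2,^1' into ['y', '-', 'y', ...]."""
    affinity = ['-'] * cpu_count
    for part in spec.split(','):
        if part.startswith('^'):      # exclusion entry
            affinity[int(part[1:])] = '-'
        elif '-' in part:             # inclusive range
            low, high = (int(x) for x in part.split('-'))
            for i in range(low, high + 1):
                affinity[i] = 'y'
        else:                         # single CPU
            affinity[int(part)] = 'y'
    return affinity


assert cpus_string_to_affinity("0-2,^1", 4) == ['y', '-', 'y', '-']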
def run(test, params, env): """ Test send-key command, include all types of codeset and sysrq For normal sendkey test, we create a file to check the command execute by send-key. For sysrq test, check the /var/log/messages and guest status """ if not virsh.has_help_command('send-key'): raise error.TestNAError("This version of libvirt does not support " "the send-key test") vm_name = params.get("main_vm", "virt-tests-vm1") status_error = ("yes" == params.get("status_error", "no")) options = params.get("sendkey_options", "") sysrq_test = ("yes" == params.get("sendkey_sysrq", "no")) readonly = params.get("readonly", False) username = params.get("username") password = params.get("password") create_file = params.get("create_file_name") def send_line(send_str): """ send string to guest with send-key and end with Enter """ for send_ch in list(send_str): virsh.sendkey(vm_name, "KEY_%s" % send_ch.upper(), ignore_status=False) virsh.sendkey(vm_name, "KEY_ENTER", ignore_status=False) vm = env.get_vm(vm_name) session = vm.wait_for_login() if sysrq_test: # Is 'rsyslog' installed on guest? It'll be what writes out # to /var/log/messages rpm_stat = session.cmd_status("rpm -q rsyslog") if rpm_stat != 0: logging.debug("rsyslog not found in guest installing") stat_install = session.cmd_status("yum install -y rsyslog", 300) if stat_install != 0: raise error.TestFail("Fail to install rsyslog, make" "sure that you have usable repo in guest") # clear messages, restart rsyslog, and make sure it's running session.cmd("echo '' > /var/log/messages") session.cmd("service rsyslog restart") ps_stat = session.cmd_status("ps aux |grep rsyslog") if ps_stat != 0: raise error.TestFail("rsyslog is not running in guest") # enable sysrq session.cmd("echo 1 > /proc/sys/kernel/sysrq") # make sure the environment is clear if create_file is not None: session.cmd("rm -rf %s" % create_file) try: # wait for tty1 started tty1_stat = "ps aux|grep [/]sbin/.*tty.*tty1" timeout = 60 while timeout >= 0 and \ session.get_command_status(tty1_stat) != 0: time.sleep(1) timeout = timeout - 1 if timeout < 0: raise error.TestFail("Can not wait for tty1 started in 60s") # send user and passwd to guest to login send_line(username) time.sleep(2) send_line(password) time.sleep(2) output = virsh.sendkey(vm_name, options, readonly=readonly) time.sleep(2) if output.exit_status != 0: if status_error: logging.info("Failed to sendkey to guest as expected, Error:" "%s.", output.stderr) return else: raise error.TestFail("Failed to send key to guest, Error:%s." 
% output.stderr) elif status_error: raise error.TestFail("Expect fail, but succeed indeed.") if create_file is not None: # check if created file exist cmd_ls = "ls %s" % create_file sec_status, sec_output = session.get_command_status_output(cmd_ls) if sec_status == 0: logging.info("Succeed to create file with send key") else: raise error.TestFail("Fail to create file with send key, " "Error:%s" % sec_output) elif sysrq_test: # check /var/log/message info according to different key # Since there's no guarantee when messages will be written # we'll do a check and wait loop for up to 60 seconds timeout = 60 while timeout >= 0: if "KEY_H" in options: get_status = session.cmd_status("cat /var/log/messages|" "grep 'SysRq.*HELP'") elif "KEY_M" in options: get_status = session.cmd_status("cat /var/log/messages|" "grep 'SysRq.*Show Memory'") elif "KEY_T" in options: get_status = session.cmd_status("cat /var/log/messages|" "grep 'SysRq.*Show State'") if get_status == 0: timeout = -1 else: session.cmd("echo \"virsh sendkey waiting\" >> /var/log/messages") time.sleep(1) timeout = timeout -1 if get_status != 0: raise error.TestFail("SysRq does not take effect in guest, " "options is %s" % options) else: logging.info("Succeed to send SysRq command") else: raise error.TestFail("Test cfg file invalid: either sysrq_params " "or create_file_name must be defined") finally: if create_file is not None: session.cmd("rm -rf %s" % create_file) session.close()
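# The SysRq branch above greps the guest log for a message matching the key
# that was sent; a sketch of that mapping (KEY_B is checked via domstate
# rather than the log, so it is absent here). Kernel message wording varies
# across versions, which is why the test polls for up to 60 seconds instead
# of checking once.
SYSRQ_LOG_PATTERNS = {
    "KEY_H": r"SysRq.*HELP",
    "KEY_M": r"SysRq.*Show Memory",
    "KEY_T": r"SysRq.*Show State",
}


def sysrq_pattern_for(keystrokes):
    """Return the log pattern expected for the SysRq key sent, if any."""
    for key, pattern in SYSRQ_LOG_PATTERNS.items():
        if key in keystrokes:
            return pattern
    return None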
def run(test, params, env):
    """
    Test virsh domfsfreeze and domfsthaw commands and their options.

    1) Start a guest with/without guest agent configured;
    2) Freeze the guest file systems with domfsfreeze;
    3) Create a file on guest to see whether the command hangs;
    4) Thaw the guest file systems with domfsthaw;
    5) Check the file is already created;
    6) Touch the file again to ensure guest file systems are not frozen;
    7) Cleanup test environment.
    """
    def check_freeze(session):
        """
        Check whether the file systems have been frozen by touching a test
        file and checking whether the command hangs.

        :param session: Guest session to be tested.
        """
        try:
            output = session.cmd_output('touch freeze_test', timeout=10)
            test.fail("Failed to freeze file system. "
                      "Create file succeeded:\n%s" % output)
        except aexpect.ShellTimeoutError:
            pass

    def check_thaw(session):
        """
        Check whether the file systems have been thawed: the test file whose
        creation was blocked while frozen should now exist, and touching it
        again should succeed.

        :param session: Guest session to be tested.
        """
        status, output = session.cmd_status_output('ls freeze_test')
        if status:
            test.fail("Failed to thaw file system. "
                      "Find created file failed:\n%s" % output)
        try:
            output = session.cmd_output('touch freeze_test', timeout=10)
        except aexpect.ShellTimeoutError:
            test.fail("File system is still frozen. "
                      "Touch file timed out:\n%s" % output)

    def cleanup(session):
        """
        Clean up the test file used for the freeze/thaw test.

        :param session: Guest session to be cleaned up.
        """
        status, output = session.cmd_status_output('rm -f freeze_test')
        if status:
            test.error("Failed to clean up test file:\n%s" % output)

    if not virsh.has_help_command('domfsfreeze'):
        test.cancel("This version of libvirt does not support "
                    "the domfsfreeze/domfsthaw test")

    channel = ("yes" == params.get("prepare_channel", "yes"))
    agent = ("yes" == params.get("start_agent", "yes"))
    mountpoint = params.get("mountpoint", None)
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        vm.prepare_guest_agent(channel=channel, start=agent)
        session = vm.wait_for_login()
        try:
            # Expected fail message patterns
            fail_patts = []
            if not channel:
                fail_patts.append(r"QEMU guest agent is not configured")
            if not agent:
                # For older versions
                fail_patts.append(r"Guest agent not available for now")
                # For newer versions
                fail_patts.append(r"Guest agent is not responding")
            # Message patterns that make the test skip when met
            skip_patts = [
                r'The command \S+ has not been found',
                r'specifying mountpoints is not supported',
            ]

            res = virsh.domfsfreeze(vm_name, mountpoint=mountpoint)
            libvirt.check_result(res, fail_patts, skip_patts)
            if not res.exit_status:
                check_freeze(session)

            res = virsh.domfsthaw(vm_name, mountpoint=mountpoint)
            libvirt.check_result(res, fail_patts, skip_patts)
            if not res.exit_status:
                check_thaw(session)

            cleanup(session)
        finally:
            session.close()
    finally:
        xml_backup.sync()
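# The freeze check above rests on one observable: writes to a frozen file
# system block until thaw. A minimal local sketch of the same probe using a
# shell `touch` with a timeout, standing in for the guest session call; the
# path is assumed to live on the file system under test.
import subprocess


def fs_appears_frozen(path, timeout=10):
    """True if touching `path` blocks for longer than `timeout` seconds."""
    try:
        subprocess.run(["touch", path], timeout=timeout, check=True)
        return False    # write completed: file system is thawed
    except subprocess.TimeoutExpired:
        return True     # write blocked: file system is frozen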
def run(test, params, env):
    """
    Test the command virsh freecell

    (1) Call virsh freecell
    (2) Call virsh freecell --all
    (3) Call virsh freecell with a numeric argument
    (4) Call virsh freecell xyz
    (5) Call virsh freecell with libvirtd service stopped
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    option = params.get("virsh_freecell_options")

    # Prepare libvirtd service
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test case
    cmd_result = virsh.freecell(ignore_status=True, extra=option,
                                uri=connect_uri, debug=True)
    output = cmd_result.stdout.strip()
    status = cmd_result.exit_status

    # Recover libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Older libvirt has no numatune command; use its presence as a proxy
    # for the freecell option-handling behaviour
    if virsh.has_help_command('numatune'):
        OLD_LIBVIRT = False
    else:
        OLD_LIBVIRT = True
        if option == '--all':
            raise error.TestNAError("Older libvirt virsh freecell "
                                    "doesn't support the --all option")

    def output_check(freecell_output):
        if not re.search("ki?B", freecell_output, re.IGNORECASE):
            raise error.TestFail("virsh freecell output invalid: "
                                 + freecell_output)

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off":
                raise error.TestFail("Command 'virsh freecell' succeeded "
                                     "with libvirtd service stopped, "
                                     "incorrect")
            else:
                # Newer libvirt rejects the bogus option
                if not OLD_LIBVIRT:
                    raise error.TestFail("Command 'virsh freecell %s' "
                                         "succeeded (incorrect command)"
                                         % option)
                else:
                    # Older libvirt silently accepts it
                    raise error.TestNAError("Older libvirt virsh freecell "
                                            "incorrectly processes extraneous "
                                            "command-line options")
    elif status_error == "no":
        output_check(output)
        if status != 0:
            raise error.TestFail("Command 'virsh freecell %s' failed "
                                 "(correct command)" % option)
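# The only invariant the positive freecell check asserts is that the output
# reports memory in kB/KiB-style units; a sketch of that predicate with
# illustrative sample strings.
import re


def looks_like_freecell_output(text):
    """True if the text contains a kB/KiB-style unit, case-insensitively."""
    return bool(re.search(r"ki?B", text, re.IGNORECASE))


assert looks_like_freecell_output("0: 884420 KiB")
assert not looks_like_freecell_output("error: unknown command")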
def run(test, params, env): """ Test the command virsh memtune 1) To get the current memtune parameters 2) Change the parameter values 3) Check the memtune query updated with the values 4) Check whether the mounted cgroup path gets the updated value 5) Check the output of virsh dumpxml 6) Check vm is alive """ # Check for memtune command is available in the libvirt version under test if not virsh.has_help_command("memtune"): test.cancel( "Memtune not available in this libvirt version") # Check if memtune options are supported for option in memtune_types: if not virsh.has_command_help_match("memtune", option): test.cancel("%s option not available in memtune " "cmd in this libvirt version" % option) # Get common parameters acceptable_minus = int(params.get("acceptable_minus", 8)) step_mem = params.get("mt_step_mem", "no") == "yes" expect_error = params.get("expect_error", "no") == "yes" restart_libvirtd = params.get("restart_libvirtd", "no") == "yes" set_one_line = params.get("set_in_one_command", "no") == "yes" mt_hard_limit = params.get("mt_hard_limit", None) mt_soft_limit = params.get("mt_soft_limit", None) mt_swap_hard_limit = params.get("mt_swap_hard_limit", None) # if restart_libvirtd is True, set set_one_line is True set_one_line = True if restart_libvirtd else set_one_line # Get the vm name, pid of vm and check for alive vm = env.get_vm(params["main_vm"]) vm.verify_alive() pid = vm.get_pid() # Resolve the memory cgroup path for a domain path = utils_cgroup.resolve_task_cgroup_path(int(pid), "memory") # step_mem is used to do step increment limit testing if step_mem: mem_step(params, path, vm, test, acceptable_minus) return if not set_one_line: # Set one type memtune limit in one command if mt_hard_limit: index = 0 mt_limit = mt_hard_limit elif mt_soft_limit: index = 1 mt_limit = mt_soft_limit elif mt_swap_hard_limit: index = 2 mt_limit = mt_swap_hard_limit mt_type = memtune_types[index] mt_cgname = memtune_cgnames[index] options = " --%s %s --live" % (mt_type, mt_limit) result = virsh.memtune_set(vm.name, options, debug=True) if expect_error: fail_patts = [params.get("error_info")] libvirt.check_result(result, fail_patts, []) else: # If limit value is negative, means no memtune limit mt_expected = mt_limit if int(mt_limit) > 0 else -1 check_limit(path, mt_expected, mt_type, mt_cgname, vm, test, acceptable_minus) else: # Set 3 limits in one command line mt_limits = [mt_hard_limit, mt_soft_limit, mt_swap_hard_limit] options = " %s --live" % ' '.join(mt_limits) result = virsh.memtune_set(vm.name, options, debug=True) if expect_error: fail_patts = [params.get("error_info")] libvirt.check_result(result, fail_patts, []) else: check_limits(path, mt_limits, vm, test, acceptable_minus) if restart_libvirtd: libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() if not expect_error: # After libvirtd restared, check memtune values again check_limits(path, mt_limits, vm, test, acceptable_minus)
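# A sketch of the comparison check_limit() is assumed to perform on cgroup
# v1: virsh memtune takes limits in KiB while memory.limit_in_bytes stores
# bytes rounded down to page granularity, hence the one-sided
# `acceptable_minus` tolerance above. The "unlimited" byte value is the
# cgroup-v1 maximum; treat the exact constant as an assumption.
def limit_matches(cgroup_value_bytes, memtune_kib, acceptable_minus=8):
    """True if the cgroup byte value matches the KiB limit within tolerance."""
    if memtune_kib == -1:    # -1 means no limit is set
        return cgroup_value_bytes == 9223372036854771712
    diff_kib = memtune_kib - cgroup_value_bytes // 1024
    return 0 <= diff_kib <= acceptable_minus


assert limit_matches(512000 * 1024, 512000)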
def run(test, params, env): """ Test domfstrim command, make sure that all supported options work well Test scenaries: 1. fstrim without options 2. fstrim with --minimum with large options 3. fstrim with --minimum with small options Note: --mountpoint still not supported so will not test here """ def recompose_xml(vm_name, scsi_disk): """ Add scsi disk, guest agent and scsi controller for guest :param: vm_name: Name of domain :param: scsi_disk: scsi_debug disk name """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_path = scsi_disk # Add scsi disk xml scsi_disk = Disk(type_name="block") scsi_disk.device = "lun" scsi_disk.source = scsi_disk.new_disk_source( **{'attrs': { 'dev': disk_path }}) scsi_disk.target = {'dev': "sdb", 'bus': "scsi"} find_scsi = "no" controllers = vmxml.xmltreefile.findall("devices/controller") for controller in controllers: if controller.get("type") == "scsi": find_scsi = "yes" vmxml.add_device(scsi_disk) # Add scsi disk controller if find_scsi == "no": scsi_controller = Controller("controller") scsi_controller.type = "scsi" scsi_controller.index = "0" scsi_controller.model = "virtio-scsi" vmxml.add_device(scsi_controller) # Redefine guest vmxml.sync() if not virsh.has_help_command('domfstrim'): raise error.TestNAError("This version of libvirt does not support " "the domfstrim test") try: utils_misc.find_command("lsscsi") except ValueError: raise error.TestNAError("Command 'lsscsi' is missing. You must " "install it.") vm_name = params.get("main_vm", "virt-tests-vm1") status_error = ("yes" == params.get("status_error", "no")) minimum = params.get("domfstrim_minimum") mountpoint = params.get("domfstrim_mountpoint") options = params.get("domfstrim_options", "") is_fulltrim = ("yes" == params.get("is_fulltrim", "yes")) uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') has_qemu_ga = not ("yes" == params.get("no_qemu_ga", "no")) start_qemu_ga = not ("yes" == params.get("no_start_qemu_ga", "no")) if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") # Do backup for origin xml xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: vm = env.get_vm(vm_name) if not vm.is_alive(): vm.start() session = vm.wait_for_login() bef_list = session.cmd_output("fdisk -l|grep ^/dev|" "cut -d' ' -f1").split("\n") session.close() vm.destroy() # Load module and get scsi disk name utils.load_module("scsi_debug lbpu=1 lbpws=1") scsi_disk = utils.run("lsscsi|grep scsi_debug|" "awk '{print $6}'").stdout.strip() # Create partition open("/tmp/fdisk-cmd", "w").write("n\np\n\n\n\nw\n") output = utils.run("fdisk %s < /tmp/fdisk-cmd" % scsi_disk).stdout.strip() logging.debug("fdisk output %s", output) os.remove("/tmp/fdisk-cmd") # Format disk output = utils.run("mkfs.ext3 %s1" % scsi_disk).stdout.strip() logging.debug("output %s", output) # Add scsi disk in guest recompose_xml(vm_name, scsi_disk) # Prepare guest agent and start guest if has_qemu_ga: vm.prepare_guest_agent(start=start_qemu_ga) else: # Remove qemu-ga channel vm.prepare_guest_agent(channel=has_qemu_ga, start=False) guest_session = vm.wait_for_login() # Get new generated disk af_list = guest_session.cmd_output("fdisk -l|grep ^/dev|" "cut -d' ' -f1").split('\n') new_disk = "".join(list(set(bef_list) ^ set(af_list))) # Mount disk in guest guest_session.cmd("mkdir -p /home/test && 
mount %s /home/test" % new_disk) # Do first fstrim before all to get original map for compare cmd_result = virsh.domfstrim(vm_name) if cmd_result.exit_status != 0: if not status_error: raise error.TestFail("Fail to do virsh domfstrim, error %s" % cmd_result.stderr) def get_diskmap_size(): """ Collect size from disk map :return: disk size """ map_cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map" diskmap = utils.run(map_cmd).stdout.strip('\n\x00') logging.debug("disk map is %s", diskmap) sum = 0 for i in diskmap.split(","): sum = sum + int(i.split("-")[1]) - int(i.split("-")[0]) return sum ori_size = get_diskmap_size() # Write date in disk dd_cmd = "dd if=/dev/zero of=/home/test/file bs=1048576 count=5" guest_session.cmd(dd_cmd) def _full_mapped(): """ Do full map check :return: True or False """ full_size = get_diskmap_size() return (ori_size < full_size) if not utils_misc.wait_for(_full_mapped, timeout=30): raise error.TestError("Scsi map is not updated after dd command.") full_size = get_diskmap_size() # Remove disk content in guest guest_session.cmd("rm -rf /home/test/*") guest_session.close() def _trim_completed(): """ Do empty fstrim check :return: True of False """ cmd_result = virsh.domfstrim(vm_name, minimum, mountpoint, options, unprivileged_user=unprivileged_user, uri=uri) if cmd_result.exit_status != 0: if not status_error: raise error.TestFail( "Fail to do virsh domfstrim, error %s" % cmd_result.stderr) else: logging.info("Fail to do virsh domfstrim as expected: %s", cmd_result.stderr) return True empty_size = get_diskmap_size() if is_fulltrim: return empty_size <= ori_size else: # For partly trim will check later return False if not utils_misc.wait_for(_trim_completed, timeout=30): # Get result again to check partly fstrim empty_size = get_diskmap_size() if not is_fulltrim: if ori_size < empty_size <= full_size: logging.info("Success to do fstrim partly") return True raise error.TestFail("Fail to do fstrim. (orignal size: %s), " "(current size: %s), (full size: %s)" % (ori_size, empty_size, full_size)) logging.info("Success to do fstrim") finally: # Do domain recovery vm.shutdown() xml_backup.sync() utils.unload_module("scsi_debug")
def run(test, params, env): """ Test send-key command, include all types of codeset and sysrq For normal sendkey test, we create a file to check the command execute by send-key. For sysrq test, check the /var/log/messages in RHEL or /var/log/syslog in Ubuntu and guest status """ if not virsh.has_help_command('send-key'): test.cancel("This version of libvirt does not support the send-key " "test") vm_name = params.get("main_vm", "avocado-vt-vm1") status_error = ("yes" == params.get("status_error", "no")) keystrokes = params.get("sendkey", "") codeset = params.get("codeset", "") holdtime = params.get("holdtime", "") sysrq_test = ("yes" == params.get("sendkey_sysrq", "no")) sleep_time = int(params.get("sendkey_sleeptime", 2)) readonly = params.get("readonly", False) username = params.get("username") password = params.get("password") create_file = params.get("create_file_name") uri = params.get("virsh_uri") simultaneous = params.get("sendkey_simultaneous", "yes") == "yes" unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current libvirt " "version.") def send_line(send_str): """ send string to guest with send-key and end with Enter """ for send_ch in list(send_str): virsh.sendkey(vm_name, "KEY_%s" % send_ch.upper(), ignore_status=False) virsh.sendkey(vm_name, "KEY_ENTER", ignore_status=False) vm = env.get_vm(vm_name) session = vm.wait_for_login() if sysrq_test: # In postprocess of previous testcase would pause and resume the VM # that would change the domstate to running (unpaused) and cause # sysrq reboot testcase to fail as the domstate persist across reboot # so it is better to destroy and start VM before the test starts if "KEY_B" in keystrokes: cmd_result = virsh.domstate(vm_name, '--reason', ignore_status=True) if "unpaused" in cmd_result.stdout.strip(): vm.destroy() vm.start() session = vm.wait_for_login() LOG_FILE = "/var/log/messages" if "ubuntu" in vm.get_distro().lower(): LOG_FILE = "/var/log/syslog" # Is 'rsyslog' installed on guest? It'll be what writes out # to LOG_FILE if not utils_package.package_install("rsyslog", session): test.fail("Fail to install rsyslog, make sure that you have " "usable repo in guest") # clear messages, restart rsyslog, and make sure it's running session.cmd("echo '' > %s" % LOG_FILE) session.cmd("service rsyslog restart") ps_stat = session.cmd_status("ps aux |grep rsyslog") if ps_stat != 0: test.fail("rsyslog is not running in guest") # enable sysrq session.cmd("echo 1 > /proc/sys/kernel/sysrq") # make sure the environment is clear if create_file is not None: session.cmd("rm -rf %s" % create_file) try: # wait for tty started tty_stat = "ps aux|grep tty" timeout = 60 while timeout >= 0 and \ session.get_command_status(tty_stat) != 0: time.sleep(1) timeout = timeout - 1 if timeout < 0: test.fail("Can not wait for tty started in 60s") # send user and passwd to guest to login send_line(username) time.sleep(2) send_line(password) time.sleep(2) if sysrq_test or simultaneous: output = virsh.sendkey(vm_name, keystrokes, codeset=codeset, holdtime=holdtime, readonly=readonly, unprivileged_user=unprivileged_user, uri=uri) else: # If multiple keycodes are specified, they are all sent # simultaneously to the guest, and they may be received # in random order. If you need distinct keypresses, you # must use multiple send-key invocations. 
for keystroke in keystrokes.split(): output = virsh.sendkey(vm_name, keystroke, codeset=codeset, holdtime=holdtime, readonly=readonly, unprivileged_user=unprivileged_user, uri=uri) if output.exit_status: test.fail("Failed to send key %s to guest: %s" % (keystroke, output.stderr)) time.sleep(sleep_time) if output.exit_status != 0: if status_error: logging.info( "Failed to sendkey to guest as expected, Error:" "%s.", output.stderr) return else: test.fail("Failed to send key to guest, Error:%s." % output.stderr) elif status_error: test.fail("Expect fail, but succeed indeed.") if create_file is not None: # check if created file exist cmd_ls = "ls %s" % create_file sec_status, sec_output = session.get_command_status_output(cmd_ls) if sec_status == 0: logging.info("Succeed to create file with send key") else: test.fail("Fail to create file with send key, Error:%s" % sec_output) elif sysrq_test: # check LOG_FILE info according to different key # Since there's no guarantee when messages will be written # we'll do a check and wait loop for up to 60 seconds timeout = 60 while timeout >= 0: if "KEY_H" in keystrokes: cmd = "cat %s | grep 'SysRq.*HELP'" % LOG_FILE get_status = session.cmd_status(cmd) elif "KEY_M" in keystrokes: cmd = "cat %s | grep 'SysRq.*Show Memory'" % LOG_FILE get_status = session.cmd_status(cmd) elif "KEY_T" in keystrokes: cmd = "cat %s | grep 'SysRq.*Show State'" % LOG_FILE get_status = session.cmd_status(cmd) # Sometimes SysRq.*Show State string missed in LOG_FILE # as a fall back check for runnable tasks logged if get_status != 0: cmd = "cat %s | grep 'runnable tasks:'" % LOG_FILE get_status = session.cmd_status(cmd) elif "KEY_B" in keystrokes: session = vm.wait_for_login() result = virsh.domstate(vm_name, '--reason', ignore_status=True) output = result.stdout.strip() logging.debug("The guest state: %s", output) if not output.count("booted"): get_status = 1 else: get_status = 0 session.close() if get_status == 0: timeout = -1 else: session.cmd("echo \"virsh sendkey waiting\" >> %s" % LOG_FILE) time.sleep(1) timeout = timeout - 1 if get_status != 0: test.fail("SysRq does not take effect in guest, keystrokes is " "%s" % keystrokes) else: logging.info("Succeed to send SysRq command") else: test.fail("Test cfg file invalid: either sysrq_params or " "create_file_name must be defined") finally: if create_file is not None: session.cmd("rm -rf %s" % create_file) session.close()
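# A CLI-level sketch of the codeset/holdtime options this test drives through
# the virsh wrapper: --codeset selects the keycode naming scheme and
# --holdtime keeps the keys depressed for the given number of milliseconds.
# The domain name is a placeholder.
import subprocess


def send_key(domain, keys, codeset=None, holdtime=None):
    """Invoke `virsh send-key` with optional codeset and holdtime."""
    cmd = ["virsh", "send-key", domain]
    if codeset:
        cmd += ["--codeset", codeset]
    if holdtime:
        cmd += ["--holdtime", str(holdtime)]
    cmd += keys.split()
    return subprocess.run(cmd, capture_output=True, text=True)

# e.g. a one-second Ctrl+Alt+Del in the linux codeset:
# send_key("demo-vm", "KEY_LEFTCTRL KEY_LEFTALT KEY_DELETE",
#          codeset="linux", holdtime=1000)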
def run(test, params, env): """ Test virsh domblkerror in 2 types error 1. unspecified error 2. no space """ if not virsh.has_help_command('domblkerror'): test.cancel("This version of libvirt does not support domblkerror " "test") vm_name = params.get("main_vm", "avocado-vt-vm1") error_type = params.get("domblkerror_error_type") timeout = params.get("domblkerror_timeout", 240) mnt_dir = params.get("domblkerror_mnt_dir", "/home/test") export_file = params.get("nfs_export_file", "/etc/exports") img_name = params.get("domblkerror_img_name", "libvirt-disk") img_size = params.get("domblkerror_img_size") target_dev = params.get("domblkerror_target_dev", "vdb") pool_name = params.get("domblkerror_pool_name", "fs_pool") vol_name = params.get("domblkerror_vol_name", "vol1") ubuntu = distro.detect().name == 'Ubuntu' rhel = distro.detect().name == 'rhel' nfs_service_package = params.get("nfs_service_package", "nfs-kernel-server") nfs_service = None selinux_bool = None session = None selinux_bak = "" vm = env.get_vm(vm_name) if error_type == "unspecified error": selinux_local = params.get("setup_selinux_local", "yes") == "yes" if not ubuntu and not rhel: nfs_service_package = "nfs" elif rhel: nfs_service_package = "nfs-server" if not rhel and not utils_package.package_install(nfs_service_package): test.cancel("NFS package not available in host to test") # backup /etc/exports shutil.copyfile(export_file, "%s.bak" % export_file) # backup xml vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Gerenate tmp dir tmp_dir = data_dir.get_tmp_dir() img_dir = os.path.join(tmp_dir, 'images') if not os.path.exists(img_dir): os.mkdir(img_dir) # Generate attached disk process.run("qemu-img create %s %s" % (os.path.join(img_dir, img_name), img_size), shell=True, verbose=True) # Get unspecified error if error_type == "unspecified error": # In this situation, guest will attach a disk on nfs, stop nfs # service will cause guest paused and get unspecified error nfs_dir = os.path.join(tmp_dir, 'mnt') if not os.path.exists(nfs_dir): os.mkdir(nfs_dir) mount_opt = "rw,no_root_squash,async" res = libvirt.setup_or_cleanup_nfs(is_setup=True, mount_dir=nfs_dir, is_mount=False, export_options=mount_opt, export_dir=img_dir) if not ubuntu: selinux_bak = res["selinux_status_bak"] process.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 " "127.0.0.1:%s %s" % (img_dir, nfs_dir), shell=True, verbose=True) img_path = os.path.join(nfs_dir, img_name) nfs_service = Factory.create_service(nfs_service_package) if not ubuntu and selinux_local: params['set_sebool_local'] = "yes" params['local_boolean_varible'] = "virt_use_nfs" params['local_boolean_value'] = "on" selinux_bool = utils_misc.SELinuxBoolean(params) selinux_bool.setup() elif error_type == "no space": # Steps to generate no space block error: # 1. Prepare a iscsi disk and build fs pool with it # 2. Create vol with larger capacity and 0 allocation # 3. Attach this disk in guest # 4. 
In guest, create large image in the vol, which may cause # guest paused _pool_vol = None pool_target = os.path.join(tmp_dir, pool_name) _pool_vol = libvirt.PoolVolumeTest(test, params) _pool_vol.pre_pool(pool_name, "fs", pool_target, img_name, image_size=img_size) _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name) img_path = os.path.join(pool_target, vol_name) # Generate disk xml # Guest will attach a disk with cache=none and error_policy=stop img_disk = Disk(type_name="file") img_disk.device = "disk" img_disk.source = img_disk.new_disk_source( **{'attrs': {'file': img_path}}) img_disk.driver = {'name': "qemu", 'type': "raw", 'cache': "none", 'error_policy': "stop"} img_disk.target = {'dev': target_dev, 'bus': "virtio"} logging.debug("disk xml is %s", img_disk.xml) # Start guest and get session if not vm.is_alive(): vm.start() session = vm.wait_for_login() # Get disk list before operation get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2" bef_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n") logging.debug("disk_list_debug = %s", bef_list) # Attach disk to guest ret = virsh.attach_device(vm_name, img_disk.xml) if ret.exit_status != 0: test.fail("Fail to attach device %s" % ret.stderr) time.sleep(2) logging.debug("domain xml is %s", virsh.dumpxml(vm_name)) # get disk list after attach aft_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n") logging.debug("disk list after attaching - %s", aft_list) # Find new disk after attach new_disk = "".join(list(set(bef_list) ^ set(aft_list))) logging.debug("new disk is %s", new_disk) def create_large_image(): """ Create large image in guest """ # install dependent packages pkg_list = ["parted", "e2fsprogs"] for pkg in pkg_list: if not utils_package.package_install(pkg, session): test.error("Failed to install dependent package %s" % pkg) # create partition and file system session.cmd("parted -s %s mklabel msdos" % new_disk) session.cmd("parted -s %s mkpart primary ext3 '0%%' '100%%'" % new_disk) # mount disk and write file in it session.cmd("mkfs.ext3 %s1" % new_disk) session.cmd("mkdir -p %s && mount %s1 %s" % (mnt_dir, new_disk, mnt_dir)) # The following step may cause guest paused before it return try: session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 " "count=51200 && sync" % mnt_dir) except Exception as err: logging.debug("Expected Fail %s", err) session.close() create_large_image() if error_type == "unspecified error": # umount nfs to trigger error after create large image if nfs_service is not None: nfs_service.stop() logging.debug("nfs status is %s", nfs_service.status()) # wait and check the guest status with timeout def _check_state(): """ Check domain state """ return (vm.state() == "paused") if not utils_misc.wait_for(_check_state, timeout): # If not paused, perform one more IO operation to the mnt disk session = vm.wait_for_login() session.cmd("echo 'one more write to big file' > %s/big_file" % mnt_dir) if not utils_misc.wait_for(_check_state, 60): test.fail("Guest does not paused, it is %s now" % vm.state()) else: logging.info("Now domain state changed to paused status") output = virsh.domblkerror(vm_name) if output.exit_status == 0: expect_result = "%s: %s" % (img_disk.target['dev'], error_type) if output.stdout.strip() == expect_result: logging.info("Get expect result: %s", expect_result) else: test.fail("Failed to get expect result, get %s" % output.stdout.strip()) else: test.fail("Fail to get domblkerror info:%s" % output.stderr) finally: logging.info("Do clean steps") if 
session: session.close() if error_type == "unspecified error": if nfs_service is not None: nfs_service.start() vm.destroy() if os.path.isfile("%s.bak" % export_file): shutil.move("%s.bak" % export_file, export_file) res = libvirt.setup_or_cleanup_nfs(is_setup=False, mount_dir=nfs_dir, export_dir=img_dir, restore_selinux=selinux_bak) if selinux_bool: selinux_bool.cleanup(keep_authorized_keys=True) elif error_type == "no space": vm.destroy() if _pool_vol: _pool_vol.cleanup_pool(pool_name, "fs", pool_target, img_name) vmxml_backup.sync() data_dir.clean_tmp_files()
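# A sketch of parsing `virsh domblkerror` output into a {device: error} map,
# matching the "vdb: no space" style line the check above compares against;
# an error-free domain prints "No errors found", which yields an empty map.
def parse_domblkerror(output):
    """Map device names to error strings from domblkerror output."""
    errors = {}
    for line in output.strip().splitlines():
        if ':' in line:
            dev, err = line.split(':', 1)
            errors[dev.strip()] = err.strip()
    return errors


assert parse_domblkerror("vdb: no space") == {"vdb": "no space"}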
def run_virsh_freecell(test, params, env):
    """
    Test the command virsh freecell

    (1) Call virsh freecell
    (2) Call virsh freecell --all
    (3) Call virsh freecell with a numeric argument
    (4) Call virsh freecell xyz
    (5) Call virsh freecell with the libvirtd service stopped
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    option = params.get("virsh_freecell_options")

    # Prepare libvirtd service
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        libvirt_vm.service_libvirtd_control("stop")

    # Run test case
    cmd_result = virsh.freecell(ignore_status=True, extra=option,
                                uri=connect_uri, debug=True)
    output = cmd_result.stdout.strip()
    status = cmd_result.exit_status

    # Recover libvirtd service
    if libvirtd == "off":
        libvirt_vm.service_libvirtd_control("start")

    # Check the output
    if virsh.has_help_command('numatune'):
        OLD_LIBVIRT = False
    else:
        OLD_LIBVIRT = True
        if option == '--all':
            raise error.TestNAError("Older libvirt virsh freecell "
                                    "doesn't support the --all option")

    def output_check(freecell_output):
        if not re.search("ki?B", freecell_output, re.IGNORECASE):
            raise error.TestFail("virsh freecell output invalid: "
                                 + freecell_output)

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off":
                raise error.TestFail("Command 'virsh freecell' succeeded "
                                     "with libvirtd service stopped, "
                                     "incorrect")
            else:
                # Newer libvirt
                if not OLD_LIBVIRT:
                    raise error.TestFail("Command 'virsh freecell %s' "
                                         "succeeded (incorrect command)"
                                         % option)
                else:
                    # Older libvirt
                    raise error.TestNAError("Older libvirt virsh freecell "
                                            "incorrectly processes "
                                            "extraneous command-line "
                                            "options")
    elif status_error == "no":
        output_check(output)
        if status != 0:
            raise error.TestFail("Command 'virsh freecell %s' failed "
                                 "(correct command)" % option)
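# NOTE: output_check() above only asserts that a KiB/kB unit appears
# somewhere in the freecell output. A slightly stricter sketch, assuming
# the typical per-node output format ("0: 4048740 KiB" per line plus a
# "Total:" line); parse_freecell_output is illustrative and not part of
# the original test:
def parse_freecell_output(freecell_output):
    """
    Parse 'virsh freecell --all'-style output into a {node: KiB} dict.
    """
    free = {}
    for line in freecell_output.splitlines():
        # Match lines such as "0: 4048740 KiB" or "Total: 8097480 KiB"
        match = re.match(r'^\s*(\d+|Total):\s+(\d+)\s+[kK]i?B\s*$', line)
        if match:
            free[match.group(1)] = int(match.group(2))
    return free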
def run(test, params, env):
    """
    Test virsh domtime command and its options.

    1) Start a guest with/without guest agent configured;
    2) Record guest times;
    3) Do some operation to stop VM;
    4) Run virsh domtime command with different options;
    5) Check the command result;
    6) Check the guest times against expectation;
    7) Cleanup test environment.
    """
    epoch = datetime.datetime(1970, 1, 1, 0, 0, 0)
    # Max time that can be set with domtime successfully in newer qemu-ga
    time_max_1 = 3155731199
    # Max time that can be set with domtime successfully in older qemu-ga
    time_max_2 = 3155759999
    # Max time that can be set with domtime but fails to set RTC in older
    # qemu-ga
    time_max_3 = 9223372035

    def init_time(session):
        """
        Initialize guest RTC time to epoch + 1234567890 and system time
        one day later.

        :param session: Session from which to access guest
        """
        res = virsh.domtime(vm_name, time=1234567890)
        if res.exit_status:
            logging.debug("Failed to init time to 1234567890:\n%s", res)
        status, output = session.cmd_status_output('date -s "1 day"')
        if status:
            test.error("Failed to set guest time:\n%s" % output)

    def get_host_utc_time():
        """
        Get host UTC time from the date command.
        """
        res = process.run("date -u", shell=True)
        # Strip timezone info from output
        # e.g. 'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009'
        time_str = re.sub(r'\S+ (?=\S+$)', '', res.stdout_text.strip())
        return datetime.datetime.strptime(time_str,
                                          r"%a %b %d %H:%M:%S %Y")

    def run_cmd(session, cmd):
        """
        Run a command in a session and record the duration of the call.
        """
        start = time.time()
        output = session.cmd_output(cmd)
        duration = time.time() - start
        logging.info('Result of command "%s". Duration: %s. Output:%s',
                     cmd, duration, output.strip())
        return output, duration

    def get_guest_times(session):
        """
        Retrieve different guest times as a dict for checking.

        Keys:
            local_hw:  Guest RTC time in local timezone
            local_sys: Guest system time in local timezone
            utc_sys:   Guest system time in UTC
            domtime:   Guest system time in UTC from the virsh domtime
                       command

        :param session: Session from which to access guest
        """
        times = {}
        get_begin = time.time()
        # Guest RTC local timezone time
        output, _ = run_cmd(session, 'hwclock')
        time_str, _ = re.search(r"(.+) (\S+ seconds)", output).groups()
        try:
            # Output format 1: Tue 01 Mar 2016 01:53:46 PM CST
            # Remove timezone info from output
            new_str = re.sub(r'\s+\S+$', '', time_str)
            times['local_hw'] = datetime.datetime.strptime(
                new_str, r"%a %d %b %Y %I:%M:%S %p")
        except ValueError:
            # There are two possible output formats for `hwclock`
            # Output format 2: Sat Feb 14 07:31:33 2009
            times['local_hw'] = datetime.datetime.strptime(
                time_str, r"%a %b %d %H:%M:%S %Y")
        delta = time.time() - get_begin
        times['local_hw'] -= datetime.timedelta(seconds=delta)

        # Guest system local timezone time
        output, _ = run_cmd(session, 'date')
        # Strip timezone info from output
        # e.g. 'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009'
        time_str = re.sub(r'\S+ (?=\S+$)', '', output.strip())
        times['local_sys'] = datetime.datetime.strptime(
            time_str, r"%a %b %d %H:%M:%S %Y")
        delta = time.time() - get_begin
        times['local_sys'] -= datetime.timedelta(seconds=delta)

        # Guest system UTC timezone time
        output, _ = run_cmd(session, 'date -u')
        # Strip timezone info from output
        # e.g. 'Sun Feb 15 07:31:40 CST 2009' -> 'Sun Feb 15 07:31:40 2009'
        time_str = re.sub(r'\S+ (?=\S+$)', '', output.strip())
        times['utc_sys'] = datetime.datetime.strptime(
            time_str, r"%a %b %d %H:%M:%S %Y")
        delta = time.time() - get_begin
        times['utc_sys'] -= datetime.timedelta(seconds=delta)

        # Guest UTC time from virsh domtime
        res = virsh.domtime(vm_name, pretty=True, ignore_status=True)
        if not res.exit_status:
            logging.info('Result of "domtime". Duration: %s. Output:%s',
                         res.duration, res.stdout.strip())
            _, time_str = res.stdout.split(" ", 1)
            times['domtime'] = datetime.datetime.strptime(
                time_str.strip(), r"%Y-%m-%d %H:%M:%S")
            delta = time.time() - get_begin
            times['domtime'] -= datetime.timedelta(seconds=delta)
        else:
            logging.debug("Unable to get domain time:\n%s", res)
            times['domtime'] = None

        return times, time.time() - get_begin

    def check_get_success(expected_times):
        """
        Check virsh domtime get result against expected times.

        :param expected_times: Expected times for checking
        """
        _, time_str = res.stdout.split(" ", 1)
        if pretty:
            # Time: 2015-01-13 06:29:18
            domtime = datetime.datetime.strptime(time_str.strip(),
                                                 r"%Y-%m-%d %H:%M:%S")
        else:
            # Time: 1421130740
            domtime = epoch + datetime.timedelta(seconds=int(time_str))
        time_shift = time.time() - start
        logging.debug("Time shift is %s", time_shift)
        result_diff = (domtime - expected_times['domtime']).total_seconds()
        if abs(result_diff) > 2.0:
            test.fail("Expected to get time %s, but got %s, time diff: %s"
                      % (org_times['domtime'], domtime, result_diff))

    def check_guest_times(expected_times, cur_times):
        """
        Check guest times after test against expected times.

        :param expected_times: Expected times for checking
        """
        time_shift = time.time() - start
        logging.debug("Time shift is %s", time_shift)
        error_msgs = []
        for key in cur_times:
            if cur_times[key] is not None:
                cur = cur_times[key]
                expect = expected_times[key]
                diff = (cur - expect).total_seconds()
                msg = ("For %s, expected time %s, got %s, time diff: %s"
                       % (key, expect, cur, diff))
                logging.debug(msg)
                if abs(diff) > 2.0:
                    error_msgs.append(msg)
        if error_msgs:
            test.fail('\n'.join(error_msgs))

    def check_time(result, org_times, cur_times):
        """
        Check whether domain time has been changed accordingly.

        :param result: virsh domtime CmdResult instance
        :param org_times: Original guest times
        """
        action = "get"
        if now or sync or (set_time is not None):
            action = "set"

        host_tz_diff = org_host_loc_time - org_host_time
        logging.debug("Timezone diff on host is %d hours.",
                      (host_tz_diff.total_seconds() // 3600))

        logging.info('Add %ss to expected guest time', interval)
        if action == 'get':
            expected_times = org_times
        elif action == 'set':
            if result.exit_status:
                # Time does not change if domtime fails
                expected_times = org_times
            else:
                # Time changes accordingly on success
                if now:
                    utc_time = org_host_time
                    local_time = utc_time + guest_tz_diff
                elif sync:
                    local_time = org_times["local_hw"]
                    utc_time = local_time - guest_tz_diff
                elif set_time is not None:
                    utc_time = epoch + datetime.timedelta(
                        seconds=(int(set_time) - guest_duration))
                    local_time = utc_time + guest_tz_diff
                expected_times = {}
                expected_times['local_hw'] = local_time
                expected_times['local_sys'] = local_time
                expected_times['utc_sys'] = utc_time
                expected_times['domtime'] = utc_time

        # Add the interval between the two checks of guest time
        for key in expected_times:
            if expected_times[key] is not None:
                expected_times[key] += interval

        # Hardware time never stops
        # Software time stops if suspended or managed-saved
        if suspend or managedsave:
            logging.info('Remove %ss from expected guest software time',
                         stop_time)
            expected_times["domtime"] -= stop_time
            expected_times["local_sys"] -= stop_time
            expected_times["utc_sys"] -= stop_time

        # Check guest times if domtime succeeded
        check_guest_times(expected_times, cur_times)

        # Check if the output of domtime is correct
        if action == 'get' and not result.exit_status:
            check_get_success(expected_times)

    def prepare_fail_patts():
        """
        Predict fail patterns from test parameters.
        """
        fail_patts = []
        if not channel:
            fail_patts.append(r"QEMU guest agent is not configured")
        if not agent:
            # For older versions
            fail_patts.append(r"Guest agent not available for now")
            # For newer versions
            fail_patts.append(r"Guest agent is not responding")
        if int(now) + int(sync) + int(bool(set_time)) > 1:
            fail_patts.append(r"Options \S+ and \S+ are mutually exclusive")
        if shutdown:
            fail_patts.append(r"domain is not running")
        if set_time is not None:
            if int(set_time) < 0:
                fail_patts.append(r"Invalid argument")
            elif time_max_1 < int(set_time) <= time_max_2:
                fail_patts.append(r"Invalid time")
            elif time_max_2 < int(set_time) <= time_max_3:
                fail_patts.append(r"Invalid time")
            elif time_max_3 < int(set_time):
                fail_patts.append(r"too big for guest agent")
        if readonly:
            fail_patts.append(r"operation forbidden")
        return fail_patts

    def stop_vm():
        """
        Suspend, managedsave, pmsuspend or shutdown a VM for a period of
        time.
        """
        stop_start = time.time()
        vmlogin_dur = 0.0
        if suspend:
            vm.pause()
            time.sleep(vm_stop_duration)
            vm.resume()
        elif managedsave:
            vm.managedsave()
            time.sleep(vm_stop_duration)
            vm.start()
            start_dur = time.time()
            vm.wait_for_login()
            vmlogin_dur = time.time() - start_dur
        elif pmsuspend:
            vm.pmsuspend()
            time.sleep(vm_stop_duration)
            vm.pmwakeup()
            start_dur = time.time()
            vm.wait_for_login()
            vmlogin_dur = time.time() - start_dur
        elif shutdown:
            vm.destroy()

        # Check real guest stop time
        stop_seconds = time.time() - stop_start - vmlogin_dur
        stop_time = datetime.timedelta(seconds=stop_seconds)
        logging.debug("Guest stopped: %s", stop_time)
        return stop_time

    # Check availability of the virsh domtime command
    if not virsh.has_help_command('domtime'):
        test.cancel("This version of libvirt does not support "
                    "the domtime test")

    channel = (params.get("prepare_channel", "yes") == 'yes')
    agent = (params.get("start_agent", "yes") == 'yes')
    pretty = (params.get("domtime_pretty", "no") == 'yes')
    now = (params.get("domtime_now", "no") == 'yes')
    sync = (params.get("domtime_sync", "no") == 'yes')
    set_time = params.get("domtime_time", None)
    shutdown = (params.get("shutdown_vm", "no") == 'yes')
    suspend = (params.get("suspend_vm", "no") == 'yes')
    managedsave = (params.get("managedsave_vm", "no") == 'yes')
    pmsuspend = (params.get("pmsuspend_vm", "no") == 'yes')
    vm_stop_duration = int(params.get("vm_stop_duration", "10"))
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    readonly = (params.get("readonly_test", "no") == "yes")

    # Backup domain XML
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        if pmsuspend:
            vm_xml.VMXML.set_pm_suspend(vm_name)
        # Add or remove qemu-agent from the guest before the test
        vm.prepare_guest_agent(channel=channel, start=agent)
        session = vm.wait_for_login()
        # Set the guest timezone to a region without daylight saving
        # time, so DST cannot affect the time checks
        session.cmd("timedatectl set-timezone Asia/Kolkata")
        try:
            init_guest_times, _ = get_guest_times(session)
            guest_tz_diff = (init_guest_times['local_sys']
                             - init_guest_times['utc_sys'])
            logging.debug("Timezone diff on guest is %d hours.",
                          (guest_tz_diff.total_seconds() // 3600))

            if channel and agent:
                init_time(session)

            # Expected fail message patterns
            fail_patts = prepare_fail_patts()

            # Message patterns the test should skip when met
            skip_patts = [
                r'The command \S+ has not been found',
            ]

            # Record start time
            start = time.time()

            # Record host UTC time before testing
            org_host_time = get_host_utc_time()

            # Record host local time before testing
            outp = process.run('date', shell=True)
            time_st = re.sub(r'\S+ (?=\S+$)', '', outp.stdout_text.strip())
            org_host_loc_time = datetime.datetime.strptime(
                time_st, r"%a %b %d %H:%M:%S %Y")

            # Get original guest times
            org_times, guest_duration = get_guest_times(session)

            # Run some operations to stop the guest system
            stop_time = stop_vm()

            # Run command with the specified options
            res = virsh.domtime(vm_name, now=now, pretty=pretty, sync=sync,
                                time=set_time, readonly=readonly,
                                debug=True)
            libvirt.check_result(res, fail_patts, skip_patts)

            # Check the interval between the two checks of guest time
            interval = datetime.timedelta(seconds=(time.time() - start))
            logging.debug("Interval between guest checking: %s", interval)

            if not shutdown:
                # Get current guest times
                cur_times, _ = get_guest_times(session)
                check_time(res, org_times, cur_times)
        finally:
            if shutdown:
                vm.start()
            # Sync guest timezone
            utils_time.sync_timezone_linux(vm)
            # Sync guest time with host
            if channel and agent and not shutdown:
                res = virsh.domtime(vm_name, now=True)
                if res.exit_status:
                    session.close()
                    test.error("Failed to recover guest time:\n%s" % res)
            session.close()
    finally:
        # Restore VM XML
        xml_backup.sync()
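# NOTE: check_get_success() above relies on the two output formats of
# 'virsh domtime': with --pretty it prints "Time: 2015-01-13 06:29:18"
# (UTC), otherwise "Time: 1421130740" (seconds since the epoch). A minimal
# sketch of that conversion, using only the epoch arithmetic shown above
# (parse_domtime_output is a hypothetical name, not part of the test):
def parse_domtime_output(stdout, pretty):
    """
    Convert 'virsh domtime' stdout to a naive UTC datetime.
    """
    _, time_str = stdout.split(" ", 1)
    if pretty:
        return datetime.datetime.strptime(time_str.strip(),
                                          r"%Y-%m-%d %H:%M:%S")
    # Raw output is seconds since 1970-01-01 00:00:00 UTC
    return (datetime.datetime(1970, 1, 1)
            + datetime.timedelta(seconds=int(time_str)))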
def run(test, params, env):
    """
    Test command: migrate-compcache <domain> [--size <number>]

    1) Run the migrate-compcache command and check the return code.
    """
    vm_ref = params.get("vm_ref", "name")
    vm_name = params.get('main_vm')
    start_vm = 'yes' == params.get('start_vm', 'yes')
    pause_vm = 'yes' == params.get('pause_after_start_vm', 'no')
    expect_succeed = 'yes' == params.get('expect_succeed', 'yes')
    size_option = params.get('size_option', 'valid')
    action = params.get('compcache_action', 'get')
    vm = env.get_vm(vm_name)

    # Check if the virsh command migrate-compcache is available
    if not virsh.has_help_command('migrate-compcache'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the virsh command migrate-compcache")

    # Prepare the VM state if it's not correct
    if start_vm and not vm.is_alive():
        vm.start()
    elif not start_vm and vm.is_alive():
        vm.destroy()
    if pause_vm and not vm.is_paused():
        vm.pause()

    # Set up the domain reference
    if vm_ref == 'domname':
        vm_ref = vm_name

    # Set up size according to size_option:
    #   minimal: Same as memory page size
    #   maximal: Same as guest memory
    #   empty:   An empty string
    #   small:   One byte less than page size
    #   large:   Larger than guest memory
    #   huge:    Largest int64
    page_size = get_page_size()
    if size_option == 'minimal':
        size = str(page_size)
    elif size_option == 'maximal':
        size = str(vm.get_max_mem() * 1024)
    elif size_option == 'empty':
        size = '""'
    elif size_option == 'small':
        size = str(page_size - 1)
    elif size_option == 'large':
        # Guest memory is larger than the max mem set,
        # add 50MB to ensure size exceeds guest memory
        size = str(vm.get_max_mem() * 1024 + 50000000)
    elif size_option == 'huge':
        size = str(2 ** 64 - 1)
    else:
        size = size_option

    # For a 'get' action, simply omit the size option
    if action == 'get':
        size = None

    # Run the command under test
    result = virsh.migrate_compcache(vm_ref, size=size)
    logging.debug(result)

    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    check_job_compcache = False
    if (not remote_host.count("EXAMPLE") and size is not None
            and expect_succeed):
        # Configure ssh autologin for the remote host
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_pwd,
                              port=22)
        if vm.is_dead():
            vm.start()
        if vm.is_paused():
            vm.resume()
        vm.wait_for_login()
        # Do an actual migration to verify the compression cache of
        # migration jobs
        command = "virsh migrate %s %s --compressed" % (vm_name, remote_uri)
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        # Give the job enough time to start
        t = 0
        while t < 5:
            jobtype = vm.get_job_type()
            if "None" == jobtype:
                t += 1
                time.sleep(1)
                continue
            elif jobtype is False:
                logging.error("Get job type failed.")
                break
            else:
                logging.debug("Job started: %s", jobtype)
                break
        jobinfo = virsh.domjobinfo(vm_ref, debug=True,
                                   ignore_status=True).stdout
        check_job_compcache = True
        if p.poll():
            try:
                p.kill()
            except OSError:
                pass
        # Clean up in case of a successful migration
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # Shut down the VM to make sure the compcache setting is cleared
    if vm.is_alive():
        vm.destroy()

    # Check the test result
    if expect_succeed:
        if result.exit_status != 0:
            raise error.TestFail('Expected to succeed, but failed with '
                                 'result:\n%s' % result)
        if check_job_compcache:
            for line in jobinfo.splitlines():
                detail = line.split(":")
                if detail[0].count("Compression cache"):
                    value = detail[-1].split()[0].strip()
                    value = int(float(value))
                    unit = detail[-1].split()[-1].strip()
                    if unit == "KiB":
                        size = int(int(size) / 1024)
                    elif unit == "MiB":
                        size = int(int(size) / 1048576)
                    elif unit == "GiB":
                        size = int(int(size) / 1073741824)
                    if value != size:
                        raise error.TestFail("Compression cache does not "
                                             "match the value that was set")
                    else:
                        return
            raise error.TestFail("Failed to get compression cache from "
                                 "job info.")
    elif not expect_succeed:
        if result.exit_status == 0:
            raise error.TestFail('Expected to fail, but succeeded with '
                                 'result:\n%s' % result)
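# NOTE: The unit handling above converts the size that was set (in bytes)
# to the binary unit reported by 'virsh domjobinfo' before comparing. The
# same normalization as a small pure function (normalize_to_unit is a
# hypothetical helper, shown only to make the arithmetic explicit):
def normalize_to_unit(size_bytes, unit):
    """
    Convert a byte count to the given binary unit used by domjobinfo.
    """
    divisors = {"KiB": 1024, "MiB": 1048576, "GiB": 1073741824}
    return int(int(size_bytes) / divisors.get(unit, 1))

# e.g. normalize_to_unit(67108864, "MiB") == 64, matching a
# "Compression cache: 64.000 MiB" line from domjobinfo.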
def run(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether virsh vcpupin has pinned the respective vcpu to cpu
    """

    def affinity_from_vcpuinfo(vm_name, vcpu):
        """
        Return a list of the vcpu's affinity from virsh vcpuinfo output.

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """
        output = virsh.vcpuinfo(vm_name).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def check_vcpupin(vm_name, vcpu, cpu_list, pid, vcpu_pid):
        """
        Check the actual against the expected affinity of the given vcpu
        and raise an error if they do not match.

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu details for the affinity
        :param pid: VM pid
        :param vcpu_pid: vcpu thread pid
        """
        total_cpu = utils.run(
            "ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l").stdout
        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list, int(total_cpu))
        logging.debug("Expected affinity: %s", expected_output)
        actual_output = affinity_from_vcpuinfo(vm_name, vcpu)
        logging.debug("Actual affinity in vcpuinfo output: %s",
                      actual_output)
        if expected_output == actual_output:
            logging.info("Successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            raise error.TestFail("Cpu pinning details not updated properly"
                                 " in virsh vcpuinfo command output")

        if pid is None:
            return
        # Get the actual cpu affinity value from the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = (
            utils_test.libvirt.cpus_string_to_affinity_list(
                output, int(total_cpu)))
        logging.debug("Actual affinity in guest proc: %s",
                      actual_output_proc)
        if expected_output == actual_output_proc:
            logging.info("Successfully pinned vcpu: %s --> cpu: %s"
                         " in respective proc entry", vcpu, cpu_list)
        else:
            raise error.TestFail("Cpu pinning details not updated properly"
                                 " in /proc/%s/task/%s/status"
                                 % (pid, vcpu_pid))

    def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
        """
        Run the vcpupin command and then check the result.
        """
        if vm_ref == "name":
            vm_ref = vm.name
        elif vm_ref == "uuid":
            vm_ref = vm.get_uuid()
        # Execute the virsh vcpupin command
        cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options,
                                  debug=True)
        if cmdResult.exit_status:
            if not status_error:
                # Command failed in a positive case
                raise error.TestFail(cmdResult)
            else:
                # Command failed in a negative case
                return
        else:
            if status_error:
                # Command succeeded in a negative case
                raise error.TestFail(cmdResult)
            else:
                # Command succeeded in a positive case
                # "--config" takes effect after the VM is destroyed
                pid = None
                vcpu_pid = None
                if options == "--config":
                    virsh.destroy(vm.name)
                else:
                    pid = vm.get_pid()
                    logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                    vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of the vcpupin command
                check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    def offline_pin_and_check(vm, vcpu, cpu_list):
        """
        Edit domain xml to pin a vcpu and check the result.
        """
        cputune = vm_xml.VMCPUTuneXML()
        cputune.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        vmxml.cputune = cputune
        vmxml.sync()
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)
        pid = vm.get_pid()
        vcpu_pid = vm.get_vcpus_pid()[vcpu]
        check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    if not virsh.has_help_command('vcpucount'):
        raise error.TestNAError("This version of libvirt doesn't"
                                " support this test")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    # Get the variables for the vcpupin command
    vm_ref = params.get("vcpupin_vm_ref", "name")
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    start_vm = ("yes" == params.get("start_vm", "yes"))
    # Get the status of this case
    status_error = ("yes" == params.get("status_error", "no"))
    # Edit domain xml to pin vcpus
    offline_pin = ("yes" == params.get("offline_pin", "no"))

    # Backup for recovery
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Get the guest vcpu count
    if offline_pin:
        vcpucount_option = "--config --active"
    else:
        vcpucount_option = "--live --active"
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       vcpucount_option).stdout.strip()

    try:
        # Control multi-domain vcpu affinity
        multi_dom = ("yes" == params.get("multi_dom_pin", "no"))
        vm2 = None
        if multi_dom:
            vm_names = params.get("vms").split()
            if len(vm_names) > 1:
                vm2 = env.get_vm(vm_names[1])
            else:
                raise error.TestError("Need more than one domain")
            if not vm2:
                raise error.TestNAError("No %s found" % vm_names[1])
            vm2.destroy()
            vm2xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
            vm2xml_backup = vm2xml.copy()
            # Make sure vm2 has the same cpu numbers as vm
            vm2xml.set_vm_vcpus(vm2.name, int(guest_vcpu_count),
                                guest_vcpu_count)
            if start_vm:
                vm2.start()

        # Run cases when the guest is shut off
        if not offline_pin:
            if vm.is_dead() and not start_vm:
                run_and_check_vcpupin(vm, vm_ref, 0, 0, "")
                return

        # Get the host cpu count
        host_cpu_count = utils.count_cpus()
        cpu_max = int(host_cpu_count) - 1
        if (int(host_cpu_count) < 2) and (not cpu_list == "x"):
            raise error.TestNAError("We need more cpus on the host for "
                                    "cpu_list=%s, but the current number "
                                    "of cpus on the host is %s."
                                    % (cpu_list, host_cpu_count))

        # Find the list of online cpus
        cpus_list = utils.cpu_online_map()
        logging.info("Active cpus on host are %s", cpus_list)

        # Run test case
        for vcpu in range(int(guest_vcpu_count)):
            if cpu_list == "x":
                for cpu in cpus_list:
                    left_cpus = "0-%s,^%s" % (cpu_max, cpu)
                    if offline_pin:
                        offline_pin_and_check(vm, vcpu, str(cpu))
                        if multi_dom:
                            offline_pin_and_check(vm2, vcpu, left_cpus)
                    else:
                        run_and_check_vcpupin(vm, vm_ref, vcpu, str(cpu),
                                              options)
                        if multi_dom:
                            run_and_check_vcpupin(vm2, "name", vcpu,
                                                  left_cpus, options)
            else:
                if cpu_list == "x-y":
                    cpus = "0-%s" % cpu_max
                elif cpu_list == "x,y":
                    cpus = ','.join(random.sample(cpus_list, 2))
                    logging.info(cpus)
                elif cpu_list == "x-y,^z":
                    cpus = "0-%s,^%s" % (cpu_max, cpu_max)
                elif cpu_list == "r":
                    cpus = "r"
                elif cpu_list == "-1":
                    cpus = "-1"
                elif cpu_list == "out_of_max":
                    cpus = str(cpu_max + 1)
                else:
                    raise error.TestNAError("cpu_list=%s is not recognized."
                                            % cpu_list)
                if offline_pin:
                    offline_pin_and_check(vm, vcpu, cpus)
                else:
                    run_and_check_vcpupin(vm, vm_ref, vcpu, cpus, options)
    finally:
        # Recover the xml of vm
        vmxml_backup.sync()
        if vm2:
            vm2xml_backup.sync()
def run(test, params, env):
    """
    Test the send-key command, covering all types of codeset and sysrq.

    For the normal sendkey test, we create a file to check the command
    executed by send-key. For the sysrq test, check /var/log/messages
    and the guest status.
    """
    if not virsh.has_help_command('send-key'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the send-key test")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("sendkey_options", "")
    params_test = ("yes" == params.get("sendkey_params", "no"))
    sysrq_test = ("yes" == params.get("sendkey_sysrq", "no"))
    readonly = params.get("readonly", False)
    username = params.get("username")
    password = params.get("password")
    create_file = params.get("create_file_name")

    def send_line(send_str):
        """
        Send a string to the guest with send-key, ending with Enter.
        """
        for send_ch in list(send_str):
            virsh.sendkey(vm_name, "KEY_%s" % send_ch.upper(),
                          ignore_status=False)
        virsh.sendkey(vm_name, "KEY_ENTER", ignore_status=False)

    vm = env.get_vm(vm_name)
    session = vm.wait_for_login()

    if sysrq_test:
        # Clear messages before the test
        session.cmd("echo '' > /var/log/messages")
        # Enable sysrq
        session.cmd("echo 1 > /proc/sys/kernel/sysrq")

    # Make sure the environment is clean
    session.cmd("rm -rf %s" % create_file)

    try:
        # Wait for tty1 to start
        tty1_stat = "ps aux|grep [/]sbin/.*tty.*tty1"
        timeout = 60
        while timeout >= 0 and \
                session.get_command_status(tty1_stat) != 0:
            time.sleep(1)
            timeout = timeout - 1
        if timeout < 0:
            raise error.TestFail("Cannot wait for tty1 to start in 60s")

        # Send the user and password to the guest to log in
        send_line(username)
        time.sleep(2)
        send_line(password)
        time.sleep(2)

        output = virsh.sendkey(vm_name, options, readonly=readonly)
        time.sleep(2)
        if output.exit_status != 0:
            if status_error:
                logging.info("Failed to sendkey to guest as expected, "
                             "Error: %s.", output.stderr)
                return
            else:
                raise error.TestFail("Failed to send key to guest, "
                                     "Error: %s." % output.stderr)
        elif status_error:
            raise error.TestFail("Expected failure, but succeeded instead.")

        if params_test:
            # Check if the created file exists
            cmd_ls = "ls %s" % create_file
            sec_status, sec_output = session.get_command_status_output(
                cmd_ls)
            if sec_status == 0:
                logging.info("Succeeded to create file with send key")
            else:
                raise error.TestFail("Failed to create file with send key, "
                                     "Error: %s" % sec_output)
        elif sysrq_test:
            # Check /var/log/messages according to the key sent
            if "KEY_H" in options:
                get_status = session.cmd_status("cat /var/log/messages|"
                                                "grep SysRq.*HELP")
            elif "KEY_M" in options:
                get_status = session.cmd_status("cat /var/log/messages|"
                                                "grep 'SysRq.*Show Memory'")
            elif "KEY_T" in options:
                get_status = session.cmd_status("cat /var/log/messages|"
                                                "grep 'SysRq.*Show State'")
            if get_status != 0:
                raise error.TestFail("SysRq does not take effect in guest, "
                                     "options are %s" % options)
            else:
                logging.info("Succeeded to send SysRq command")
    finally:
        session.cmd("rm -rf %s" % create_file)
        session.close()
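# NOTE: send_line() above maps each character to a KEY_* code one keypress
# at a time, which only works for characters whose keycode name is simply
# the uppercased character (letters and digits). An illustrative sketch of
# that mapping as a pure function (build_keycodes is hypothetical; it does
# not handle shifted symbols):
def build_keycodes(send_str):
    """
    Translate a string into the virsh send-key keycode sequence,
    terminated by KEY_ENTER.
    """
    return ["KEY_%s" % ch.upper() for ch in send_str] + ["KEY_ENTER"]

# e.g. build_keycodes("root") ->
#     ['KEY_R', 'KEY_O', 'KEY_O', 'KEY_T', 'KEY_ENTER']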