def create_iSCSI(params, root_dir=None):
    """
    Create an iSCSI instance (LIO-based when targetcli is available,
    TGT-based otherwise), installing the required initiator/target
    packages first.

    :param params: test parameters used to configure the iSCSI instance
    :param root_dir: working directory for the instance; defaults to the
                     avocado tmp dir, resolved at call time
    :return: an IscsiLIO or IscsiTGT instance
    :raises exceptions.TestError: when package installation fails
    """
    # Fix: the original default was data_dir.get_tmp_dir(), evaluated once
    # at function-definition time; resolve it lazily so every call picks
    # up the current tmp dir.
    if root_dir is None:
        root_dir = data_dir.get_tmp_dir()
    ubuntu = distro.detect().name == 'Ubuntu'
    # check and install iscsi initiator packages
    if ubuntu:
        iscsi_package = ["open-iscsi"]
    else:
        iscsi_package = ["iscsi-initiator-utils"]
    if not utils_package.package_install(iscsi_package):
        raise exceptions.TestError("Failed to install iscsi initiator"
                                   " packages")
    # Install linux iscsi target software targetcli
    iscsi_package = ["targetcli"]
    if not utils_package.package_install(iscsi_package):
        logging.error("Failed to install targetcli trying with scsi-"
                      "target-utils or tgt package")
        # try with scsi target utils if targetcli is not available
        if ubuntu:
            iscsi_package = ["tgt"]
        else:
            iscsi_package = ["scsi-target-utils"]
        if not utils_package.package_install(iscsi_package):
            raise exceptions.TestError("Failed to install iscsi target and"
                                       " initiator packages")
        iscsi_instance = IscsiTGT(params, root_dir)
    else:
        iscsi_instance = IscsiLIO(params, root_dir)
    return iscsi_instance
def check_mcast_network(session):
    """
    Check multicast ip address on guests.

    Verifies the multicast source address is joined on the host, collects
    a multicast-derived IP from every guest NIC, then runs an omping
    server on the host and omping clients in each guest, expecting 0%
    loss for both multicast and unicast traffic.

    :param session: login session of the primary VM
    """
    # params/iface_source/additional_vm/vm_name/test/get_guest_ip come
    # from the enclosing scope (closure) — not visible in this chunk.
    username = params.get("username")
    password = params.get("password")
    src_addr = ast.literal_eval(iface_source)['address']
    add_session = additional_vm.wait_for_serial_login(username=username,
                                                     password=password)
    vms_sess_dict = {vm_name: session,
                     additional_vm.name: add_session}
    # Check mcast address on host
    cmd = "netstat -g | grep %s" % src_addr
    if process.run(cmd, ignore_status=True, shell=True).exit_status:
        test.fail("Can't find multicast ip address"
                  " on host")
    vms_ip_dict = {}
    # Get ip address on each guest
    for vms in list(vms_sess_dict.keys()):
        vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
        vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
        if not vm_ip:
            test.fail("Can't get multicast ip"
                      " address on guest")
        vms_ip_dict.update({vms: vm_ip})
    # Each guest must have derived a distinct address
    if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
        test.fail("Got duplicated multicast ip address")
    logging.debug("Found ips on guest: %s", vms_ip_dict)
    # Run omping server on host
    if not utils_package.package_install(["omping"]):
        test.error("Failed to install omping"
                   " on host")
    cmd = ("iptables -F;omping -m %s %s" %
           (src_addr, "192.168.122.1 %s" %
            ' '.join(list(vms_ip_dict.values()))))
    # Run a backgroup job waiting for connection of client
    bgjob = utils_misc.AsyncJob(cmd)
    # Run omping client on guests
    for vms in list(vms_sess_dict.keys()):
        # omping should be installed first
        if not utils_package.package_install(["omping"], vms_sess_dict[vms]):
            test.error("Failed to install omping"
                       " on guest")
        cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
               (src_addr, "192.168.122.1 %s" % vms_ip_dict[vms]))
        ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
        logging.debug("omping ret: %s, output: %s", ret, output)
        # Expect zero packet loss for both multicast and unicast
        if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%') or
                not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
            test.fail("omping failed on guest")
    # Kill the backgroup job
    bgjob.kill_func()
def run(test, params, env):
    """
    vpum cpu cycles checking between host and guest:
    1) boot guest
    2) check cpu cycles for host
    3) check cpu cycles for guest and compare with host

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    tmp_dir = params.get("tmp_dir")
    timeout = params.get_numeric("login_timeout", 360)
    test_cmd = params.get("test_cmd")
    build_cmd = params.get("build_cmd")
    vm_arch = params["vm_arch_name"]
    host_arch = arch.ARCH
    src_dir = os.path.join(data_dir.get_deps_dir(), 'million')
    src_file = os.path.join(src_dir, "million-%s.s" % host_arch)
    dst_file = os.path.join(tmp_dir, "million-%s.s" % host_arch)
    if not utils_package.package_install("perf"):
        test.error("Install dependency packages failed")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=timeout)
    error_context.context("build binary file 'million' in host",
                          test.log.info)
    copyfile(src_file, dst_file)
    s, o = process.getstatusoutput(build_cmd % host_arch)
    if s:
        test.fail("Failed to build test command")
    error_context.context("running test command in host", test.log.info)
    s, o = process.getstatusoutput(test_cmd)
    if s:
        test.fail("Failed to run test command")
    # perf reports user-space retired instructions; capture every match
    host_cpu_cycles = re.findall(r"(\d+) *instructions:u", o, re.M)
    if not utils_package.package_install("perf", session):
        test.error("Install dependency packages failed")
    src_file = os.path.join(src_dir, "million-%s.s" % vm_arch)
    error_context.context("transfer '%s' to guest('%s')" %
                          (src_file, dst_file), test.log.info)
    vm.copy_files_to(src_file, tmp_dir, timeout=timeout)
    error_context.context("build binary file 'million' in guest",
                          test.log.info)
    session.cmd(build_cmd % vm_arch)
    error_context.context("running test command in guest", test.log.info)
    output = session.cmd_output(test_cmd, timeout=timeout)
    guest_cpu_cycles = re.findall(r"(\d+) *instructions:u", output, re.M)
    # The deterministic 'million' workload must retire the same number of
    # user-space instructions on host and guest
    if host_cpu_cycles != guest_cpu_cycles:
        test.fail("cpu cycles is different between host and guest ")
def test_guest_tpm(expect_version, session, expect_fail):
    """
    Test tpm function in guest

    :param expect_version: guest tpm version, as host version, or emulator
                           specified
    :param session: Guest session to be tested
    :param expect_fail: guest tpm is expectedly fail to work
    """
    logging.info("------Checking guest tpm device work------")
    if expect_version == "1.2":
        # Install tpm-tools and test by tcsd method
        if not utils_package.package_install(["tpm-tools"], session, 360):
            test.error("Failed to install tpm-tools package in guest")
        else:
            status, output = session.cmd_status_output(
                "systemctl start tcsd")
            logging.debug("Command output: %s", output)
            if status:
                # tcsd cannot start: expected for tpm-crb on a 1.2 host,
                # otherwise a failure
                if expect_fail:
                    test.cancel(
                        "tpm-crb passthrough only works with host tpm2.0, "
                        "but your host tpm version is 1.2")
                else:
                    test.fail("Failed to start tcsd.service in guest")
            else:
                dev_output = session.cmd_output("ls /dev/|grep tpm")
                logging.debug("Command output: %s", dev_output)
                status, output = session.cmd_status_output("tpm_version")
                logging.debug("Command output: %s", output)
                if status:
                    test.fail("Guest tpm can not work")
    else:
        # If expect_version is tpm2.0, install and test by tpm2-tools
        if not utils_package.package_install(["tpm2-tools"], session, 360):
            test.error("Failed to install tpm2-tools package in guest")
        else:
            tpm2_getrandom_cmd = get_tpm2_tools_cmd(session)
            status1, output1 = session.cmd_status_output(
                "ls /dev/|grep tpm")
            logging.debug("Command output: %s", output1)
            status2, output2 = session.cmd_status_output(
                tpm2_getrandom_cmd)
            logging.debug("Command output: %s", output2)
            if status1 or status2:
                if not expect_fail:
                    test.fail("Guest tpm can not work")
                else:
                    # Device missing as expected; at least verify the
                    # guest OS itself is still responsive
                    d_status, d_output = session.cmd_status_output("date")
                    if d_status:
                        test.fail("Guest OS doesn't work well")
                    logging.debug("Command output: %s", d_output)
            elif expect_fail:
                test.fail("Expect fail but guest tpm still works")
    logging.info("------PASS on guest tpm device work check------")
def check_mcast_network(session):
    """
    Check multicast ip address on guests

    Verifies the multicast group on the host, collects a multicast ip
    from each guest, then runs an omping server on the host and omping
    clients inside the guests, expecting 0% packet loss.

    :param session: login session of the primary VM
    """
    # params/iface_source/additional_vm/vm_name/get_guest_ip come from
    # the enclosing scope (closure) — not visible in this chunk.
    username = params.get("username")
    password = params.get("password")
    src_addr = ast.literal_eval(iface_source)['address']
    add_session = additional_vm.wait_for_serial_login(username=username,
                                                     password=password)
    vms_sess_dict = {vm_name: session,
                     additional_vm.name: add_session}
    # Check mcast address on host
    cmd = "netstat -g | grep %s" % src_addr
    if utils.run(cmd, ignore_status=True).exit_status:
        raise error.TestFail("Can't find multicast ip address"
                             " on host")
    vms_ip_dict = {}
    # Get ip address on each guest
    for vms in vms_sess_dict.keys():
        vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
        vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
        if not vm_ip:
            raise error.TestFail("Can't get multicast ip"
                                 " address on guest")
        vms_ip_dict.update({vms: vm_ip})
    # Each guest must have derived a distinct address
    if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
        raise error.TestFail("Got duplicated multicast ip address")
    logging.debug("Found ips on guest: %s", vms_ip_dict)
    # Run omping server on host
    if not utils_package.package_install(["omping"]):
        raise error.TestError("Failed to install omping"
                              " on host")
    cmd = ("iptables -F;omping -m %s %s" %
           (src_addr, "192.168.122.1 %s" %
            ' '.join(vms_ip_dict.values())))
    # Run a backgroup job waiting for connection of client
    bgjob = utils.AsyncJob(cmd)
    # Run omping client on guests
    for vms in vms_sess_dict.keys():
        # omping should be installed first
        if not utils_package.package_install(["omping"], vms_sess_dict[vms]):
            raise error.TestError("Failed to install omping"
                                  " on guest")
        cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
               (src_addr, "192.168.122.1 %s" % vms_ip_dict[vms]))
        ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
        logging.debug("omping ret: %s, output: %s", ret, output)
        # Expect zero packet loss for both multicast and unicast
        if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%') or
                not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
            raise error.TestFail("omping failed on guest")
    # Kill the backgroup job
    bgjob.kill_func()
def create_bridge(br_name, iface_name):
    """
    Create a bridge on top of a physical interface and hand the
    interface's IP configuration over to the bridge via dhclient.
    """
    # Refuse to proceed when a bridge with this name is already defined
    if libvirt.check_iface(br_name, "exists", "--all"):
        test.cancel("The bridge %s already exist" % br_name)
    # tmux keeps the command sequence alive while the link we are talking
    # over is being reconfigured
    utils_package.package_install('tmux')
    actions = [
        "ip link add name %s type bridge" % br_name,
        "ip link set %s up" % iface_name,
        "ip link set %s master %s" % (iface_name, br_name),
        "ip link set %s up" % br_name,
        "pkill dhclient",
        "sleep 6",
        "dhclient %s" % br_name,
        "ifconfig %s 0" % iface_name,
    ]
    process.run('tmux -c "%s"' % "; ".join(actions),
                shell=True, verbose=True)
def run_guest_libvirt(session):
    """
    Check guest libvirt network

    Installs libvirt in the guest, verifies the 'default' network is
    inactive, then confirms that starting it fails with an
    "already in use" error; libvirt packages are removed afterwards.

    :param session: guest login session
    :raises error.TestError: when package install fails
    :raises error.TestFail: when the network check fails
    """
    # Try to install required packages
    # Fix: corrected "ot" -> "to" typo in the error message
    if not utils_package.package_install(['libvirt'], session):
        raise error.TestError("Failed to install libvirt"
                              " package on guest")
    result = True
    # Try to load tun module first
    session.cmd("lsmod | grep tun || modprobe tun")
    # Check network state on guest
    cmd = ("service libvirtd restart; virsh net-info default"
           " | grep 'Active:.*no'")
    if session.cmd_status(cmd):
        result = False
        logging.error("Default network isn't in inactive state")
    # Try to start default network on guest, check error messages
    if result:
        cmd = "virsh net-start default"
        status, output = session.cmd_status_output(cmd)
        logging.debug("Run command on guest exit %s, output %s"
                      % (status, output))
        # net-start is expected to fail because the address is taken
        if not status or not output.count("already in use"):
            result = False
            # Fix: corrected "messges" -> "messages" typo
            logging.error("Failed to see network messages on guest")
    if not utils_package.package_remove("libvirt*", session):
        logging.error("Failed to remove libvirt packages on guest")
    if not result:
        raise error.TestFail("Check libvirt network on guest failed")
def get_ping_dest(vm_session, mac_addr="", restart_network=False):
    """
    Get an ip address to ping

    :param vm_session: The session object to the guest
    :param mac_addr: mac address of given interface
    :param restart_network: Whether to restart guest's network
    :return: ip address (the guest subnet's .1 gateway address)
    :raises exceptions.TestFail: when dhcp-client install fails
    :raises exceptions.TestError: when the route lookup fails
    """
    if restart_network:
        if not utils_package.package_install('dhcp-client',
                                             session=vm_session):
            raise exceptions.TestFail(
                "Failed to install dhcp-client on guest.")
        utils_net.restart_guest_network(vm_session)
    vm_iface = utils_net.get_linux_ifname(vm_session, mac_addr)
    # get_linux_ifname may return one name or a list of names
    if isinstance(vm_iface, list):
        iface_name = vm_iface[0]
    else:
        iface_name = vm_iface
    # Wait up to 20s for the interface to obtain an IPv4 address
    utils_misc.wait_for(
        lambda: utils_net.get_net_if_addrs(
            iface_name, vm_session.cmd_output).get('ipv4'), 20)
    cmd = ("ip route |awk -F '/' '/^[0-9]/, /dev %s/ {print $1}'"
           % iface_name)
    status, output = utils_misc.cmd_status_output(cmd, shell=True,
                                                  session=vm_session)
    if status or not output:
        raise exceptions.TestError("Failed to run cmd - {}, status - {}, "
                                   "output - {}.".format(cmd, status,
                                                         output))
    # Fix: raw string for the regex — '\d' in a plain literal is an
    # invalid escape sequence (DeprecationWarning on modern Python).
    # Replace the trailing host part with .1 (gateway).
    return re.sub(r'\d+$', '1', output.strip().splitlines()[-1])
def add_or_del_connection(params, session=None, is_del=False):
    """
    Add/Delete connections

    :param params: the parameters dict
    :param session: The session object to the host
    :param is_del: Whether the connection should be deleted
    """
    bridge_name = params.get("bridge_name")
    pf_name = params.get("pf_name")
    # Both names are required; silently skip otherwise
    if not all([bridge_name, pf_name]):
        return

    if not utils_package.package_install(["tmux", "dhcp-client"], session):
        LOG.error("Failed to install the required package")
    # Command that detaches the PF from the bridge, deletes the bridge
    # and re-runs dhclient to restore connectivity
    recover_cmd = 'tmux -c "ip link set {0} nomaster; ip link delete {1}; ' \
                  'pkill dhclient; sleep 5; dhclient"'.format(
                      pf_name, bridge_name)

    if not is_del:
        # Clean up any leftover state first (best effort, status
        # ignored), then create the bridge and enslave the PF to it
        utils_misc.cmd_status_output(recover_cmd, shell=True,
                                     session=session)
        cmd = 'tmux -c "ip link add name {1} type bridge; ip link set {0} up; ' \
              'ip link set {0} master {1}; ip link set {1} up; dhclient -r;' \
              'sleep 5; dhclient"'.format(pf_name, bridge_name)
    else:
        cmd = recover_cmd
    utils_misc.cmd_status_output(cmd, shell=True, verbose=True,
                                 ignore_status=False, session=session)
def prepare_guest_for_test(vm_name, session, test, oversize, nodeset_string):
    """
    Setup guest

    :param vm_name: name of the VM to be executed on
    :param session: current session to execute commands on
    :param test: test object
    :param oversize: memory to be taken
    :param nodeset_string: nodeset string with nodes to be spread on
    """
    def _fail_on_error(result, message):
        # virsh helpers return a CmdResult; nonzero exit means failure
        if result.exit_status:
            test.fail(message)

    _fail_on_error(
        virsh.numatune(vm_name, debug=True),
        "Something went wrong during the virsh numatune command.")
    _fail_on_error(
        virsh.numatune(vm_name, mode='0', nodeset=nodeset_string,
                       debug=True),
        "Something went wrong during the 'virsh numatune 0 {}' "
        "command.".format(nodeset_string))
    _fail_on_error(
        virsh.setmem(vm_name, oversize, debug=True),
        "Something went wrong during the 'virsh setmem {}' "
        "command.".format(oversize))
    # Turn off a swap on guest
    session.cmd_status('swapoff -a', timeout=10)
    # Install the numactl package on the guest for a memhog program
    if not utils_package.package_install('numactl', session):
        test.fail("Failed to install package numactl on guest.")
def install_stressapptest(vm):
    """
    Install stressapptest cmd

    Clones the upstream stressapptest repository inside the guest and
    builds/installs it from source.

    :param vm: the vm to be installed with stressapptest
    :raises exceptions.TestError: when package install or the build fails
    """
    session = vm.wait_for_login(timeout=360)
    name = ["git", "gcc", "gcc-c++", "make"]
    if not utils_package.package_install(name, session, timeout=300):
        raise exceptions.TestError("Installation of packages %s in guest "
                                   "failed" % name)
    app_repo = ("git clone https://github.com/stressapptest/"
                "stressapptest.git")
    # Chain the build steps so any failing step aborts the pipeline
    build_steps = [
        "rm -rf stressapptest",
        app_repo,
        "cd stressapptest",
        "./configure",
        "make",
        "make install",
    ]
    status, output = session.cmd_status_output(" && ".join(build_steps))
    if status:
        raise exceptions.TestError("Failed to install stressapptest "
                                   "in guest: '%s'" % output)
    session.close()
def replace_os_disk(vm_xml, vm_name, nvram): """ Replace os(nvram) and disk(uefi) for x86 vtpm test :param vm_xml: current vm's xml :param vm_name: current vm name :param nvram: nvram file path of vm """ # Add loader, nvram in <os> nvram = nvram.replace("<VM_NAME>", vm_name) dict_os_attrs = {"loader_readonly": "yes", "secure": "yes", "loader_type": "pflash", "loader": loader, "nvram": nvram} vm_xml.set_os_attrs(**dict_os_attrs) logging.debug("Set smm=on in VMFeaturesXML") # Add smm in <features> features_xml = vm_xml.features features_xml.smm = "on" vm_xml.features = features_xml vm_xml.sync() # Replace disk with an uefi image if not utils_package.package_install("wget"): test.error("Failed to install wget on host") if uefi_disk_url.count("EXAMPLE"): test.error("Please provide the URL %s" % uefi_disk_url) else: download_cmd = ("wget %s -O %s" % (uefi_disk_url, download_file_path)) process.system(download_cmd, verbose=False, shell=True) vm = env.get_vm(vm_name) uefi_disk = {'disk_source_name': download_file_path} libvirt.set_vm_disk(vm, uefi_disk)
def prepare_pxe_boot():
    """
    Prepare tftp server and pxe boot files

    Installs the tftp/syslinux tooling, downloads the initrd/vmlinuz
    images given by the 'boot_initrd'/'boot_vmlinuz' params into the
    tftp root, and writes a default pxelinux config that boots them.
    """
    pkg_list = ["syslinux", "tftp-server",
                "tftp", "ipxe-roms-qemu", "wget"]
    # Try to install required packages
    if not utils_package.package_install(pkg_list):
        # Fix: corrected "ot" -> "to" typo in the error message
        test.error("Failed to install required packages")
    boot_initrd = params.get("boot_initrd", "EXAMPLE_INITRD")
    boot_vmlinuz = params.get("boot_vmlinuz", "EXAMPLE_VMLINUZ")
    if boot_initrd.count("EXAMPLE") or boot_vmlinuz.count("EXAMPLE"):
        test.cancel("Please provide initrd/vmlinuz URL")
    # Download pxe boot images
    process.system("wget %s -O %s/initrd.img" % (boot_initrd, tftp_root))
    process.system("wget %s -O %s/vmlinuz" % (boot_vmlinuz, tftp_root))
    process.system("cp -f /usr/share/syslinux/pxelinux.0 {0};"
                   " mkdir -m 777 -p {0}/pxelinux.cfg".format(tftp_root),
                   shell=True)
    pxe_file = "%s/pxelinux.cfg/default" % tftp_root
    boot_txt = """
DISPLAY boot.txt
DEFAULT rhel
LABEL rhel
        kernel vmlinuz
        append initrd=initrd.img
PROMPT 1
TIMEOUT 3"""
    with open(pxe_file, 'w') as p_file:
        p_file.write(boot_txt)
def install_kernel(session, url=None, kernel_debug=False):
    """
    Install kernel to vm

    :param session: guest session to run the install commands in
    :param url: url of a kernel rpm (must end with '.rpm')
    :param kernel_debug: True to install the 'kernel-debug' package
                         instead of an rpm from *url*
    """
    if kernel_debug:
        if not utils_package.package_install(['kernel-debug'],
                                             session=session):
            test.error('Fail on installing debug kernel')
        else:
            logging.info('Install kernel-debug success')
    else:
        if not (url and url.endswith('.rpm')):
            test.error('kernel url not contain ".rpm"')
        # rhel6 need to install kernel-firmware first
        if '.el6' in session.cmd('uname -r'):
            kernel_fm_url = params.get('kernel_fm_url')
            cmd_install_firmware = 'rpm -Uv %s --force' % kernel_fm_url
            try:
                session.cmd(cmd_install_firmware, timeout=v2v_timeout)
            # Fix: Python 2 'except Exception, e' is a SyntaxError on
            # Python 3 — use the 'as' form.
            except Exception as e:
                test.error(str(e))
        cmd_install_kernel = 'rpm -iv %s --force' % url
        try:
            session.cmd(cmd_install_kernel, timeout=v2v_timeout)
        except Exception as e:
            test.error(str(e))
def fill_null_in_vm(vm, target, size_value=500):
    """
    File something in the disk of VM

    Creates a partition and an ext3 filesystem on the target disk inside
    the guest, mounts it and fills most of it with zeros via dd.

    :param vm: VM guest
    :param target: disk dev in VM
    :param size_value: number in MiB
    :raises exceptions.TestError: on any failure inside the guest
    """
    try:
        session = vm.wait_for_login()
        if not utils_package.package_install(["parted"], session,
                                             timeout=300):
            logging.error("Failed to install the required 'parted' package")
        device_source = os.path.join(os.sep, 'dev', target)
        libvirt.mk_label(device_source, session=session)
        libvirt.mk_part(device_source, size="%sM" % size_value,
                        session=session)
        # Run partprobe to make the change take effect.
        process.run("partprobe", ignore_status=True, shell=True)
        libvirt.mkfs("/dev/%s1" % target, "ext3", session=session)
        # Leave ~100MiB headroom so the dd below does not fill the fs
        count_number = size_value - 100
        cmd = ("mount /dev/%s1 /mnt && dd if=/dev/zero of=/mnt/testfile "
               "bs=1024 count=1024x%s "
               " && umount /mnt" % (target, count_number))
        s, o = session.cmd_status_output(cmd)
        logging.info("Check disk operation in VM:\n%s", o)
        session.close()
        if s != 0:
            raise exceptions.TestError(
                "Error happened when executing command:\n%s" % cmd)
    except Exception as e:
        # Re-raise anything (login/command errors) as a TestError
        raise exceptions.TestError(str(e))
def prepare_guest_for_test(vm_name, session, test, oversize, memory_to_eat):
    """
    Setup guest

    :param vm_name: name of the VM to be executed on
    :param session: current session to execute commands on
    :param test: test object
    :param oversize: memory to be taken
    :param memory_to_eat: The memory guest will use
    """
    result = virsh.setmem(vm_name, oversize, debug=True)
    if result.exit_status:
        test.fail("Something went wrong during the 'virsh setmem {}' "
                  "command.".format(oversize))

    def _check_mem(memory_to_eat):
        # Read the balloon's current size from 'virsh dommemstat'
        dommemstat_output = virsh.dommemstat(vm_name).stdout_text.strip()
        # Fix: raw string for the regex — '\d' in a plain literal is an
        # invalid escape sequence (DeprecationWarning on modern Python)
        actual_mem = re.search(r"actual (\d*)",
                               dommemstat_output).groups()[0]
        logging.debug("actual_mem is {}".format(actual_mem))
        return int(actual_mem) > int(memory_to_eat)

    if not utils_misc.wait_for(lambda: _check_mem(memory_to_eat),
                               300, first=5):
        test.error("Failed to increase specific guest memory in time")
    # Turn off a swap on guest
    session.cmd_status('swapoff -a', timeout=10)
    # Install the numactl package on the guest for a memhog program
    if not utils_package.package_install('numactl', session):
        test.fail("Failed to install package numactl on guest.")
def setup_ceph_auth():
    """
    Prepare an authenticated ceph RBD disk for the test.

    Installs ceph-common, writes the client keyring, removes any stale
    image, then creates a small local ext4 image and converts it to the
    remote rbd disk in raw format.
    """
    # ceph_*/key_file/key_opt/test come from the enclosing scope
    # (closure) — not visible in this chunk.
    disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip))
    disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key))
    if not utils_package.package_install(["ceph-common"]):
        test.error("Failed to install ceph-common")
    with open(key_file, 'w') as f:
        f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key))
    # Delete the disk if it exists
    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
           "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
    process.run(cmd, ignore_status=True, shell=True)
    # Create an local image and make FS on it.
    img_file = os.path.join(data_dir.get_tmp_dir(), "test.img")
    disk_cmd = ("qemu-img create -f raw {0} 10M && mkfs.ext4 -F {0}".
                format(img_file))
    process.run(disk_cmd, ignore_status=False, shell=True)
    # Convert the image to remote storage
    # Ceph can only support raw format
    disk_cmd = ("qemu-img convert -O %s %s %s" %
                ("raw", img_file, disk_path))
    process.run(disk_cmd, ignore_status=False, shell=True)
def run(test, params, env):
    """
    Run nvdimm cases:
    1) Boot guest with two nvdimm devices
    2) Change the two nvdimm devices to dax mode inside guest
    3) Check if both devices are dax mode

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    import ast

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login()
    if not utils_package.package_install("ndctl", session):
        test.cancel("Please install ndctl inside guest to proceed")
    create_dax_cmd = params["create_dax_cmd"]
    nvdimm_number = len(params["mem_devs"].split())
    try:
        for i in range(nvdimm_number):
            session.cmd(create_dax_cmd % i)
        output = session.cmd_output(params["ndctl_check_cmd"])
        # Security fix: parse the guest output as a literal instead of
        # eval() on untrusted text; literal_eval accepts exactly the
        # literal data structures eval() would have produced here.
        output = ast.literal_eval(output)
        for item in output:
            if item['mode'] != 'devdax':
                test.fail("Change both nvdimm to dax mode failed")
    finally:
        utils_package.package_remove("ndctl", session)
        session.close()
        vm.destroy()
def run_guest_libvirt(session):
    """
    Check guest libvirt network

    Installs the distro-appropriate libvirt package, verifies the
    'default' network is active, destroys and restarts it, and removes
    the libvirt packages afterwards.

    :param session: guest login session
    """
    # Try to install required packages
    if "ubuntu" in vm.get_distro().lower():
        pkg = "libvirt-bin"
    else:
        pkg = "libvirt"
    if not utils_package.package_install(pkg, session):
        test.error("Failed to install libvirt package on guest")
    # Try to load tun module first
    session.cmd("lsmod | grep tun || modprobe tun")
    # Check network state on guest
    cmd = ("service libvirtd restart; virsh net-info default"
           " | grep 'Active:.*yes'")
    if session.cmd_status(cmd):
        test.fail("'default' network isn't in active state")
    # Try to destroy&start default network on guest
    for opt in ['net-destroy', 'net-start']:
        cmd = "virsh %s default" % opt
        status, output = session.cmd_status_output(cmd)
        # Idiom fix: pass lazy %-style arguments to logging instead of
        # pre-formatting the message with '%'
        logging.debug("Run %s on guest exit %s, output %s",
                      cmd, status, output)
        if status:
            test.fail(output)
    if not utils_package.package_remove("libvirt*", session):
        test.error("Failed to remove libvirt packages on guest")
def run_guest_libvirt(session):
    """
    Check guest libvirt network

    Installs the distro-appropriate libvirt package, checks the
    'default' network is active, cycles it with net-destroy/net-start,
    and removes libvirt packages at the end.

    :param session: guest login session
    """
    distro_name = vm.get_distro().lower()
    pkg = "libvirt-bin" if "ubuntu" in distro_name else "libvirt"
    if not utils_package.package_install(pkg, session):
        test.error("Failed to install libvirt package on guest")
    # Make sure the tun module is loaded before touching networks
    session.cmd("lsmod | grep tun || modprobe tun")
    # The default network must already be active
    state_cmd = ("service libvirtd restart; virsh net-info default"
                 " | grep 'Active:.*yes'")
    if session.cmd_status(state_cmd):
        test.fail("'default' network isn't in active state")
    # Cycle the network: destroy it, then start it again
    for opt in ['net-destroy', 'net-start']:
        cmd = "virsh %s default" % opt
        status, output = session.cmd_status_output(cmd)
        logging.debug("Run %s on guest exit %s, output %s"
                      % (cmd, status, output))
        if status:
            test.fail(output)
    if not utils_package.package_remove("libvirt*", session):
        test.error("Failed to remove libvirt packages on guest")
def prepare_pxe_boot():
    """
    Prepare tftp server and pxe boot files

    Installs the tftp/syslinux tooling, downloads the initrd/vmlinuz
    images given by the 'boot_initrd'/'boot_vmlinuz' params into the
    tftp root, and writes a default pxelinux config that boots them.
    """
    pkg_list = ["syslinux", "tftp-server",
                "tftp", "ipxe-roms-qemu", "wget"]
    # Try to install required packages
    if not utils_package.package_install(pkg_list):
        # Fix: corrected "ot" -> "to" typo in the error message
        test.error("Failed to install required packages")
    boot_initrd = params.get("boot_initrd", "EXAMPLE_INITRD")
    boot_vmlinuz = params.get("boot_vmlinuz", "EXAMPLE_VMLINUZ")
    if boot_initrd.count("EXAMPLE") or boot_vmlinuz.count("EXAMPLE"):
        test.cancel("Please provide initrd/vmlinuz URL")
    # Download pxe boot images
    process.system("wget %s -O %s/initrd.img" % (boot_initrd, tftp_root))
    process.system("wget %s -O %s/vmlinuz" % (boot_vmlinuz, tftp_root))
    process.system("cp -f /usr/share/syslinux/pxelinux.0 {0};"
                   " mkdir -m 777 -p {0}/pxelinux.cfg".format(tftp_root),
                   shell=True)
    pxe_file = "%s/pxelinux.cfg/default" % tftp_root
    boot_txt = """
DISPLAY boot.txt
DEFAULT rhel
LABEL rhel
        kernel vmlinuz
        append initrd=initrd.img
PROMPT 1
TIMEOUT 3"""
    with open(pxe_file, 'w') as p_file:
        p_file.write(boot_txt)
def numa_cpu_guest():
    """
    Get the cpu id list for each node in guest os, sort with node id.

    On ppc64/ppc64le the mapping comes from NumaInfo; elsewhere it is
    parsed out of the guest's SRAT ACPI table (proximity domain ->
    APIC id pairs).

    :return: list of per-node sorted cpu id lists, ordered by node id
    """
    error_context.context("Get cpus in guest os", logging.info)
    # Fix: the local result list used to shadow the function's own name;
    # renamed to keep the function re-entrant and readable.
    guest_node_cpus = []
    if vm_arch in ('ppc64', 'ppc64le'):
        numa_info_guest = NumaInfo(session=session)
        nodes_guest = numa_info_guest.online_nodes
        for node in nodes_guest:
            numa_cpus = numa_info_guest.online_nodes_cpus[node]
            numa_cpus = sorted([int(v) for v in numa_cpus.split()])
            guest_node_cpus.append(numa_cpus)
    else:
        error_context.context("Get SRAT ACPI table", logging.info)
        if not utils_package.package_install("acpidump", session):
            test.cancel("Please install acpidump in guest to proceed")
        content = session.cmd_output('cd /tmp && acpidump -n SRAT -b && '
                                    'iasl -d srat.dat && cat srat.dsl')
        pattern = re.compile(
            r'Proximity Domain Low\(8\)\s+:\s+([0-9A-Fa-f]+)'
            r'\n.*Apic ID\s+:\s+([0-9A-Fa-f]+)')
        node_cpus = pattern.findall(content)
        # Group APIC ids (cpus) by proximity domain (node); values in the
        # table are hex strings
        nodes = {}
        for domain, apic in node_cpus:
            nodes.setdefault(int(domain, 16), []).append(int(apic, 16))
        for _, cpus in sorted(nodes.items()):
            guest_node_cpus.append(sorted(cpus))
    return guest_node_cpus
def create_large_image(): """ Create large image in guest """ # install dependent packages pkg_list = ["parted", "e2fsprogs"] for pkg in pkg_list: if not utils_package.package_install(pkg, session): test.error("Failed to install dependent package %s" % pkg) # create partition and file system session.cmd("parted -s %s mklabel msdos" % new_disk) session.cmd("parted -s %s mkpart primary ext3 '0%%' '100%%'" % new_disk) # mount disk and write file in it session.cmd("mkfs.ext3 %s1" % new_disk) session.cmd("mkdir -p %s && mount %s1 %s" % (mnt_dir, new_disk, mnt_dir)) # The following step may cause guest paused before it return try: session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 " "count=51200 && sync" % mnt_dir) except Exception as err: logging.debug("Expected Fail %s", err) session.close()
def install_kernel(session, url=None, kernel_debug=False):
    """
    Install kernel to vm

    :param session: guest session to run the install commands in
    :param url: url of a kernel rpm (must end with '.rpm')
    :param kernel_debug: True to install the 'kernel-debug' package
                         instead of an rpm from *url*
    """
    if kernel_debug:
        if not utils_package.package_install(['kernel-debug'],
                                             session=session):
            test.error('Fail on installing debug kernel')
        else:
            logging.info('Install kernel-debug success')
    else:
        if not (url and url.endswith('.rpm')):
            test.error('kernel url not contain ".rpm"')
        # rhel6 need to install kernel-firmware first
        if '.el6' in session.cmd('uname -r'):
            kernel_fm_url = params.get('kernel_fm_url')
            cmd_install_firmware = 'rpm -Uv %s --force' % kernel_fm_url
            try:
                session.cmd(cmd_install_firmware, timeout=v2v_timeout)
            # Fix: Python 2 'except Exception, e' is a SyntaxError on
            # Python 3 — use the 'as' form.
            except Exception as e:
                test.error(str(e))
        cmd_install_kernel = 'rpm -iv %s --force' % url
        try:
            session.cmd(cmd_install_kernel, timeout=v2v_timeout)
        except Exception as e:
            test.error(str(e))
def setup_ceph_auth():
    """
    Prepare an authenticated ceph RBD disk for the test.

    Installs ceph-common, writes the client keyring, removes any stale
    image, then creates a small local ext4 image and converts it to the
    remote rbd disk in raw format.
    """
    # ceph_*/key_file/key_opt/test come from the enclosing scope
    # (closure) — not visible in this chunk.
    disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip))
    disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key))
    if not utils_package.package_install(["ceph-common"]):
        test.error("Failed to install ceph-common")
    with open(key_file, 'w') as f:
        f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key))
    # Delete the disk if it exists
    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
           "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
    process.run(cmd, ignore_status=True, shell=True)
    # Create an local image and make FS on it.
    img_file = os.path.join(data_dir.get_tmp_dir(), "test.img")
    disk_cmd = ("qemu-img create -f raw {0} 10M && mkfs.ext4 -F {0}"
                .format(img_file))
    process.run(disk_cmd, ignore_status=False, shell=True)
    # Convert the image to remote storage
    # Ceph can only support raw format
    disk_cmd = ("qemu-img convert -O %s %s %s" %
                ("raw", img_file, disk_path))
    process.run(disk_cmd, ignore_status=False, shell=True)
def setUp(self):
    """
    Avocado-vt uses custom setUp/test/tearDown handling and unlike
    Avocado it allows skipping tests from any phase. To convince
    Avocado to allow skips let's say our tests run during setUp phase
    and report the status in test.
    """
    # Force a predictable locale for the duration of the test; the
    # original LANG is restored in the finally block below.
    env_lang = os.environ.get('LANG')
    os.environ['LANG'] = 'C'
    try:
        self._runTest()
        self.__status = "PASS"
    # This trick will give better reporting of virt tests being executed
    # into avocado (skips, warns and errors will display correctly)
    except exceptions.TestSkipError:
        self.__exc_info = sys.exc_info()
        raise  # This one has to be raised in setUp
    except:  # nopep8 Old-style exceptions are not inherited from Exception()
        # Record the failure; it is reported later from the test phase
        # rather than re-raised here.
        self.__exc_info = sys.exc_info()
        self.__status = self.__exc_info[1]
    finally:
        # Clean libvirtd debug logs if the test is not fail or error
        if self.params.get("libvirtd_log_cleanup", "no") == "yes":
            if (self.params.get("vm_type") == 'libvirt' and
                    self.params.get("enable_libvirtd_debug_log",
                                    "yes") == "yes"):
                libvirtd_log = self.params["libvirtd_debug_file"]
                if ("TestFail" not in str(self.__exc_info) and
                        "TestError" not in str(self.__exc_info)):
                    if libvirtd_log and os.path.isfile(libvirtd_log):
                        logging.info("cleaning libvirtd logs...")
                        os.remove(libvirtd_log)
                else:
                    # tar the libvirtd log and archive
                    logging.info("archiving libvirtd debug logs")
                    from virttest import utils_package
                    if utils_package.package_install("tar"):
                        if os.path.isfile(libvirtd_log):
                            archive = os.path.join(
                                os.path.dirname(libvirtd_log),
                                "libvirtd.tar.gz")
                            # NOTE(review): pipes.quote is deprecated in
                            # favour of shlex.quote — confirm before
                            # changing, pipes may be pinned elsewhere.
                            cmd = ("tar -zcf %s -P %s"
                                   % (pipes.quote(archive),
                                      pipes.quote(libvirtd_log)))
                            if process.system(cmd) == 0:
                                os.remove(libvirtd_log)
                        else:
                            logging.error("Unable to find log file: %s",
                                          libvirtd_log)
                    else:
                        logging.error(
                            "Unable to find tar to compress libvirtd "
                            "logs")
        # Restore the caller's LANG (or drop ours if none was set)
        if env_lang:
            os.environ['LANG'] = env_lang
        else:
            del os.environ['LANG']
def run(test, params, env):
    """
    Run htm cases:
    Case one
    1) Download unit test suite and configure it
    2) Run kvm test on host
    3) Check host is still available
    Case two
    1) Download test application in the guest
    2) Run it in the guest

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    if params["unit_test"] == "yes":
        # Case one: compile and run the htm unit test suite on the host
        error_context.context("Prepare unit test on host", logging.info)
        cmds = [params["get_htm_dir"], params["compile_htm"]]
        for cmd in cmds:
            s, o = process.getstatusoutput(cmd, timeout=3600)
            if s:
                test.error("Failed to run cmd '%s', output: %s" % (cmd, o))
        error_context.context("Run htm unit test on host", logging.info)
        s, o = process.getstatusoutput(params["run_htm_test"],
                                       timeout=3600)
        if s:
            test.fail("Run htm unit test failed, output: %s" % o)
        # Make sure if host is available by do commands on host
        status, output = process.getstatusoutput("rm -rf %s"
                                                 % params["htm_dir"])
        if status:
            test.fail("Please check host's status: %s" % output)
        utils_misc.verify_dmesg()
    else:
        # Case two: download and run the htm demo inside the guest
        check_exist_cmd = params["check_htm_env"]
        s, o = process.getstatusoutput(check_exist_cmd)
        if s:
            test.error(
                "Please check htm is supported or not by '%s', output: %s"
                % (check_exist_cmd, o))
        vm = env.get_vm(params["main_vm"])
        session = vm.wait_for_login()
        pkgs = params["depends_pkgs"].split()
        if not utils_package.package_install(pkgs, session):
            test.error("Install dependency packages failed")
        session.cmd(params["get_htm_dir"])
        download_htm_demo = params["download_htm_demo"]
        status = session.cmd_status("wget %s" % download_htm_demo)
        if status:
            test.error(
                "Failed to download test file, please configure it in "
                "cfg : %s" % download_htm_demo)
        else:
            status, output = session.cmd_status_output(
                params["test_htm_command"])
            if not re.search(params["expected_htm_test_result"], output):
                test.fail("Test failed and please check : %s" % output)
        vm.verify_kernel_crash()
def create_bridge(br_name, iface_name):
    """
    Create bridge attached to physical interface

    :param br_name: bridge to be created
    :param iface_name: physical interface name
    :return:
    """
    # Nothing to do when the bridge is already present
    if libvirt.check_iface(br_name, "exists", "--all"):
        return
    # tmux keeps the reconfiguration running even if our own session
    # over this interface drops mid-way
    utils_package.package_install('tmux')
    steps = [
        "ip link add name %s type bridge" % br_name,
        "ip link set %s up" % iface_name,
        "ip link set %s master %s" % (iface_name, br_name),
        "ip link set %s up" % br_name,
        "pkill dhclient",
        "sleep 6",
        "dhclient %s" % br_name,
        "ifconfig %s 0" % iface_name,
    ]
    process.run('tmux -c "%s"' % "; ".join(steps),
                shell=True, verbose=True)
def install_pkg(test, pkg_list, vm_session):
    """
    Install required packages in the vm

    :param test: test object
    :param pkg_list: list, package names
    :param vm_session: vm session
    """
    installed = utils_package.package_install(pkg_list, vm_session)
    if not installed:
        test.error("Failed to install package '{}' in the vm".format(pkg_list))
def check_feature(vm, feature="", vcpu=0):
    """
    Checks the given feature is present

    :param vm: VM Name
    :param feature: feature to be verified
    :param vcpu: vcpu number to pin guest test
    :return: true on success, test fail on failure
    """
    session = vm.wait_for_login()
    # Map the requested feature to a guest-side check command.  Substring
    # matching is used, so branch order matters (e.g. 'xive' before 'xics').
    if 'power8' in feature:
        cmd = 'lscpu|grep -i "Model name:.*power8"'
    elif 'xive' in feature:
        # remove -v once guest xive support is available
        # right now power9 guest supports only xics
        cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
    elif 'xics' in feature:
        cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
    elif 'power9' in feature:
        cmd = 'lscpu|grep -i "Model name:.*power9"'
    elif 'hpt' in feature:
        # Hash page table MMU mode
        cmd = 'grep "MMU.*: Hash" /proc/cpuinfo'
    elif 'rpt' in feature:
        # Radix page table MMU mode
        cmd = 'grep "MMU.*: Radix" /proc/cpuinfo'
    elif 'isa' in feature:
        # Compile and run a probe binary pinned to the given vcpu; the
        # fail message below identifies .long 0x7c0005e6 as an isa3.0
        # instruction
        utils_package.package_install('gcc', session)
        cmd = "echo 'int main(){asm volatile (\".long 0x7c0005e6\");"
        cmd += "return 0;}' > ~/a.c;cc ~/a.c;taskset -c %s ./a.out" % vcpu
    # NOTE(review): 'cmd' is unbound if 'feature' matches none of the
    # branches above — callers appear to always pass a known feature
    status, output = session.cmd_status_output(cmd)
    logging.debug(output)
    session.close()
    # test, guest_version and host_version come from the enclosing scope
    if feature != "isa2.7":
        if status != 0:
            test.fail("Feature: %s check failed inside "
                      "%s guest on %s host" % (feature, guest_version,
                                               host_version))
    else:
        # On an isa2.7 guest the isa3.0 probe instruction is expected to
        # fail; success means the guest exposes more ISA than requested
        if status == 0:
            test.fail("isa3.0 instruction succeeds in "
                      "%s guest on %s host" % (guest_version, host_version))
    return True
def check_feature(vm, feature="", vcpu=0):
    """
    Checks the given feature is present

    NOTE(review): this function appears verbatim twice in this file —
    consider deduplicating into a shared helper.

    :param vm: VM Name
    :param feature: feature to be verified
    :param vcpu: vcpu number to pin guest test
    :return: true on success, test fail on failure
    """
    session = vm.wait_for_login()
    # Substring matching selects the guest-side check command; branch
    # order matters (e.g. 'xive' before 'xics').
    if 'power8' in feature:
        cmd = 'lscpu|grep -i "Model name:.*power8"'
    elif 'xive' in feature:
        # remove -v once guest xive support is available
        # right now power9 guest supports only xics
        cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
    elif 'xics' in feature:
        cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
    elif 'power9' in feature:
        cmd = 'lscpu|grep -i "Model name:.*power9"'
    elif 'hpt' in feature:
        # Hash page table MMU mode
        cmd = 'grep "MMU.*: Hash" /proc/cpuinfo'
    elif 'rpt' in feature:
        # Radix page table MMU mode
        cmd = 'grep "MMU.*: Radix" /proc/cpuinfo'
    elif 'isa' in feature:
        # Build and run a probe binary pinned to the given vcpu; the fail
        # message below identifies .long 0x7c0005e6 as an isa3.0 instruction
        utils_package.package_install('gcc', session)
        cmd = "echo 'int main(){asm volatile (\".long 0x7c0005e6\");"
        cmd += "return 0;}' > ~/a.c;cc ~/a.c;taskset -c %s ./a.out" % vcpu
    # NOTE(review): 'cmd' is unbound if no branch above matched
    status, output = session.cmd_status_output(cmd)
    logging.debug(output)
    session.close()
    # test, guest_version and host_version come from the enclosing scope
    if feature != "isa2.7":
        if status != 0:
            test.fail("Feature: %s check failed inside "
                      "%s guest on %s host" % (feature, guest_version,
                                               host_version))
    else:
        # On an isa2.7 guest the isa3.0 probe is expected to fail
        if status == 0:
            test.fail("isa3.0 instruction succeeds in "
                      "%s guest on %s host" % (guest_version, host_version))
    return True
def install_pkgs(session, cpuid_uri, test):
    """
    Install the cpuid package within the vm

    :param session: vm session
    :param cpuid_uri: url of the cpuid rpm to download and install;
                      falsy value means there is nothing to do
    :param test: the test object
    :raises: test.error if installation fails
    """
    if not cpuid_uri:
        return
    # wget is needed in the guest to pull the rpm down
    if not utils_package.package_install('wget', session=session):
        test.error("Fail to install 'wget' tool via repo")
    utils_misc.cmd_status_output('wget %s' % cpuid_uri,
                                 shell=True,
                                 ignore_status=False,
                                 verbose=True,
                                 session=session)
    if not utils_package.package_install('cpuid*.rpm', session=session):
        test.error("Fail to install package "
                   "'%s'" % os.path.basename(cpuid_uri))
def create_bridge(br_name, iface_name):
    """
    Create a linux bridge by virsh cmd:
    1. Stop NetworkManager and Start network service
    2. virsh iface-bridge <iface> <name> [--no-stp]

    :param br_name: bridge name
    :param iface_name: physical interface name
    :return: bridge created or raise exception
    """
    # Abort early when the requested bridge already exists on the host;
    # 'test' comes from the enclosing scope
    if libvirt.check_iface(br_name, "exists", "--all"):
        test.cancel("The bridge %s already exist" % br_name)
    utils_package.package_install('tmux')
    setup_seq = ('ip link add name {0} type bridge; ip link set {1} up;'
                 ' ip link set {1} master {0}; ip link set {0} up;'
                 ' pkill dhclient; sleep 6; dhclient {0};'
                 ' ifconfig {1} 0').format(br_name, iface_name)
    process.run('tmux -c "{}"'.format(setup_seq), shell=True, verbose=True)
def _host_config_check():
    """
    Prepare the host for usb redirection testing.

    Uses 'option', 'vendorid', 'productid', 'backend', 'params', 'test'
    and 'create_repo' from the enclosing scope.

    :return: (status, err_msg) tuple — status is False and err_msg is
             filled in when any preparation step fails
    """
    status = True
    err_msg = ''
    if option == "with_negative_config":
        # Locate the USB device in host dmesg to learn its bus address
        # (captured as group(1), e.g. "1-2" or "1-2.3")
        out = process.getoutput("dmesg")
        pattern = r"usb (\d-\d(?:.\d)?):.*idVendor=%s, idProduct=%s"
        pattern = pattern % (vendorid, productid)
        obj = re.search(pattern, out, re.ASCII)
        if not obj:
            status = False
            err_msg = "Fail to get the USB device info in host dmesg"
            return (status, err_msg)
        error_context.context("Make USB device unconfigured", test.log.info)
        # Writing to bConfigurationValue deconfigures the device in sysfs
        unconfig_value = params["usbredir_unconfigured_value"]
        cmd = "echo %s > /sys/bus/usb/devices/%s/bConfigurationValue"
        cmd = cmd % (unconfig_value, obj.group(1))
        test.log.info(cmd)
        s, o = process.getstatusoutput(cmd)
        if s:
            status = False
            err_msg = "Fail to unconfig the USB device, output: %s" % o
            return (status, err_msg)
    if backend == 'spicevmc':
        # The spice backend needs a GUI environment plus virt-viewer
        gui_group = "Server with GUI"
        out = process.getoutput('yum group list --installed',
                                allow_output_check='stdout', shell=True)
        obj = re.search(r"(Installed Environment Groups:.*?)^\S", out,
                        re.S | re.M)
        if not obj or gui_group not in obj.group(1):
            # GUI group missing — install it now
            gui_groupinstall_cmd = "yum groupinstall -y '%s'" % gui_group
            s, o = process.getstatusoutput(gui_groupinstall_cmd, shell=True)
            if s:
                status = False
                err_msg = "Fail to install '%s' on host, " % gui_group
                err_msg += "output: %s" % o
                return (status, err_msg)
        virt_viewer_cmd = "rpm -q virt-viewer || yum install -y virt-viewer"
        s, o = process.getstatusoutput(virt_viewer_cmd, shell=True)
        if s:
            status = False
            err_msg = "Fail to install 'virt-viewer' on host, "
            err_msg += "output: %s" % o
            return (status, err_msg)
    elif backend == 'tcp_socket':
        # The tcp backend only needs usbredir-server from the local repo
        create_repo()
        if not utils_package.package_install("usbredir-server"):
            status = False
            err_msg = "Fail to install 'usbredir-server' on host"
            return (status, err_msg)
    return (status, err_msg)
def prepare_host_for_test(params, test):
    """
    Setup host and return constants used by other functions

    :param params: dictionary of test parameters
    :param test: test object
    :return: The following constants are returned by this function:
        numa_memory - dictionary for numa memory setup
        oversize - memory taken by the main node + 50% of the first neighbour
        undersize - memory taken by the main node decreased by 25%
          (NOTE(review): the code actually computes 25% OF the node size,
          i.e. a 75% decrease — confirm which is intended)
        memory_to_eat - memory to be used by the process - main node size
          + 10% of neighbour size
        neighbour - node number of the neighbour to be used for test
        nodeset_string - nodeset string to be used for numatune (build
          from the main node number and the neighbour node number)
    """
    # Create a NumaInfo object to get NUMA related information
    numa_info = utils_misc.NumaInfo()
    online_nodes = numa_info.get_online_nodes_withmem()
    if len(online_nodes) < 2:
        test.cancel("This test needs at least 2 available numa nodes")
    numa_memory = {
        'mode': params.get('memory_mode', 'strict'),
        # If nodeset is not defined in config, take a first node with memory.
        'nodeset': params.get('memory_nodeset', online_nodes[0])
    }
    # Get the size of a main node
    nodeset_size = int(
        numa_info.read_from_node_meminfo(int(numa_memory['nodeset']),
                                         'MemTotal'))
    # Get the size of a first neighbour with memory.
    # NOTE(review): the str() comparison assumes 'nodeset' is a string;
    # the default from online_nodes[0] may be an int — confirm param type
    for node in online_nodes:
        if str(node) != numa_memory['nodeset']:
            neighbour = node
            break
    nodeset_nb_size = int(
        numa_info.read_from_node_meminfo(int(neighbour), 'MemTotal'))
    logging.debug('Memory available on a main node {} is {}'.format(
        numa_memory['nodeset'], nodeset_size))
    logging.debug('Memory available on a neighbour node {} is {}'.format(
        neighbour, nodeset_nb_size))
    # Increase a size by 50% of neighbour node
    oversize = int(nodeset_size + 0.5 * nodeset_nb_size)
    # Decrease nodeset size by 25%
    undersize = int(nodeset_size * 0.25)
    # Memory to eat is a whole nodeset + 10% of neighbour size
    memory_to_eat = int(nodeset_size + 0.1 * nodeset_nb_size)
    nodeset_string = '{},{}'.format(online_nodes[0], neighbour)
    # Swap is disabled so memory pressure really hits the NUMA nodes
    process.run("swapoff -a", shell=True)
    if not utils_package.package_install('libcgroup-tools'):
        test.fail("Failed to install package libcgroup-tools on host.")
    return numa_memory, oversize, undersize, memory_to_eat, neighbour, nodeset_string
def install_pkgs(session, pkgs, test):
    """
    Install packages within the vm

    :param session: vm session
    :param pkgs: str or list, package names to install; falsy means no-op
    :param test: the test object
    :raises: test.error if installation fails
    """
    if not pkgs:
        return
    if not utils_package.package_install(pkgs, session=session):
        test.error("Fail to install package '%s'" % pkgs)
def test_guest_tpm(expect_version, session, expect_fail):
    """
    Test tpm function in guest

    :param expect_version: guest tpm version, as host version, or emulator
                           specified
    :param session: Guest session to be tested
    :param expect_fail: guest tpm is expectedly fail to work
    """
    logging.info("------Checking guest tpm device work------")
    if expect_version == "1.2":
        # Install tpm-tools and test by tcsd method
        if not utils_package.package_install(["tpm-tools"], session, 360):
            test.error("Failed to install tpm-tools package in guest")
        else:
            # tpm_version below requires the tcsd service, so start it first
            status, output = session.cmd_status_output(
                "systemctl start tcsd")
            logging.debug(output)
            if status:
                # tcsd failing to start is the expected outcome when the
                # host only provides tpm2.0 for a tpm-crb passthrough
                if expect_fail:
                    test.cancel(
                        "tpm-crb passthrough only works with host tpm2.0, "
                        "but your host tpm version is 1.2")
                else:
                    test.fail("Failed to start tcsd.service in guest")
            else:
                status, output = session.cmd_status_output("tpm_version")
                logging.debug(output)
                if status:
                    test.fail("Guest tpm can not work")
    else:
        # If expect_version is tpm2.0, install and test by tpm2-tools
        if not utils_package.package_install(["tpm2-tools"], session, 360):
            test.error("Failed to install tpm2-tools package in guest")
        else:
            # Reading random bytes exercises the tpm2 device end to end
            status, output = session.cmd_status_output("tpm2_getrandom 11")
            logging.debug(output)
            if status:
                test.fail("Guest tpm can not work")
    logging.info("------PASS on guest tpm device work check------")
def setUp(self):
    """
    Avocado-vt uses custom setUp/test/tearDown handling and unlike
    Avocado it allows skipping tests from any phase.  To convince
    Avocado to allow skips let's say our tests run during setUp
    phase and report the status in test.
    """
    # Run the whole test with LANG=C so command output parsing is stable;
    # the original value is restored in the finally block below
    env_lang = os.environ.get('LANG')
    os.environ['LANG'] = 'C'
    try:
        self._runTest()
        # __status is name-mangled to _<ClassName>__status on the instance
        self.__status = "PASS"
    # This trick will give better reporting of virt tests being executed
    # into avocado (skips, warns and errors will display correctly)
    except exceptions.TestSkipError:
        raise  # This one has to be raised in setUp
    except:  # nopep8 Old-style exceptions are not inherited from Exception()
        # Store the exception object itself as the status for later reporting
        details = sys.exc_info()[1]
        self.__status = details
    finally:
        # Clean libvirtd debug logs if the test is not fail or error
        if self.params.get("libvirtd_log_cleanup", "no") == "yes":
            if(self.params.get("vm_type") == 'libvirt' and
               self.params.get("enable_libvirtd_debug_log", "yes") == "yes"):
                libvirtd_log = self.params["libvirtd_debug_file"]
                # Only remove the log when the test neither failed nor errored
                if("TestFail" not in str(sys.exc_info()[0]) and
                   "TestError" not in str(sys.exc_info()[0])):
                    if libvirtd_log and os.path.isfile(libvirtd_log):
                        logging.info("cleaning libvirtd logs...")
                        os.remove(libvirtd_log)
                else:
                    # tar the libvirtd log and archive
                    logging.info("archiving libvirtd debug logs")
                    from virttest import utils_package
                    if utils_package.package_install("tar"):
                        archive = os.path.join(
                            libvirtd_log.strip(os.path.basename(libvirtd_log)),
                            "libvirtd.tar.gz")
                        cmd = ("tar -zcf %s -P %s"
                               % (pipes.quote(archive),
                                  pipes.quote(libvirtd_log)))
                        if process.system(cmd) == 0:
                            os.remove(libvirtd_log)
                    else:
                        logging.error("Unable to find tar to compress libvirtd "
                                      "logs")
        # Restore the caller's LANG (or unset it if it was not set)
        if env_lang:
            os.environ['LANG'] = env_lang
        else:
            del os.environ['LANG']
def check_name_ip(session):
    """
    Check dns resolving on guest
    """
    # bind-utils provides the 'host' lookup command used below
    if not utils_package.package_install(['bind-utils'], session):
        test.error("Failed to install bind-utils on guest")
    # guest_ipv4/guest_ipv6/guest_name/test come from the enclosing scope;
    # prefer the ipv4 address when both are available
    if not (guest_ipv4 or guest_ipv6):
        test.fail("No ip address found from parameters")
    guest_ip = guest_ipv4 or guest_ipv6
    lookup_cmd = "host %s | grep %s" % (guest_name, guest_ip)
    if session.cmd_status(lookup_cmd):
        test.fail("Can't resolve name %s on guest" % guest_name)
def __init__(self, session=None):
    """
    Initialize firewalld service by installing and creating service obj

    :param session: ShellSession Object of guest/remote host; None means
                    operate on the local host
    """
    self.service_name = "firewalld"
    self.option = ""
    self.session = session
    # Best effort install — a missing package is only logged, not fatal
    if not utils_package.package_install(self.service_name, session=session):
        logging.error("Firewalld package is not available")
    # Remote sessions drive firewalld through raw systemctl command
    # strings; the local host goes through the service factory object
    self.firewalld = ("systemctl %s firewalld" if self.session
                      else service.Factory.create_service("firewalld"))
    if not self.status():
        self.start()
def download_file(url, dest_file, test):
    """
    Perform file download via wget

    :param url: The source url
    :param dest_file: The dest file path
    :param test: Avocado test object
    :return: True or raise exception
    """
    if utils_package.package_install("wget"):
        # Placeholder URLs from example configs cannot be downloaded
        if "EXAMPLE" in url:
            test.cancel("Please provide the URL %s" % url)
        fetch_cmd = "wget %s -O %s" % (url, dest_file)
        # Download only when the destination is not already present
        if not os.path.exists(dest_file) and process.system(
                fetch_cmd, verbose=False, shell=True):
            test.error("Failed to download boot iso file")
        return True
    else:
        test.error("wget install failed")
# Apply the requested vcpu topology to the guest XML before starting it
vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu, vm_sockets,
                   vm_cores, vm_threads, add_topology=True)
try:
    vm.start()
    if status_error:
        test.fail("VM Started with invalid thread %s" % vm_threads)
# Fixed: use Python 3 compatible 'except ... as ...' syntax
# (old 'except X, e' form is a SyntaxError on Python 3)
except virt_vm.VMStartError as detail:
    if not status_error:
        test.fail("VM failed to start %s" % detail)
if not status_error:
    # try installing powerpc-utils in guest if not skip
    try:
        session = vm.wait_for_login()
        utils_package.package_install(["powerpc-utils"], session, 360)
        session.close()
    except Exception as e:
        test.cancel("Unable to install powerpc-utils package in guest\n %s"
                    % e)
    # Changing the smt number
    if smt_number:
        smt_chk_cmd_mod = "%s=%s" % (smt_chk_cmd, smt_number)
        error_count += smt_check(vm, smt_chk_cmd_mod, "")
    guest_cpu_details = cpus_info(vm)
    # Step 10: Check for threads, cores, sockets
    if vm_cores != guest_cpu_details[2]:
        logging.error("Number of cores mismatch:\nExpected number of "
                      "cores: %s\nActual number of cores: %s", vm_cores,
                      guest_cpu_details[2])
        error_count += 1
def run(test, params, env):
    """
    Test the command virsh hostname

    (1) Call virsh hostname
    (2) Call virsh hostname with an unexpected option
    (3) Call virsh hostname with libvirtd service stop

    :param test: avocado test object
    :param params: dictionary of the test parameters
    :param env: dictionary of the test environment
    """
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd", None)
    remote_user = params.get("remote_user", "root")
    remote_uri = params.get("remote_uri", None)
    # Fixed typo in the cancel message: 'rempte_ip' -> 'remote_ip'
    if remote_uri and remote_ip.count("EXAMPLE"):
        test.cancel("Pls configure remote_ip first")

    # Collect the reference hostname, either over ssh from the remote
    # host or directly from the local host
    session = None
    if remote_uri:
        session = remote.wait_for_login('ssh', remote_ip, '22', remote_user,
                                        remote_pwd, r"[\#\$]\s*$")
        hostname = session.cmd_output("hostname -f").strip()
    else:
        hostname_result = process.run("hostname -f", shell=True,
                                      ignore_status=True)
        hostname = hostname_result.stdout_text.strip()

    # Prepare libvirtd service on local.
    # Fixed: 'libvirtd' is now read unconditionally so the recovery step
    # at the end cannot raise NameError when the param is absent.
    check_libvirtd = "libvirtd" in params
    libvirtd = params.get("libvirtd")
    if check_libvirtd and libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Start libvirtd on remote server
    if remote_uri:
        if not utils_package.package_install("libvirt", session):
            test.cancel("Failed to install libvirt on remote server")
        remote_libvirtd = utils_libvirtd.Libvirtd(session=session)
        remote_libvirtd.restart()

    # Run test case
    if remote_uri:
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)
    option = params.get("virsh_hostname_options")
    hostname_test = virsh.hostname(option, uri=remote_uri,
                                   ignore_status=True,
                                   debug=True)
    status = 0
    # An empty result means the virsh command failed
    if hostname_test == '':
        status = 1
        hostname_test = None

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Close session
    if session:
        session.close()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            test.fail("Command 'virsh hostname %s' succeeded "
                      "(incorrect command)" % option)
    elif status_error == "no":
        if hostname != hostname_test:
            test.fail("Virsh cmd gives hostname %s != %s."
                      % (hostname_test, hostname))
        if status != 0:
            test.fail("Command 'virsh hostname %s' failed "
                      "(correct command)" % option)
def run(test, params, env):
    """
    Convert specific esx guest

    :param test: avocado test object
    :param params: dictionary of the test parameters
    :param env: dictionary of the test environment
    """
    # Fixed: dict.itervalues() does not exist on Python 3 — use values()
    for v in params.values():
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    vpx_hostname = params.get('vpx_hostname')
    esx_ip = params.get('esx_hostname')
    vpx_dc = params.get('vpx_dc')
    vm_name = params.get('main_vm')
    output_mode = params.get('output_mode')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    error_list = []
    remote_host = vpx_hostname

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def check_device_exist(check, virsh_session_id):
        """
        Check if device exist after convertion
        """
        xml = virsh.dumpxml(vm_name, session_id=virsh_session_id).stdout
        if check == 'cdrom':
            if "device='cdrom'" not in xml:
                log_fail('CDROM no longer exists')

    def check_vmtools(vmcheck):
        """
        Check whether vmware tools packages have been removed
        """
        pkgs = vmcheck.session.cmd('rpm -qa').strip()
        removed_pkgs = params.get('removed_pkgs').strip().split(',')
        if not removed_pkgs:
            test.error('Missing param "removed_pkgs"')
        for pkg in removed_pkgs:
            if pkg in pkgs:
                log_fail('Package "%s" not removed' % pkg)

    def check_modprobe(vmcheck):
        """
        Check whether content of /etc/modprobe.conf meets expectation
        """
        content = vmcheck.session.cmd('cat /etc/modprobe.conf').strip()
        logging.debug(content)
        cfg_content = params.get('cfg_content')
        if not cfg_content:
            test.error('Missing content for search')
        logging.info('Search "%s" in /etc/modprobe.conf', cfg_content)
        # Fixed: raw string for the regex (avoids invalid escape warning)
        pattern = r'\s+'.join(cfg_content.split())
        if not re.search(pattern, content):
            log_fail('Not found "%s"' % cfg_content)

    def check_device_map(vmcheck):
        """
        Check if the content of device.map meets expectation.
        """
        logging.info(vmcheck.session.cmd('fdisk -l').strip())
        device_map = params.get('device_map_path')
        content = vmcheck.session.cmd('cat %s' % device_map)
        logging.debug('Content of device.map:\n%s', content)
        logging.info('Found device: %d', content.count('/dev/'))
        logging.info('Found virtio device: %d', content.count('/dev/vd'))
        if content.count('/dev/') != content.count('/dev/vd'):
            log_fail('Content of device.map not correct')
        else:
            logging.info('device.map has been remaped to "/dev/vd*"')

    def check_snapshot_file(vmcheck):
        """
        Check if the removed file exists after conversion
        """
        removed_file = params.get('removed_file')
        # NOTE(review): session.cmd(...).stderr looks suspicious — aexpect
        # sessions usually return plain strings; confirm the comparison
        logging.debug(vmcheck.session.cmd('test -f %s' % removed_file).stderr)
        if vmcheck.session.cmd('test -f %s' % removed_file).stderr == 0:
            # Fixed: the format string previously had no argument bound
            log_fail('Removed file "%s" exists after conversion'
                     % removed_file)

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if checkpoint == 'empty_cdrom':
            if status_error:
                log_fail('Virsh dumpxml failed for empty cdrom image')
        elif not status_error:
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                    timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                virsh.start(vm_name)
            # Check guest following the checkpoint document after convertion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if checkpoint not in ['GPO_AV', 'ovmf']:
                ret = vmchecker.run()
                if len(ret) == 0:
                    logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if checkpoint == 'cdrom':
                virsh_session = utils_sasl.VirshSessionSASL(params)
                virsh_session_id = virsh_session.get_id()
                check_device_exist('cdrom', virsh_session_id)
            if checkpoint == 'vmtools':
                check_vmtools(vmchecker.checker)
            if checkpoint == 'modprobe':
                check_modprobe(vmchecker.checker)
            if checkpoint == 'device_map':
                check_device_map(vmchecker.checker)
            if checkpoint == 'snapshot':
                check_snapshot_file(vmchecker.checker)
            # Merge 2 error lists
            error_list.extend(vmchecker.errors)
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        if len(error_list):
            test.fail('%d checkpoints failed: %s'
                      % (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': remote_host, 'hypervisor': 'esx', 'main_vm': vm_name,
            'vpx_dc': vpx_dc, 'esx_ip': esx_ip,
            'new_name': vm_name + utils_misc.generate_random_string(4),
            'v2v_opts': '-v -x', 'input_mode': 'libvirt',
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target')
        }
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'
        v2v_uri = utils_v2v.Uri('esx')
        remote_uri = v2v_uri.get_uri(remote_host, vpx_dc, esx_ip)
        # Create password file for access to ESX hypervisor
        vpx_passwd = params.get("vpx_password")
        logging.debug(vpx_passwd)
        vpx_passwd_file = os.path.join(data_dir.get_tmp_dir(), "vpx_passwd")
        with open(vpx_passwd_file, 'w') as pwd_f:
            pwd_f.write(vpx_passwd)
        v2v_params['v2v_opts'] += " --password-file %s" % vpx_passwd_file
        if params.get('output_format'):
            v2v_params.update({'output_format': params['output_format']})
        # Rename guest with special name while converting to rhev
        if '#' in vm_name and output_mode == 'rhev':
            v2v_params['new_name'] = v2v_params['new_name'].replace('#', '_')
        # Create SASL user on the ovirt host
        if output_mode == 'rhev':
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')
        if checkpoint == 'ovmf':
            utils_package.package_install('OVMF')
        if checkpoint == 'root_ask':
            v2v_params['v2v_opts'] += ' --root ask'
            v2v_params['custom_inputs'] = params.get('choice', '1')
        if checkpoint.startswith('root_') and checkpoint != 'root_ask':
            root_option = params.get('root_option')
            v2v_params['v2v_opts'] += ' --root %s' % root_option
        if checkpoint == 'copy_to_local':
            esx_password = params.get('esx_password')
            esx_passwd_file = os.path.join(data_dir.get_tmp_dir(),
                                           "esx_passwd")
            logging.info('Prepare esx password file')
            with open(esx_passwd_file, 'w') as pwd_f:
                pwd_f.write(esx_password)
            esx_uri = 'esx://root@%s/?no_verify=1' % esx_ip
            copy_cmd = 'virt-v2v-copy-to-local -ic %s %s --password-file %s' %\
                       (esx_uri, vm_name, esx_passwd_file)
            process.run(copy_cmd)
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params['input_file'] = '%s.xml' % vm_name
        if checkpoint == 'with_proxy':
            http_proxy = params.get('esx_http_proxy')
            https_proxy = params.get('esx_https_proxy')
            logging.info('Set http_proxy=%s, https_proxy=%s',
                         http_proxy, https_proxy)
            os.environ['http_proxy'] = http_proxy
            os.environ['https_proxy'] = https_proxy

        if checkpoint == 'empty_cdrom':
            virsh_dargs = {'uri': remote_uri, 'remote_ip': remote_host,
                           'remote_user': '******', 'remote_pwd': vpx_passwd,
                           'debug': True}
            remote_virsh = virsh.VirshPersistent(**virsh_dargs)
            v2v_result = remote_virsh.dumpxml(vm_name)
        else:
            v2v_result = utils_v2v.v2v_cmd(v2v_params)
        # Fixed: dict.has_key() does not exist on Python 3 — use 'in'
        if 'new_name' in v2v_params:
            params['main_vm'] = v2v_params['new_name']
        check_result(v2v_result, status_error)
    finally:
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if checkpoint == 'with_proxy':
            logging.info('Unset http_proxy&https_proxy')
            os.environ.pop('http_proxy')
            os.environ.pop('https_proxy')
def run(test, params, env): """ Test disk encryption option. 1.Prepare backend storage (blkdev/iscsi/gluster/ceph) 2.Use luks format to encrypt the backend storage 3.Prepare a disk xml indicating to the backend storage with valid/invalid luks password 4.Start VM with disk hot/cold plugged 5.Check some disk operations in VM 6.Check backend storage is still in luks format 7.Recover test environment """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': True} def encrypt_dev(device, params): """ Encrypt device with luks format :param device: Storage deivce to be encrypted. :param params: From the dict to get encryption password. """ password = params.get("luks_encrypt_passwd", "password") size = params.get("luks_size", "500M") cmd = ("qemu-img create -f luks " "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 " "-o key-secret=sec0 %s %s" % (password, device, size)) if process.system(cmd, shell=True): test.fail("Can't create a luks encrypted img by qemu-img") def check_dev_format(device, fmt="luks"): """ Check if device is in luks format :param device: Storage deivce to be checked. :param fmt: Expected disk format. :return: If device's format equals to fmt, return True, else return False. """ cmd_result = process.run("qemu-img" + ' -h', ignore_status=True, shell=True, verbose=False) if b'-U' in cmd_result.stdout: cmd = ("qemu-img info -U %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) else: cmd = ("qemu-img info %s| grep -i 'file format' " "| grep -i %s" % (device, fmt)) cmd_result = process.run(cmd, ignore_status=True, shell=True) if cmd_result.exit_status: test.fail("device %s is not in %s format. err is: %s" % (device, fmt, cmd_result.stderr)) def check_in_vm(target, old_parts): """ Check mount/read/write disk in VM. :param target: Disk dev in VM. :param old_parts: Original disk partitions in VM. :return: True if check successfully. 
""" try: session = vm.wait_for_login() if platform.platform().count('ppc64'): time.sleep(10) new_parts = libvirt.get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) logging.info("Added parts:%s", added_parts) if len(added_parts) != 1: logging.error("The number of new partitions is invalid in VM") return False else: added_part = added_parts[0] cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && " "mkdir -p test && mount /dev/{0} test && echo" " teststring > test/testfile && umount test" .format(added_part)) status, output = session.cmd_status_output(cmd) logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s", status, output) return status == 0 except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e: logging.error(str(e)) return False # Disk specific attributes. device = params.get("virt_disk_device", "disk") device_target = params.get("virt_disk_device_target", "vdd") device_format = params.get("virt_disk_device_format", "raw") device_type = params.get("virt_disk_device_type", "file") device_bus = params.get("virt_disk_device_bus", "virtio") backend_storage_type = params.get("backend_storage_type", "iscsi") # Backend storage options. 
storage_size = params.get("storage_size", "1G") enable_auth = "yes" == params.get("enable_auth") # Luks encryption info, luks_encrypt_passwd is the password used to encrypt # luks image, and luks_secret_passwd is the password set to luks secret, you # can set a wrong password to luks_secret_passwd for negative tests luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password") luks_secret_passwd = params.get("luks_secret_passwd", "password") # Backend storage auth info use_auth_usage = "yes" == params.get("use_auth_usage") if use_auth_usage: use_auth_uuid = False else: use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes") auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi") auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi") status_error = "yes" == params.get("status_error") check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes") hotplug_disk = "yes" == params.get("hotplug_disk", "no") encryption_in_source = "yes" == params.get("encryption_in_source", "no") auth_in_source = "yes" == params.get("auth_in_source", "no") auth_sec_uuid = "" luks_sec_uuid = "" disk_auth_dict = {} disk_encryption_dict = {} pvt = None if ((encryption_in_source or auth_in_source) and not libvirt_version.version_compare(3, 9, 0)): test.cancel("Cannot put <encryption> or <auth> inside disk <source> " "in this libvirt version.") # Start VM and get all partions in VM. if vm.is_dead(): vm.start() session = vm.wait_for_login() old_parts = libvirt.get_parts_list(session) session.close() vm.destroy(gracefully=False) # Back up xml file. 
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) try: # Setup backend storage if backend_storage_type == "iscsi": iscsi_host = params.get("iscsi_host") iscsi_port = params.get("iscsi_port") if device_type == "block": device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True) disk_src_dict = {'attrs': {'dev': device_source}} elif device_type == "network": if enable_auth: chap_user = params.get("chap_user", "redhat") chap_passwd = params.get("chap_passwd", "password") auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi") auth_sec_dict = {"sec_usage": "iscsi", "sec_target": auth_sec_usage} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) # Set password of auth secret (not luks encryption secret) virsh.secret_set_value(auth_sec_uuid, chap_passwd, encode=True, debug=True) iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, chap_user=chap_user, chap_passwd=chap_passwd, portal_ip=iscsi_host) # ISCSI auth attributes for disk xml if use_auth_uuid: disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} elif use_auth_usage: disk_auth_dict = {"auth_user": chap_user, "secret_type": auth_sec_usage_type, "secret_usage": auth_sec_usage_target} else: iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi( is_setup=True, is_login=False, image_size=storage_size, portal_ip=iscsi_host) device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port, iscsi_target, lun_num) disk_src_dict = {"attrs": {"protocol": "iscsi", "name": "%s/%s" % (iscsi_target, lun_num)}, "hosts": [{"name": iscsi_host, "port": iscsi_port}]} elif backend_storage_type == "gluster": gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1") gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1") gluster_img_name = params.get("gluster_img_name", "gluster1.img") gluster_host_ip = libvirt.setup_or_cleanup_gluster( is_setup=True, vol_name=gluster_vol_name, 
pool_name=gluster_pool_name) device_source = "gluster://%s/%s/%s" % (gluster_host_ip, gluster_vol_name, gluster_img_name) disk_src_dict = {"attrs": {"protocol": "gluster", "name": "%s/%s" % (gluster_vol_name, gluster_img_name)}, "hosts": [{"name": gluster_host_ip, "port": "24007"}]} elif backend_storage_type == "ceph": ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS") ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST") ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS") ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME") ceph_client_name = params.get("ceph_client_name") ceph_client_key = params.get("ceph_client_key") ceph_auth_user = params.get("ceph_auth_user") ceph_auth_key = params.get("ceph_auth_key") enable_auth = "yes" == params.get("enable_auth") key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key") key_opt = "" # Prepare a blank params to confirm if delete the configure at the end of the test ceph_cfg = "" if not utils_package.package_install(["ceph-common"]): test.error("Failed to install ceph-common") # Create config file if it doesn't exist ceph_cfg = ceph.create_config_file(ceph_mon_ip) if enable_auth: # If enable auth, prepare a local file to save key if ceph_client_name and ceph_client_key: with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key)) key_opt = "--keyring %s" % key_file auth_sec_dict = {"sec_usage": auth_sec_usage_type, "sec_name": "ceph_auth_secret"} auth_sec_uuid = libvirt.create_secret(auth_sec_dict) virsh.secret_set_value(auth_sec_uuid, ceph_auth_key, debug=True) disk_auth_dict = {"auth_user": ceph_auth_user, "secret_type": auth_sec_usage_type, "secret_uuid": auth_sec_uuid} cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) else: test.error("No ceph client name/key provided.") device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name, ceph_mon_ip, key_file) else: device_source = 
"rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip) disk_src_dict = {"attrs": {"protocol": "rbd", "name": ceph_disk_name}, "hosts": [{"name": ceph_host_ip, "port": ceph_host_port}]} elif backend_storage_type == "nfs": pool_name = params.get("pool_name", "nfs_pool") pool_target = params.get("pool_target", "nfs_mount") pool_type = params.get("pool_type", "netfs") nfs_server_dir = params.get("nfs_server_dir", "nfs_server") emulated_image = params.get("emulated_image") image_name = params.get("nfs_image_name", "nfs.img") tmp_dir = data_dir.get_tmp_dir() pvt = libvirt.PoolVolumeTest(test, params) pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image) nfs_mount_dir = os.path.join(tmp_dir, pool_target) device_source = nfs_mount_dir + image_name disk_src_dict = {'attrs': {'file': device_source, 'type_name': 'file'}} else: test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.") logging.debug("device source is: %s", device_source) luks_sec_uuid = libvirt.create_secret(params) logging.debug("A secret created with uuid = '%s'", luks_sec_uuid) ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd, encode=True, debug=True) encrypt_dev(device_source, params) libvirt.check_exit_status(ret) # Add disk xml. 
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) disk_xml = Disk(type_name=device_type) disk_xml.device = device disk_xml.target = {"dev": device_target, "bus": device_bus} driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict disk_source = disk_xml.new_disk_source(**disk_src_dict) if disk_auth_dict: logging.debug("disk auth dict is: %s" % disk_auth_dict) if auth_in_source: disk_source.auth = disk_xml.new_auth(**disk_auth_dict) else: disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) disk_encryption_dict = {"encryption": "luks", "secret": {"type": "passphrase", "uuid": luks_sec_uuid}} disk_encryption = disk_xml.new_encryption(**disk_encryption_dict) if encryption_in_source: disk_source.encryption = disk_encryption else: disk_xml.encryption = disk_encryption disk_xml.source = disk_source logging.debug("new disk xml is: %s", disk_xml) # Sync VM xml if not hotplug_disk: vmxml.add_device(disk_xml) vmxml.sync() try: vm.start() vm.wait_for_login() except virt_vm.VMStartError as details: # When use wrong password in disk xml for cold plug cases, # VM cannot be started if status_error and not hotplug_disk: logging.info("VM failed to start as expected: %s" % str(details)) else: test.fail("VM should start but failed: %s" % str(details)) if hotplug_disk: result = virsh.attach_device(vm_name, disk_xml.xml, ignore_status=True, debug=True) libvirt.check_exit_status(result, status_error) if check_partitions and not status_error: if not check_in_vm(device_target, old_parts): test.fail("Check disk partitions in VM failed") check_dev_format(device_source) finally: # Recover VM. 
if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync("--snapshots-metadata") # Clean up backend storage if backend_storage_type == "iscsi": libvirt.setup_or_cleanup_iscsi(is_setup=False) elif backend_storage_type == "gluster": libvirt.setup_or_cleanup_gluster(is_setup=False, vol_name=gluster_vol_name, pool_name=gluster_pool_name) elif backend_storage_type == "ceph": # Remove ceph configure file if created. if ceph_cfg: os.remove(ceph_cfg) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name)) cmd_result = process.run(cmd, ignore_status=True, shell=True) logging.debug("result of rbd removal: %s", cmd_result) if os.path.exists(key_file): os.remove(key_file) # Clean up secrets if auth_sec_uuid: virsh.secret_undefine(auth_sec_uuid) if luks_sec_uuid: virsh.secret_undefine(luks_sec_uuid) # Clean up pools if pvt: pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
def run(test, params, env):
    """
    Test bridge support from network

    1) create a linux bridge and connect a physical interface to it
    2) define nwfilter with "vdsm-no-mac-spoofing"
    3) redefine the vm with the new create bridge and filter
    4) check if guest can get public ip after vm start
    5) check if guest and host can ping each other
    6) check if guest and host can ping outside
    7) start another vm connected to the same bridge
    8) check if the 2 guests can ping each other

    :param test: avocado test object (provides cancel/fail reporting)
    :param params: test parameter dictionary
    :param env: test environment object holding the VM instances
    """
    def create_bridge(br_name, iface_name):
        """
        Create a linux bridge by virsh cmd:
        1. Stop NetworkManager and Start network service
        2. virsh iface-bridge <iface> <name> [--no-stp]

        :param br_name: bridge name
        :param iface_name: physical interface name
        :return: bridge created or raise exception
        """
        # Make sure the bridge not exist
        if libvirt.check_iface(br_name, "exists", "--all"):
            test.cancel("The bridge %s already exist" % br_name)
        # Create bridge
        # This cmd run a long time, so set timeout=240
        result = virsh.iface_bridge(iface_name, br_name, "--no-stp",
                                    debug=True, timeout=240)
        if result.exit_status:
            test.fail("Failed to create bridge:\n%s" % result.stderr)

    def define_nwfilter(filter_name):
        """
        Define nwfilter vdsm-no-mac-spoofing with content like:
        <filter name='vdsm-no-mac-spoofing' chain='root'>
            <filterref filter='no-mac-spoofing'/>
            <filterref filter='no-arp-mac-spoofing'/>
        </filter>

        :param filter_name: the name of nwfilter
        :return: filter created or raise exception
        """
        filter_uuid = params.get("filter_uuid",
                                 "11111111-b071-6127-b4ec-111111111111")
        # Note: the defined filter name is fixed to "vdsm-no-mac-spoofing";
        # filter_name is only used later for undefine/cleanup.
        filter_params = {"filter_name": "vdsm-no-mac-spoofing",
                         "filter_chain": "root",
                         "filter_uuid": filter_uuid,
                         "filterref_name_1": "no-mac-spoofing",
                         "filterref_name_2": "no-arp-mac-spoofing"}
        filter_xml = libvirt.create_nwfilter_xml(filter_params).xml
        # Run command
        result = virsh.nwfilter_define(filter_xml, ignore_status=True,
                                       debug=True)
        if result.exit_status:
            test.fail("Failed to define nwfilter with %s" % filter_xml)

    def modify_iface_xml(br_name, nwfilter, vm_name):
        """
        Modify interface xml with the new bridge and the nwfilter

        Replaces the first <interface> device of the domain: its source
        is pointed at the new bridge and a <filterref> is attached.

        :param br_name: bridge name
        :param nwfilter: nwfilter name
        :param vm_name: vm name
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        # Remove the old interface, edit it, then re-add and sync the domain
        vmxml.del_device(iface_xml)
        iface_xml.source = {'bridge': br_name}
        iface_xml.filterref = iface_xml.new_filterref(name=nwfilter)
        logging.debug("new interface xml is: %s" % iface_xml)
        vmxml.add_device(iface_xml)
        vmxml.sync()

    def ping(src_ip, dest_ip, ping_count, timeout, session=None):
        """
        Wrap of ping

        :param src_ip: source address (passed as the ping interface
                       argument, i.e. `ping -I <src_ip>`)
        :param dest_ip: destination address
        :param ping_count: count of icmp packet
        :param timeout: timeout for the ping command
        :param session: local execution or session to execute the ping command
        :return: ping succeed or raise exception
        """
        status, output = utils_net.ping(dest=dest_ip, count=ping_count,
                                        interface=src_ip, timeout=timeout,
                                        session=session)
        if status:
            test.fail("Fail to ping %s from %s" % (dest_ip, src_ip))

    # Get test params
    bridge_name = params.get("bridge_name", "br0")
    filter_name = params.get("filter_name", "vdsm-no-mac-spoofing")
    ping_count = params.get("ping_count", "5")
    ping_timeout = float(params.get("ping_timeout", "10"))
    # Use the first UP physical interface of the host for the bridge
    iface_name = utils_net.get_net_if(state="UP")[0]
    # ifcfg-style network script paths for bridge and slave interface
    bridge_script = NETWORK_SCRIPT + bridge_name
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(data_dir.get_tmp_dir(),
                                   "iface-%s.bk" % iface_name)
    vms = params.get("vms").split()
    if len(vms) <= 1:
        test.cancel("Need two VMs to test")
    else:
        vm1_name = vms[0]
        vm2_name = vms[1]

    vm1 = env.get_vm(vm1_name)
    vm2 = env.get_vm(vm2_name)

    # Back up the interface script
    process.run("cp %s %s" % (iface_script, iface_script_bk), shell=True)
    # Back up vm xml
    vm1_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm1_name)
    vm2_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm2_name)

    # Stop NetworkManager service (it interferes with manual bridging)
    NM_service = service.Factory.create_service("NetworkManager")
    NM_status = NM_service.status()
    if NM_status is True:
        NM_service.stop()
    # Start network service
    # NOTE(review): service.status() appears to return True/False when the
    # unit exists and None when it does not — confirm against the service
    # module before relying on the `is None` branch.
    NW_service = service.Factory.create_service("network")
    NW_status = NW_service.status()
    if NW_status is None:
        logging.debug("network service not found")
        # Both packages are required to provide the legacy network service
        if not utils_package.package_install('network-scripts') or \
                not utils_package.package_install('initscripts'):
            test.cancel("Failed to install network service")
    if NW_status is not True:
        logging.debug("network service is not running")
        NW_service.start()
    try:
        create_bridge(bridge_name, iface_name)
        define_nwfilter(filter_name)
        modify_iface_xml(bridge_name, filter_name, vm1_name)
        if vm1.is_alive():
            vm1.destroy()
        vm1.start()
        # Check if vm can get ip with the new create bridge
        session1 = session2 = None
        try:
            utils_net.update_mac_ip_address(vm1, timeout=120)
            vm1_ip = vm1.get_address()
        except Exception as errs:
            test.fail("vm1 can't get IP with the new create bridge: %s" % errs)
        # Check guest and host can ping each other
        host_ip = utils_net.get_ip_address_by_interface(bridge_name)
        remote_ip = params.get("remote_ip", "www.baidu.com")
        ping(host_ip, vm1_ip, ping_count, ping_timeout)
        ping(host_ip, remote_ip, ping_count, ping_timeout)
        session1 = vm1.wait_for_login()
        ping(vm1_ip, host_ip, ping_count, ping_timeout, session=session1)
        ping(vm1_ip, remote_ip, ping_count, ping_timeout, session=session1)
        # Start vm2 connect to the same bridge
        modify_iface_xml(bridge_name, filter_name, vm2_name)
        if vm2.is_alive():
            vm2.destroy()
        vm2.start()
        # Check if vm1 and vm2 can ping each other
        try:
            utils_net.update_mac_ip_address(vm2, timeout=120)
            vm2_ip = vm2.get_address()
        except Exception as errs:
            test.fail("vm2 can't get IP with the new create bridge: %s" % errs)
        session2 = vm2.wait_for_login()
        ping(vm2_ip, vm1_ip, ping_count, ping_timeout, session=session2)
        ping(vm1_ip, vm2_ip, ping_count, ping_timeout, session=session1)
    finally:
        logging.debug("Start to restore")
        # Restore both domains' XML first, then tear down host networking
        vm1_xml_bak.sync()
        vm2_xml_bak.sync()
        virsh.nwfilter_undefine(filter_name, ignore_status=True)
        if libvirt.check_iface(bridge_name, "exists", "--all"):
            virsh.iface_unbridge(bridge_name, timeout=60, debug=True)
        if os.path.exists(iface_script_bk):
            process.run("mv %s %s" % (iface_script_bk, iface_script),
                        shell=True)
        if os.path.exists(bridge_script):
            process.run("rm -rf %s" % bridge_script, shell=True)
        # reload network configuration
        NW_service.restart()
        # recover NetworkManager
        if NM_status is True:
            NM_service.start()
def run(test, params, env):
    """
    Test command: virsh rename.

    The command can rename a domain.
    1.Prepare test environment.
    2.Perform virsh rename operation.
    3.Recover test environment.
    4.Confirm the test result.

    :param test: avocado test object (provides fail/cancel reporting)
    :param params: test parameter dictionary
    :param env: test environment object holding the VM instances
    """
    # Get specific parameter value
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    vm_ref = params.get("domrename_vm_ref", "name")
    status_error = "yes" == params.get("status_error", "no")
    new_name = params.get("vm_new_name", "new")
    pre_vm_state = params.get("domrename_vm_state", "shutoff")
    domain_option = params.get("dom_opt", "")
    new_name_option = params.get("newname_opt", "")
    add_vm = "yes" == params.get("add_vm", "no")

    # Replace the variables
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Name of the optional second (cloned) domain. Compute it
    # unconditionally so the clone step below cannot raise NameError
    # when add_vm=yes but vm_new_name is not "vm2_name".
    vm2_name = vm_name[:-1] + "2"
    if new_name == "vm2_name":
        new_name = vm2_name

    # Build input params
    dom_param = ' '.join([domain_option, vm_ref])
    new_name_param = ' '.join([new_name_option, new_name])

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("vm xml is %s", vmxml_backup)

    # Initialize names referenced in the finally block so an early
    # exception cannot trigger a NameError that masks the real failure.
    vm2 = None
    result = None

    # Clone additional vms if needed
    if add_vm:
        try:
            utils_path.find_command("virt-clone")
        except utils_path.CmdNotFoundError:
            if not utils_package.package_install(["virt-install"]):
                test.cancel("Failed to install virt-install on host")
        ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name,
                                                    True, timeout=360)
        if ret_clone.exit_status:
            test.fail("Error occurred when clone a second vm!")
        vm2 = libvirt_vm.VM(vm2_name, vm.params, vm.root_dir,
                            vm.address_cache)
        virsh.dom_list("--name --all", debug=True)

    # Create object instance for renamed domain
    new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache)

    # Prepare vm state
    if pre_vm_state != "shutoff":
        vm.start()
        if pre_vm_state == "paused":
            vm.pause()
            logging.debug("Domain state is now: %s", vm.state())
        elif pre_vm_state == "managed_saved":
            vm.managedsave()
        elif pre_vm_state == "with_snapshot":
            virsh.snapshot_create_as(vm_name, "snap1 --disk-only", debug=True)
            vm.destroy(gracefully=False)

    try:
        result = virsh.domrename(dom_param, new_name_param,
                                 ignore_status=True, debug=True)
        # Raise unexpected pass or fail
        libvirt.check_exit_status(result, status_error)

        # Return expected failure for negative tests
        if status_error:
            logging.debug("Expected failure: %s", result.stderr)
            return

        # Checkpoints after domrename succeed
        else:
            list_ret = virsh.dom_list("--name --all", debug=True).stdout
            domname_ret = virsh.domname(domuuid, debug=True).stdout.strip()
            if new_name not in list_ret or vm_name in list_ret:
                test.fail("New name does not affect in virsh list")
            if domname_ret != new_name:
                test.fail("New domain name does not affect in virsh domname uuid")

            # Try to start vm with the new name
            new_vm.start()

    finally:
        # Remove additional vms (only when rename did not succeed, so the
        # cloned domain was not consumed by the rename checkpoints).
        if add_vm and vm2 is not None and vm2.exists() \
                and result is not None and result.exit_status:
            virsh.remove_domain(vm2_name, "--remove-all-storage")

        # Undefine newly renamed domain
        if new_vm.exists():
            if new_vm.is_alive():
                new_vm.destroy(gracefully=False)
            new_vm.undefine()

        # Recover domain state
        if pre_vm_state != "shutoff":
            if pre_vm_state == "with_snapshot":
                libvirt.clean_up_snapshots(vm_name)
            else:
                if pre_vm_state == "managed_saved":
                    # A managed-saved domain must be resumed before it
                    # can be destroyed cleanly.
                    vm.start()
                vm.destroy(gracefully=False)

        # Restore VM
        vmxml_backup.sync()
def run(test, params, env):
    """
    Test rng device options.

    1.Prepare test environment, destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.

    :param test: avocado test object (provides fail/cancel reporting)
    :param params: test parameter dictionary
    :param env: test environment object holding the VM instances
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def modify_rng_xml(dparams, sync=True):
        """
        Build a <rng> device from dparams and add it to the domain XML.

        :param dparams: dict of rng_* / backend_* parameters
        :param sync: if True, add the device and sync the defined XML;
                     if False, inject the device via `virsh edit`
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_model = dparams.get("backend_model", "random")
        backend_type = dparams.get("backend_type")
        backend_dev = dparams.get("backend_dev", "")
        backend_source_list = dparams.get("backend_source", "").split()
        backend_protocol = dparams.get("backend_protocol")
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        rng_xml = rng.Rng()
        rng_xml.rng_model = rng_model
        if rng_rate:
            rng_xml.rate = ast.literal_eval(rng_rate)
        backend = rng.Rng.Backend()
        backend.backend_model = backend_model
        if backend_type:
            backend.backend_type = backend_type
        if backend_dev:
            backend.backend_dev = backend_dev
        if backend_source_list:
            source_list = [ast.literal_eval(source) for source in
                           backend_source_list]
            backend.source = source_list
        if backend_protocol:
            backend.backend_protocol = backend_protocol
        rng_xml.backend = backend
        logging.debug("Rng xml: %s", rng_xml)
        if sync:
            vmxml.add_device(rng_xml)
            vmxml.xmltreefile.write()
            vmxml.sync()
        else:
            # Append the <rng> element inside <devices> via virsh edit;
            # slashes must be escaped for the sed-style substitution.
            # (raw string: the original "\/" relied on an invalid escape)
            status = libvirt.exec_virsh_edit(
                vm_name, [(r":/<devices>/s/$/%s" %
                           re.findall(r"<rng.*<\/rng>",
                                      str(rng_xml), re.M
                                      )[0].replace("/", r"\/"))])
            if not status:
                test.fail("Failed to edit vm xml")

    def check_qemu_cmd(dparams):
        """
        Verify qemu-kvm command line reflects the configured rng device.
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_type = dparams.get("backend_type")
        backend_source_list = dparams.get("backend_source", "").split()
        cmd = ("ps -ef | grep %s | grep -v grep" % vm_name)
        chardev = src_host = src_port = None
        if backend_type == "tcp":
            chardev = "socket"
        elif backend_type == "udp":
            chardev = "udp"
        for bc_source in backend_source_list:
            source = ast.literal_eval(bc_source)
            if "mode" in source and source['mode'] == "connect":
                src_host = source['host']
                src_port = source['service']

        if chardev and src_host and src_port:
            cmd += (" | grep 'chardev %s,.*host=%s,port=%s'"
                    % (chardev, src_host, src_port))
        if rng_model == "virtio":
            cmd += (" | grep 'device virtio-rng-pci'")
        if rng_rate:
            rate = ast.literal_eval(rng_rate)
            cmd += (" | grep 'max-bytes=%s,period=%s'"
                    % (rate['bytes'], rate['period']))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Can't see rng option in command line")

    def check_host():
        """
        Check that qemu holds the configured random backend device open.
        """
        backend_dev = params.get("backend_dev")
        if backend_dev:
            cmd = "lsof |grep %s" % backend_dev
            ret = process.run(cmd, ignore_status=True, shell=True)
            # stdout_text (not bytes stdout) for Python 3 compatibility,
            # consistent with the rest of this file
            if ret.exit_status or not ret.stdout_text.count("qemu"):
                test.fail("Failed to check random device"
                          " on host, command output: %s" % ret.stdout_text)

    def check_snapshot(bgjob=None):
        """
        Do snapshot operation and check the results.

        Reverts to the pre-rng snapshot (snapshot_name, created in the
        main flow before the rng device was added) and verifies the rng
        device is gone; then exercises an external disk-only snapshot.

        :param bgjob: optional background AsyncJob feeding the tcp backend
        """
        snapshot_name1 = "snap.s1"
        snapshot_name2 = "snap.s2"
        if not snapshot_vm_running:
            vm.destroy(gracefully=False)
        ret = virsh.snapshot_create_as(vm_name, snapshot_name1)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name not in snap_lists:
            test.fail("Snapshot %s doesn't exist" % snapshot_name)

        # Reverting a running domain needs --force
        options = "--force" if snapshot_vm_running else ""
        ret = virsh.snapshot_revert(
            vm_name, ("%s %s" % (snapshot_name, options)))
        libvirt.check_exit_status(ret)
        ret = virsh.dumpxml(vm_name)
        if ret.stdout.count("<rng model="):
            test.fail("Found rng device in xml")

        if snapshot_with_rng:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            if bgjob:
                bgjob.kill_func()
            modify_rng_xml(params, False)

        # Start the domain before disk-only snapshot
        if vm.is_dead():
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                # utils_misc.AsyncJob, consistent with the rest of the
                # file (the bare `utils` module is not in scope here)
                bgjob = utils_misc.AsyncJob(cmd)
            vm.start()
            vm.wait_for_login().close()
        err_msgs = ("live disk snapshot not supported"
                    " with this QEMU binary")
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only" % snapshot_name2)
        if ret.exit_status:
            if ret.stderr.count(err_msgs):
                # Runtime skip() was removed from avocado; cancel instead
                test.cancel(err_msgs)
            else:
                test.fail("Failed to create external snapshot")
        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot_name2 not in snap_lists:
            test.fail("Failed to check snapshot list")

        ret = virsh.domblklist(vm_name)
        if not ret.stdout.count(snapshot_name2):
            test.fail("Failed to find snapshot disk")

    def check_guest(session):
        """
        Check random device availability and read rate inside the guest.

        :param session: logged-in guest session
        """
        rng_files = (
            "/sys/devices/virtual/misc/hw_random/rng_available",
            "/sys/devices/virtual/misc/hw_random/rng_current")
        rng_avail = session.cmd_output("cat %s" % rng_files[0],
                                       timeout=600).strip()
        rng_currt = session.cmd_output("cat %s" % rng_files[1],
                                       timeout=600).strip()
        logging.debug("rng avail:%s, current:%s", rng_avail, rng_currt)
        if not rng_currt.count("virtio") or rng_currt not in rng_avail:
            test.fail("Failed to check rng file on guest")

        # Read the random device
        cmd = ("dd if=/dev/hwrng of=rng.test count=100"
               " && rm -f rng.test")
        ret, output = session.cmd_status_output(cmd, timeout=600)
        if ret:
            test.fail("Failed to read the random device")
        rng_rate = params.get("rng_rate")
        if rng_rate:
            # Access rate fields by key instead of relying on dict
            # ordering of .values()
            rate = ast.literal_eval(rng_rate)
            rate_conf = float(rate['bytes']) / (float(rate['period']) / 1000)
            ret = re.search(r"(\d+) bytes.*copied, (\d+\.\d+) s",
                            output, re.M)
            if not ret:
                test.fail("Can't find rate from output")
            rate_real = float(ret.group(1)) / float(ret.group(2))
            logging.debug("Find rate: %s, config rate: %s",
                          rate_real, rate_conf)
            # Allow 20% tolerance over the configured rate
            if rate_real > rate_conf * 1.2:
                test.fail("The rate of reading exceed"
                          " the limitation of configuration")
        if device_num > 1:
            rng_dev = rng_avail.split()
            if len(rng_dev) != device_num:
                # Runtime skip() was removed from avocado; cancel instead
                test.cancel("Multiple virtio-rng devices are not"
                            " supported on this guest kernel. "
                            "Bug: https://bugzilla.redhat.com/"
                            "show_bug.cgi?id=915335")
            # Switch the current rng to the second device and re-read
            session.cmd("echo -n %s > %s" % (rng_dev[1], rng_files[1]))
            # Read the random device
            if session.cmd_status(cmd, timeout=120):
                test.fail("Failed to read the random device")

    start_error = "yes" == params.get("start_error", "no")

    test_host = "yes" == params.get("test_host", "no")
    test_guest = "yes" == params.get("test_guest", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    snapshot_vm_running = "yes" == params.get("snapshot_vm_running", "no")
    snapshot_with_rng = "yes" == params.get("snapshot_with_rng", "no")
    snapshot_name = params.get("snapshot_name")
    device_num = int(params.get("device_num", 1))

    if device_num > 1 and not libvirt_version.version_compare(1, 2, 7):
        # Runtime skip() was removed from avocado; cancel instead
        test.cancel("Multiple virtio-rng devices not "
                    "supported on this libvirt version")

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Try to install rng-tools on host, it can speed up random rate
    # if installation failed, ignore the error and continue the test
    if utils_package.package_install(["rng-tools"]):
        rngd_conf = "/etc/sysconfig/rngd"
        rngd_srv = "/usr/lib/systemd/system/rngd.service"
        if os.path.exists(rngd_conf):
            # For rhel6 host, add extraoptions
            with open(rngd_conf, 'w') as f_rng:
                f_rng.write('EXTRAOPTIONS="--rng-device /dev/urandom"')
        elif os.path.exists(rngd_srv):
            # For rhel7 host, modify start options
            rngd_srv_conf = "/etc/systemd/system/rngd.service"
            if not os.path.exists(rngd_srv_conf):
                shutil.copy(rngd_srv, rngd_srv_conf)
            process.run("sed -i -e 's#^ExecStart=.*#ExecStart=/sbin/rngd"
                        " -f -r /dev/urandom -o /dev/random#' %s"
                        % rngd_srv_conf, shell=True)
            process.run('systemctl daemon-reload')
        process.run("service rngd start")

    # Build the xml and run test.
    # Initialize before the try so the finally block can always see it.
    bgjob = None
    try:
        # Take snapshot if needed
        if snapshot_name:
            if snapshot_vm_running:
                vm.start()
                vm.wait_for_login().close()
            ret = virsh.snapshot_create_as(vm_name, snapshot_name)
            libvirt.check_exit_status(ret)

        # Destroy VM first
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Build vm xml.
        dparams = {}
        if device_num > 1:
            # Per-device parameter sets: rng_model_0, backend_type_0, ...
            # (range, not py2-only xrange)
            for i in range(device_num):
                dparams[i] = {"rng_model": params.get(
                    "rng_model_%s" % i, "virtio")}
                dparams[i].update({"backend_model": params.get(
                    "backend_model_%s" % i, "random")})
                bk_type = params.get("backend_type_%s" % i)
                if bk_type:
                    dparams[i].update({"backend_type": bk_type})
                bk_dev = params.get("backend_dev_%s" % i)
                if bk_dev:
                    dparams[i].update({"backend_dev": bk_dev})
                bk_src = params.get("backend_source_%s" % i)
                if bk_src:
                    dparams[i].update({"backend_source": bk_src})
                bk_pro = params.get("backend_protocol_%s" % i)
                if bk_pro:
                    dparams[i].update({"backend_protocol": bk_pro})
                modify_rng_xml(dparams[i], False)
        else:
            modify_rng_xml(params, not test_snapshot)

        try:
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                # utils_misc.AsyncJob, consistent with the rest of the
                # file (the bare `utils` module is not in scope here)
                bgjob = utils_misc.AsyncJob(cmd)

            # Start the VM.
            vm.start()
            if start_error:
                test.fail("VM started unexpectedly")

            if test_qemu_cmd:
                if device_num > 1:
                    for i in range(device_num):
                        check_qemu_cmd(dparams[i])
                else:
                    check_qemu_cmd(params)
            if test_host:
                check_host()
            session = vm.wait_for_login()
            if test_guest:
                check_guest(session)
            session.close()
            if test_snapshot:
                check_snapshot(bgjob)
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not start_error:
                test.fail('VM failed to start, '
                          'please refer to https://bugzilla.'
                          'redhat.com/show_bug.cgi?id=1220252:'
                          '\n%s' % details)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
                virsh.snapshot_delete(vm_name, snapshot, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if bgjob:
            bgjob.kill_func()
def run(test, params, env): """ Test interafce xml options. 1.Prepare test environment,destroy or suspend a VM. 2.Edit xml and start the domain. 3.Perform test operation. 4.Recover test environment. 5.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) virsh_dargs = {'debug': True, 'ignore_status': False} if not utils_package.package_install(["lsof"]): test.cancel("Failed to install dependency package lsof" " on host") def create_iface_xml(iface_mac): """ Create interface xml file """ iface = Interface(type_name=iface_type) source = ast.literal_eval(iface_source) if source: iface.source = source iface.model = iface_model if iface_model else "virtio" iface.mac_address = iface_mac driver_dict = {} driver_host = {} driver_guest = {} if iface_driver: driver_dict = ast.literal_eval(iface_driver) if iface_driver_host: driver_host = ast.literal_eval(iface_driver_host) if iface_driver_guest: driver_guest = ast.literal_eval(iface_driver_guest) iface.driver = iface.new_driver(driver_attr=driver_dict, driver_host=driver_host, driver_guest=driver_guest) logging.debug("Create new interface xml: %s", iface) return iface def modify_iface_xml(update, status_error=False): """ Modify interface xml options """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) xml_devices = vmxml.devices iface_index = xml_devices.index( xml_devices.by_device_tag("interface")[0]) iface = xml_devices[iface_index] if iface_model: iface.model = iface_model else: del iface.model if iface_type: iface.type_name = iface_type del iface.source source = ast.literal_eval(iface_source) if source: net_ifs = utils_net.get_net_if(state="UP") # Check source device is valid or not, # if it's not in host interface list, try to set # source device to first active interface of host if (iface.type_name == "direct" and 'dev' in source and source['dev'] not in net_ifs): logging.warn("Source device %s is not a interface" " of host, reset to %s", source['dev'], net_ifs[0]) source['dev'] = 
net_ifs[0] iface.source = source backend = ast.literal_eval(iface_backend) if backend: iface.backend = backend driver_dict = {} driver_host = {} driver_guest = {} if iface_driver: driver_dict = ast.literal_eval(iface_driver) if iface_driver_host: driver_host = ast.literal_eval(iface_driver_host) if iface_driver_guest: driver_guest = ast.literal_eval(iface_driver_guest) iface.driver = iface.new_driver(driver_attr=driver_dict, driver_host=driver_host, driver_guest=driver_guest) if iface.address: del iface.address logging.debug("New interface xml file: %s", iface) if unprivileged_user: # Create disk image for unprivileged user disk_index = xml_devices.index( xml_devices.by_device_tag("disk")[0]) disk_xml = xml_devices[disk_index] logging.debug("source: %s", disk_xml.source) disk_source = disk_xml.source.attrs["file"] cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}" "".format(disk_source, dst_disk, unprivileged_user)) process.run(cmd, shell=True) disk_xml.source = disk_xml.new_disk_source( attrs={"file": dst_disk}) vmxml.devices = xml_devices # Remove all channels to avoid of permission problem channels = vmxml.get_devices(device_type="channel") for channel in channels: vmxml.del_device(channel) vmxml.xmltreefile.write() logging.debug("New VM xml: %s", vmxml) process.run("chmod a+rw %s" % vmxml.xml, shell=True) virsh.define(vmxml.xml, **virsh_dargs) # Try to modify interface xml by update-device or edit xml elif update: iface.xmltreefile.write() ret = virsh.update_device(vm_name, iface.xml, ignore_status=True) libvirt.check_exit_status(ret, status_error) else: vmxml.devices = xml_devices vmxml.xmltreefile.write() try: vmxml.sync() except xcepts.LibvirtXMLError as e: if not define_error: test.fail("Define VM fail: %s" % e) def check_offloads_option(if_name, driver_options, session=None): """ Check interface offloads by ethtool output """ offloads = {"csum": "tx-checksumming", "gso": "generic-segmentation-offload", "tso4": "tcp-segmentation-offload", "tso6": 
"tx-tcp6-segmentation", "ecn": "tx-tcp-ecn-segmentation", "ufo": "udp-fragmentation-offload"} if session: ret, output = session.cmd_status_output("ethtool -k %s | head" " -18" % if_name) else: out = process.run("ethtool -k %s | head -18" % if_name, shell=True) ret, output = out.exit_status, out.stdout_text if ret: test.fail("ethtool return error code") logging.debug("ethtool output: %s", output) for offload in list(driver_options.keys()): if offload in offloads: if (output.count(offloads[offload]) and not output.count("%s: %s" % ( offloads[offload], driver_options[offload]))): test.fail("offloads option %s: %s isn't" " correct in ethtool output" % (offloads[offload], driver_options[offload])) def run_xml_test(iface_mac): """ Test for interface options in vm xml """ # Get the interface object according the mac address vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) iface_devices = vmxml.get_devices(device_type="interface") iface = None for iface_dev in iface_devices: if iface_dev.mac_address == iface_mac: iface = iface_dev if not iface: test.fail("Can't find interface with mac" " '%s' in vm xml" % iface_mac) driver_dict = {} if iface_driver: driver_dict = ast.literal_eval(iface_driver) for driver_opt in list(driver_dict.keys()): if not driver_dict[driver_opt] == iface.driver.driver_attr[driver_opt]: test.fail("Can't see driver option %s=%s in vm xml" % (driver_opt, driver_dict[driver_opt])) if iface_target: if ("dev" not in iface.target or not iface.target["dev"].startswith(iface_target)): test.fail("Can't see device target dev in vm xml") # Check macvtap mode by ip link command if iface_target == "macvtap" and "mode" in iface.source: cmd = "ip -d link show %s" % iface.target["dev"] output = process.run(cmd, shell=True).stdout_text logging.debug("ip link output: %s", output) mode = iface.source["mode"] if mode == "passthrough": mode = "passthru" if not re.search("macvtap\s+mode %s" % mode, output): test.fail("Failed to verify macvtap mode") def 
run_cmdline_test(iface_mac): """ Test for qemu-kvm command line options """ cmd = ("ps -ef | grep %s | grep -v grep " % vm_name) ret = process.run(cmd, shell=True) logging.debug("Command line %s", ret.stdout_text) if test_vhost_net: if not ret.stdout_text.count("vhost=on") and not rm_vhost_driver: test.fail("Can't see vhost options in" " qemu-kvm command line") if iface_model == "virtio": model_option = "device virtio-net-pci" else: model_option = "device rtl8139" iface_cmdline = re.findall(r"%s,(.+),mac=%s" % (model_option, iface_mac), ret.stdout_text) if not iface_cmdline: test.fail("Can't see %s with mac %s in command" " line" % (model_option, iface_mac)) cmd_opt = {} for opt in iface_cmdline[0].split(','): tmp = opt.rsplit("=") cmd_opt[tmp[0]] = tmp[1] logging.debug("Command line options %s", cmd_opt) driver_dict = {} # Test <driver> xml options. if iface_driver: iface_driver_dict = ast.literal_eval(iface_driver) for driver_opt in list(iface_driver_dict.keys()): if driver_opt == "name": continue elif driver_opt == "txmode": if iface_driver_dict["txmode"] == "iothread": driver_dict["tx"] = "bh" else: driver_dict["tx"] = iface_driver_dict["txmode"] elif driver_opt == "queues": driver_dict["mq"] = "on" driver_dict["vectors"] = str(int( iface_driver_dict["queues"]) * 2 + 2) else: driver_dict[driver_opt] = iface_driver_dict[driver_opt] # Test <driver><host/><driver> xml options. if iface_driver_host: driver_dict.update(ast.literal_eval(iface_driver_host)) # Test <driver><guest/><driver> xml options. 
if iface_driver_guest: driver_dict.update(ast.literal_eval(iface_driver_guest)) for driver_opt in list(driver_dict.keys()): if (driver_opt not in cmd_opt or not cmd_opt[driver_opt] == driver_dict[driver_opt]): test.fail("Can't see option '%s=%s' in qemu-kvm " " command line" % (driver_opt, driver_dict[driver_opt])) logging.info("Find %s=%s in qemu-kvm command line" % (driver_opt, driver_dict[driver_opt])) if test_backend: guest_pid = ret.stdout_text.rsplit()[1] cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid) if process.system(cmd, ignore_status=True, shell=True): test.fail("Guest process didn't open backend file" " %s" % backend["tap"]) cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid) if process.system(cmd, ignore_status=True, shell=True): test.fail("Guest process didn't open backend file" " %s" % backend["vhost"]) def get_guest_ip(session, mac): """ Wrapper function to get guest ip address """ utils_net.restart_guest_network(session, mac) # Wait for IP address is ready utils_misc.wait_for( lambda: utils_net.get_guest_ip_addr(session, mac), 10) return utils_net.get_guest_ip_addr(session, mac) def check_user_network(session): """ Check user network ip address on guest """ vm_ips = [] vm_ips.append(get_guest_ip(session, iface_mac_old)) if attach_device: vm_ips.append(get_guest_ip(session, iface_mac)) logging.debug("IP address on guest: %s", vm_ips) if len(vm_ips) != len(set(vm_ips)): test.fail("Duplicated IP address on guest. " "Check bug: https://bugzilla.redhat." 
"com/show_bug.cgi?id=1147238") for vm_ip in vm_ips: if vm_ip is None or not vm_ip.startswith("10.0.2."): test.fail("Found wrong IP address" " on guest") # Check gateway address gateway = utils_net.get_net_gateway(session.cmd_output) if gateway != "10.0.2.2": test.fail("The gateway on guest is not" " right") # Check dns server address ns_list = utils_net.get_net_nameserver(session.cmd_output) if "10.0.2.3" not in ns_list: test.fail("The dns server can't be found" " on guest") def check_mcast_network(session): """ Check multicast ip address on guests """ username = params.get("username") password = params.get("password") src_addr = ast.literal_eval(iface_source)['address'] add_session = additional_vm.wait_for_serial_login(username=username, password=password) vms_sess_dict = {vm_name: session, additional_vm.name: add_session} # Check mcast address on host cmd = "netstat -g | grep %s" % src_addr if process.run(cmd, ignore_status=True, shell=True).exit_status: test.fail("Can't find multicast ip address" " on host") vms_ip_dict = {} # Get ip address on each guest for vms in list(vms_sess_dict.keys()): vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms) vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac) if not vm_ip: test.fail("Can't get multicast ip" " address on guest") vms_ip_dict.update({vms: vm_ip}) if len(set(vms_ip_dict.values())) != len(vms_sess_dict): test.fail("Got duplicated multicast ip address") logging.debug("Found ips on guest: %s", vms_ip_dict) # Run omping server on host if not utils_package.package_install(["omping"]): test.error("Failed to install omping" " on host") cmd = ("iptables -F;omping -m %s %s" % (src_addr, "192.168.122.1 %s" % ' '.join(list(vms_ip_dict.values())))) # Run a backgroup job waiting for connection of client bgjob = utils_misc.AsyncJob(cmd) # Run omping client on guests for vms in list(vms_sess_dict.keys()): # omping should be installed first if not utils_package.package_install(["omping"], vms_sess_dict[vms]): test.error("Failed to 
install omping" " on guest") cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" % (src_addr, "192.168.122.1 %s" % vms_ip_dict[vms])) ret, output = vms_sess_dict[vms].cmd_status_output(cmd) logging.debug("omping ret: %s, output: %s", ret, output) if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%') or not output.count('unicast, xmt/rcv/%loss = 5/5/0%')): test.fail("omping failed on guest") # Kill the backgroup job bgjob.kill_func() status_error = "yes" == params.get("status_error", "no") start_error = "yes" == params.get("start_error", "no") define_error = "yes" == params.get("define_error", "no") unprivileged_user = params.get("unprivileged_user") # Interface specific attributes. iface_type = params.get("iface_type", "network") iface_source = params.get("iface_source", "{}") iface_driver = params.get("iface_driver") iface_model = params.get("iface_model", "virtio") iface_target = params.get("iface_target") iface_backend = params.get("iface_backend", "{}") iface_driver_host = params.get("iface_driver_host") iface_driver_guest = params.get("iface_driver_guest") attach_device = params.get("attach_iface_device") expect_tx_size = params.get("expect_tx_size") change_option = "yes" == params.get("change_iface_options", "no") update_device = "yes" == params.get("update_iface_device", "no") additional_guest = "yes" == params.get("additional_guest", "no") serial_login = "******" == params.get("serial_login", "no") rm_vhost_driver = "yes" == params.get("rm_vhost_driver", "no") test_option_cmd = "yes" == params.get( "test_iface_option_cmd", "no") test_option_xml = "yes" == params.get( "test_iface_option_xml", "no") test_vhost_net = "yes" == params.get( "test_vhost_net", "no") test_option_offloads = "yes" == params.get( "test_option_offloads", "no") test_iface_user = "******" == params.get( "test_iface_user", "no") test_iface_mcast = "yes" == params.get( "test_iface_mcast", "no") test_libvirtd = "yes" == params.get("test_libvirtd", "no") test_guest_ip = "yes" == 
params.get("test_guest_ip", "no") test_backend = "yes" == params.get("test_backend", "no") check_guest_trans = "yes" == params.get("check_guest_trans", "no") if iface_driver_host or iface_driver_guest or test_backend: if not libvirt_version.version_compare(1, 2, 8): test.cancel("Offloading/backend options not " "supported in this libvirt version") if iface_driver and "queues" in ast.literal_eval(iface_driver): if not libvirt_version.version_compare(1, 0, 6): test.cancel("Queues options not supported" " in this libvirt version") if unprivileged_user: if not libvirt_version.version_compare(1, 1, 1): test.cancel("qemu-bridge-helper not supported" " on this host") virsh_dargs["unprivileged_user"] = unprivileged_user # Create unprivileged user if needed cmd = ("grep {0} /etc/passwd || " "useradd {0}".format(unprivileged_user)) process.run(cmd, shell=True) # Need another disk image for unprivileged user to access dst_disk = "/tmp/%s.img" % unprivileged_user # Destroy VM first if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name) # iface_mac will update if attach a new interface iface_mac = iface_mac_old # Additional vm for test additional_vm = None libvirtd = utils_libvirtd.Libvirtd() try: # Build the xml and run test. try: # Prepare interface backend files if test_backend: if not os.path.exists("/dev/vhost-net"): process.run("modprobe vhost-net", shell=True) backend = ast.literal_eval(iface_backend) backend_tap = "/dev/net/tun" backend_vhost = "/dev/vhost-net" if not backend: backend["tap"] = backend_tap backend["vhost"] = backend_vhost if not start_error: # Create backend files for normal test if not os.path.exists(backend["tap"]): os.rename(backend_tap, backend["tap"]) if not os.path.exists(backend["vhost"]): os.rename(backend_vhost, backend["vhost"]) # Edit the interface xml. 
if change_option: modify_iface_xml(update=False) if define_error: return if rm_vhost_driver: # remove vhost driver on host and # the character file /dev/vhost-net cmd = ("modprobe -r {0}; " "rm -f /dev/vhost-net".format("vhost_net")) if process.system(cmd, ignore_status=True, shell=True): test.error("Failed to remove vhost_net driver") else: # Load vhost_net driver by default cmd = "modprobe vhost_net" process.system(cmd, shell=True) # Attach a interface when vm is shutoff if attach_device == 'config': iface_mac = utils_net.generate_mac_address_simple() iface_xml_obj = create_iface_xml(iface_mac) iface_xml_obj.xmltreefile.write() ret = virsh.attach_device(vm_name, iface_xml_obj.xml, flagstr="--config", ignore_status=True) libvirt.check_exit_status(ret) # Clone additional vm if additional_guest: guest_name = "%s_%s" % (vm_name, '1') # Clone additional guest timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True, timeout=timeout) additional_vm = vm.clone(guest_name) additional_vm.start() # additional_vm.wait_for_login() # Start the VM. 
if unprivileged_user: virsh.start(vm_name, **virsh_dargs) cmd = ("su - %s -c 'virsh console %s'" % (unprivileged_user, vm_name)) session = aexpect.ShellSession(cmd) session.sendline() remote.handle_prompts(session, params.get("username"), params.get("password"), r"[\#\$]\s*$", 30) # Get ip address on guest if not get_guest_ip(session, iface_mac): test.error("Can't get ip address on guest") else: # Will raise VMStartError exception if start fails vm.start() if serial_login: session = vm.wait_for_serial_login() else: session = vm.wait_for_login() if start_error: test.fail("VM started unexpectedly") # Attach a interface when vm is running if attach_device == 'live': iface_mac = utils_net.generate_mac_address_simple() iface_xml_obj = create_iface_xml(iface_mac) iface_xml_obj.xmltreefile.write() ret = virsh.attach_device(vm_name, iface_xml_obj.xml, flagstr="--live", ignore_status=True, debug=True) libvirt.check_exit_status(ret, status_error) # Need sleep here for attachment take effect time.sleep(5) # Update a interface options if update_device: modify_iface_xml(update=True, status_error=status_error) # Run tests for qemu-kvm command line options if test_option_cmd: run_cmdline_test(iface_mac) # Run tests for vm xml if test_option_xml: run_xml_test(iface_mac) # Run tests for offloads options if test_option_offloads: if iface_driver_host: ifname_guest = utils_net.get_linux_ifname( session, iface_mac) check_offloads_option( ifname_guest, ast.literal_eval( iface_driver_host), session) if iface_driver_guest: ifname_host = libvirt.get_ifname_host(vm_name, iface_mac) check_offloads_option( ifname_host, ast.literal_eval(iface_driver_guest)) if test_iface_user: # Test user type network check_user_network(session) if test_iface_mcast: # Test mcast type network check_mcast_network(session) # Check guest ip address if test_guest_ip: if not get_guest_ip(session, iface_mac): test.fail("Guest can't get a" " valid ip address") # Check guest RX/TX ring if check_guest_trans: 
ifname_guest = utils_net.get_linux_ifname(session, iface_mac) ret, outp = session.cmd_status_output("ethtool -g %s" % ifname_guest) if ret: test.fail("ethtool return error code") logging.info("ethtool output is %s", outp) driver_dict = ast.literal_eval(iface_driver) if expect_tx_size: driver_dict['tx_queue_size'] = expect_tx_size for outp_p in outp.split("Current hardware"): if 'rx_queue_size' in driver_dict: if re.search("RX:\s*%s" % driver_dict['rx_queue_size'], outp_p): logging.info("Find RX setting RX:%s by ethtool", driver_dict['rx_queue_size']) else: test.fail("Cannot find matching rx setting") if 'tx_queue_size' in driver_dict: if re.search("TX:\s*%s" % driver_dict['tx_queue_size'], outp_p): logging.info("Find TX settint TX:%s by ethtool", driver_dict['tx_queue_size']) else: test.fail("Cannot find matching tx setting") session.close() # Restart libvirtd and guest, then test again if test_libvirtd: libvirtd.restart() vm.destroy() vm.start() if test_option_xml: run_xml_test(iface_mac) # Detach hot/cold-plugged interface at last if attach_device and not status_error: ret = virsh.detach_device(vm_name, iface_xml_obj.xml, flagstr="", ignore_status=True) libvirt.check_exit_status(ret) except virt_vm.VMStartError as e: logging.info(str(e)) if not start_error: test.fail('VM failed to start\n%s' % e) finally: # Recover VM. 
logging.info("Restoring vm...") # Restore interface backend files if test_backend: if not os.path.exists(backend_tap): os.rename(backend["tap"], backend_tap) if not os.path.exists(backend_vhost): os.rename(backend["vhost"], backend_vhost) if rm_vhost_driver: # Restore vhost_net driver process.system("modprobe vhost_net", shell=True) if unprivileged_user: virsh.remove_domain(vm_name, "--remove-all-storage", **virsh_dargs) if additional_vm: virsh.remove_domain(additional_vm.name, "--remove-all-storage") # Kill all omping server process on host process.system("pidof omping && killall omping", ignore_status=True, shell=True) if vm.is_alive(): vm.destroy(gracefully=False) vmxml_backup.sync()
logging.info('Set env %s=%s' % (virtio_win_env, iso_path)) os.environ[virtio_win_env] = iso_path if checkpoint == 'cdrom': xml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=virsh_instance) logging.debug(xml.xmltreefile) disks = xml.get_disk_all() logging.debug('Disks: %r', disks) for disk in disks.values(): # Check if vm has cdrom attached if disk.get('device') == 'cdrom' and disk.find('source') is None: test.error('No CDROM image attached') if checkpoint == 'vdsm': extra_pkg = params.get('extra_pkg') logging.info('Install %s', extra_pkg) utils_package.package_install(extra_pkg.split(',')) # Backup conf file for recovery for conf in params['bk_conf'].strip().split(','): logging.debug('Back up %s', conf) shutil.copyfile(conf, conf + '.bk') logging.info('Configure libvirt for vdsm') process.run('vdsm-tool configure --force') logging.info('Start vdsm service') service_manager = service.Factory.create_generic_service() service_manager.start('vdsmd') # Setup user and password user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
def run(test, params, env):
    """
    libvirt smt test:
    1) prepare the guest with given topology
    2) Start and login to the guest
    3) Check for ppc64_cpu --smt and smt should be on
    4) ppc64_cpu --smt=off and smt should be off
    5) ppc64_cpu --smt=on and smt should be on
    6) Check for core present using ppc64_cpu
    7) Check for online core using ppc64_cpu
    8) Check for lscpu for thread, core, socket info updated properly
    9) Change the number of cores and check in lscpu

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    error_count = 0

    def smt_check(vm, cmd, output, extra=None, ignorestatus=False):
        """
        Run and check SMT command inside guest

        :param vm: VM object
        :param cmd: Given smt command
        :param output: Expected output
        :param extra: Extra output to be added
        :param ignorestatus: True or False to ignore status
        :return: error count
        """
        err_count = 0
        session = vm.wait_for_login()
        actual_output = session.cmd_output(cmd).strip()
        # Fetch the exit status of the previous command separately so both
        # the text and the status can be validated.
        return_output = session.cmd_output('echo $?').strip()
        if extra:
            expected_output = output + extra
        else:
            expected_output = output
        if expected_output != actual_output:
            logging.error("Command: %s failed\nActual output: %s\nExpected "
                          "output: %s", cmd, actual_output, expected_output)
            if int(return_output) == 0 and not ignorestatus:
                logging.error("Command: %s returned zero"
                              "\n Expecting a non zero number", cmd)
            err_count = 1
        else:
            if int(return_output) != 0 and not ignorestatus:
                logging.error("Command: %s returned non-zero"
                              "\n Expecting zero", cmd)
                err_count += 1
            else:
                logging.debug("Command: %s ran successfully", cmd)
        session.close()
        return err_count

    def cpus_info(vm, env="guest"):
        """
        To get host cores, threads, sockets in the system

        :param vm: VM object
        :param env: guest or host
        :return: cpu sockets, cores, threads info as list
        """
        if "guest" in env:
            session = vm.wait_for_login()
            output = session.cmd_output("lscpu")
        else:
            output = process.system_output("lscpu", shell=True)
        # lscpu output may be bytes (host) or str (guest); normalize once
        # before matching text patterns.
        output = str(output)
        no_cpus = int(re.findall(r'CPU\(s\):\s*(\d+)', output)[0])
        no_threads = int(re.findall(r'Thread\(s\)\sper\score:\s*(\d+)',
                                    output)[0])
        no_cores = int(re.findall(r'Core\(s\)\sper\ssocket:\s*(\d+)',
                                  output)[0])
        no_sockets = int(re.findall(r'Socket\(s\):\s*(\d+)', output)[0])
        cpu_info = [no_cpus, no_threads, no_cores, no_sockets]
        if "guest" in env:
            session.close()
        return cpu_info

    vm_name = params.get("main_vm")
    smt_chk_cmd = params.get("smt_chk_cmd", "ppc64_cpu --smt")
    smt_on_cmd = params.get("smt_on_cmd", "ppc64_cpu --smt=on")
    smt_off_cmd = params.get("smt_off_cmd", "ppc64_cpu --smt=off")
    smt_core_pst_cmd = params.get("smt_core_present_cmd",
                                  "ppc64_cpu --cores-present")
    smt_core_on_cmd = params.get("smt_core_on_cmd", "ppc64_cpu --cores-on")
    smt_chk_on_output = params.get("smt_chk_on_output", "SMT is on")
    smt_chk_off_output = params.get("smt_chk_off_output", "SMT is off")
    smt_core_pst_output = params.get("smt_core_pst_output",
                                     "Number of cores present =")
    smt_core_on_output = params.get("smt_core_on_output",
                                    "Number of cores online =")
    smt_threads_per_core_cmd = params.get("smt_threads_per_core_cmd",
                                          "ppc64_cpu --threads-per-core")
    smt_threads_per_core_output = params.get("smt_threads_per_core_ouput",
                                             "Threads per core:")
    status_error = params.get("status_error", "no") == "yes"
    ignore_status = params.get("ignore_status", "no") == "yes"
    smt_number = params.get("smt_number", None)
    max_vcpu = current_vcpu = int(params.get("smp", 8))
    vm_cores = int(params.get("smt_vcpu_cores", 8))
    vm_threads = int(params.get("smt_vcpu_threads", 1))
    vm_sockets = int(params.get("smt_vcpu_sockets", 1))
    vm = env.get_vm(vm_name)

    output = process.system_output(smt_threads_per_core_cmd, shell=True)
    try:
        # system_output() returns bytes on Python 3: convert to str before
        # matching a text pattern.  Previously the resulting TypeError was
        # swallowed by a bare except and misreported as "unable to get the
        # host threads".
        host_threads = int(re.findall(r'Threads per core:\s+(\d+)',
                                      str(output))[0])
    except (IndexError, ValueError):
        test.cancel("Unable to get the host threads")

    if vm_threads > host_threads and not status_error:
        test.cancel("Host threads is less than requested guest threads")

    logging.info("Guest: cores:%d, threads:%d, sockets:%d", vm_cores,
                 vm_threads, vm_sockets)
    # Guard so the finally-clause does not hit a NameError when the
    # initial dumpxml itself fails.
    org_xml = None
    try:
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        org_xml = vmxml.copy()
        # try installing powerpc-utils in guest if not skip
        try:
            session = vm.wait_for_login()
            utils_package.package_install(["powerpc-utils"], session, 360)
            session.close()
        except Exception:
            test.cancel("Unable to install powerpc-utils package in guest")
        # Initial Setup of vm
        vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                           vm_sockets, vm_cores, vm_threads)
        if not vm.is_alive():
            vm.start()

        # Changing the smt number
        if smt_number:
            smt_chk_cmd_mod = "%s=%s" % (smt_chk_cmd, smt_number)
            error_count += smt_check(vm, smt_chk_cmd_mod, "")

        guest_cpu_details = cpus_info(vm)
        # Step 10: Check for threads, cores, sockets
        if vm_cores != guest_cpu_details[2]:
            logging.error("Number of cores mismatch:\nExpected number of "
                          "cores: %s\nActual number of cores: %s", vm_cores,
                          guest_cpu_details[2])
            error_count += 1
        if smt_number:
            threads = int(smt_number)
        else:
            threads = vm_threads
        if threads != guest_cpu_details[1]:
            logging.error("Number of threads mismatch:\nExpected number of "
                          "threads: %s\nActual number of threads: %s",
                          threads, guest_cpu_details[1])
            error_count += 1
        if vm_sockets != guest_cpu_details[3]:
            logging.error("Number of sockets mismatch:\nExpected number of "
                          "sockets: %s\nActual number of sockets: %s",
                          vm_sockets, guest_cpu_details[3])
            error_count += 1

        error_count += smt_check(vm, smt_chk_cmd, smt_chk_on_output,
                                 ignorestatus=ignore_status)
        session = vm.wait_for_login()
        session.cmd_output(smt_off_cmd)
        session.close()
        error_count += smt_check(vm, smt_chk_cmd, smt_chk_off_output,
                                 ignorestatus=ignore_status)

        cores = vm_cores * vm_sockets
        extra = " %s" % cores
        error_count += smt_check(vm, smt_core_pst_cmd,
                                 smt_core_pst_output, extra)
        extra = " %s" % cores
        error_count += smt_check(vm, smt_core_on_cmd, smt_core_on_output,
                                 extra)
        extra = " %s" % vm_threads
        error_count += smt_check(vm, smt_threads_per_core_cmd,
                                 smt_threads_per_core_output, extra)

        # Changing the cores
        cores -= 1
        while cores > 1:
            smt_core_on_cmd_mod = "%s=%s" % (smt_core_on_cmd, cores)
            error_count += smt_check(vm, smt_core_on_cmd_mod, "")
            extra = " %s" % cores
            error_count += smt_check(vm, smt_core_on_cmd,
                                     smt_core_on_output, extra)
            guest_cpu_details = cpus_info(vm)
            if cores != (guest_cpu_details[3] * guest_cpu_details[2]):
                logging.error("The core changes through command: %s not "
                              "reflected in lscpu output",
                              smt_core_on_cmd_mod)
                error_count += 1
            cores -= 1
            # wait for sometime before next change of cores
            time.sleep(5)

        if error_count > 0:
            test.fail("The SMT feature has issue, please consult "
                      "previous errors more details")
    finally:
        if org_xml:
            org_xml.sync()
def setup_test_env(params, test):
    """
    Prepare test env for OVMF, Seabios, Gluster, Ceph and download the
    testing image

    :param params: Avocado params object
    :param test: Avocado test object
    """
    boot_type = params.get("boot_type", "seabios")
    source_protocol = params.get("source_protocol", "")
    boot_dev = params.get("boot_dev", "hd")
    boot_iso_url = params.get("boot_iso_url", "EXAMPLE_BOOT_ISO_URL")
    boot_iso_file = os.path.join(data_dir.get_tmp_dir(), "boot.iso")
    non_release_os_url = params.get("non_release_os_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(),
                                      "non_released_os.qcow2")
    # Ceph (rbd) access parameters; only validated in the rbd branch below.
    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    client_name = params.get("client_name")
    client_key = params.get("client_key")
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    # Flags read by the caller's cleanup path.
    global cleanup_iso_file
    global cleanup_image_file
    # Default to "" so a missing "os_version" param cannot raise
    # AttributeError on the .count()/.split() calls below.
    os_version = params.get("os_version", "")
    if not os_version.count("EXAMPLE"):
        # Keep only the major version, e.g. "RHEL-7.6" -> "RHEL-7".
        os_version = os_version.split(".")[0]

    if boot_type == "ovmf":
        if not libvirt_version.version_compare(2, 0, 0):
            test.error("OVMF doesn't support in current"
                       " libvirt version.")

        if not utils_package.package_install('OVMF'):
            test.error("OVMF package install failed")

        # RHEL-7 ships the RHEV qemu build under a different package name.
        if os_version == "RHEL-7" and not \
                utils_package.package_install('qemu-kvm-rhev'):
            test.error("qemu-kvm-rhev package install failed")
        elif not utils_package.package_install('qemu-kvm'):
            test.error("qemu-kvm package install failed")

    if boot_type == "seabios" and \
            not utils_package.package_install('seabios-bin'):
        test.error("seabios package install failed")

    if source_protocol == "gluster" and \
            not utils_package.package_install('glusterfs-server'):
        test.error("glusterfs-server install failed")

    if source_protocol == "rbd":
        if utils_package.package_install("ceph-common"):
            # All ceph access params must be real values, not the
            # EXAMPLE placeholders from the sample config.
            if disk_src_host.count("EXAMPLE") or \
                    disk_src_port.count("EXAMPLE") or \
                    disk_src_name.count("EXAMPLE") or \
                    mon_host.count("EXAMPLE") or \
                    client_name.count("EXAMPLE") or \
                    client_key.count("EXAMPLE"):
                test.cancel("Please provide access info of the ceph")
            with open(key_file, 'w') as f:
                f.write("[%s]\n\tkey = %s\n" % (client_name, client_key))
            key_opt = "--keyring %s" % key_file
            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("ceph-common install failed")

    if boot_dev == "cdrom":
        if download_file(boot_iso_url, boot_iso_file, test):
            cleanup_iso_file = True

    if non_release_os_url:
        if download_file(non_release_os_url, download_file_path, test):
            cleanup_image_file = True
def run(test, params, env):
    """
    Test start domain with nwfilter rules.

    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    exist_filter = params.get("exist_filter", "no-mac-spoofing")
    check_cmd = params.get("check_cmd")
    expect_match = params.get("expect_match")
    status_error = "yes" == params.get("status_error", "no")
    mount_noexec_tmp = "yes" == params.get("mount_noexec_tmp", "no")
    kill_libvirtd = "yes" == params.get("kill_libvirtd", "no")
    bug_url = params.get("bug_url", "")
    ipset_command = params.get("ipset_command")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    username = params.get("username")
    password = params.get("password")

    # Prepare vm filterref parameters dict list.  The cfg provides pairs of
    # parameter_name_N / parameter_value_N keys; collect them in index order.
    filter_param_list = []
    params_key = []
    for i in params.keys():
        if 'parameter_name_' in i:
            params_key.append(i)
    params_key.sort()
    for i in range(len(params_key)):
        params_dict = {}
        params_dict['name'] = params[params_key[i]]
        params_dict['value'] = params['parameter_value_%s' % i]
        filter_param_list.append(params_dict)
    filterref_dict = {}
    filterref_dict['name'] = filter_name
    filterref_dict['parameters'] = filter_param_list

    # backup vm xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    libvirtd = utils_libvirtd.Libvirtd()
    device_name = None
    try:
        rule = params.get("rule")
        if rule:
            # Create new filter xml
            filterxml = utlv.create_nwfilter_xml(params)
            # Define filter xml
            virsh.nwfilter_define(filterxml.xml, debug=True)

        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()

        if mount_noexec_tmp:
            # Back /tmp with an iscsi-based ext4 fs mounted noexec,nosuid to
            # exercise libvirt behaviour when /tmp is not executable.
            device_name = utlv.setup_or_cleanup_iscsi(is_setup=True)
            utlv.mkfs(device_name, 'ext4')
            cmd = "mount %s /tmp -o noexec,nosuid" % device_name
            process.run(cmd, shell=True)

        if ipset_command:
            pkg = "ipset"
            if not utils_package.package_install(pkg):
                test.cancel("Can't install ipset on host")
            process.run(ipset_command, shell=True)

        # Run command
        try:
            vm.start()
            if not mount_noexec_tmp:
                vm.wait_for_serial_login(username=username,
                                         password=password)
            vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            iface_xml = vmxml.get_devices('interface')[0]
            iface_target = iface_xml.target['dev']
            logging.debug("iface target dev name is %s", iface_target)

            # Check iptables or ebtables on host
            if check_cmd:
                if "DEVNAME" in check_cmd:
                    check_cmd = check_cmd.replace("DEVNAME", iface_target)
                # Rules may take a moment to appear after domain start.
                ret = utils_misc.wait_for(lambda: not
                                          process.system(check_cmd,
                                                         ignore_status=True,
                                                         shell=True),
                                          timeout=30)
                if not ret:
                    # Fixed message typo: was "Rum command".
                    test.fail("Run command '%s' failed" % check_cmd)
                out = to_text(process.system_output(check_cmd,
                                                    ignore_status=False,
                                                    shell=True))
                if expect_match and not re.search(expect_match, out):
                    test.fail("'%s' not found in output: %s"
                              % (expect_match, out))
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        if kill_libvirtd:
            cmd = "kill -s TERM `pidof libvirtd`"
            process.run(cmd, shell=True)
            ret = utils_misc.wait_for(lambda: not libvirtd.is_running(),
                                      timeout=30)
            if not ret:
                test.fail("Failed to kill libvirtd. %s" % bug_url)
    finally:
        if kill_libvirtd:
            libvirtd.restart()
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Undefine created filter
        if filter_name != exist_filter:
            virsh.nwfilter_undefine(filter_name, debug=True)
        if mount_noexec_tmp:
            if device_name:
                process.run("umount -l %s" % device_name,
                            ignore_status=True, shell=True)
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        if ipset_command:
            process.run("ipset destroy blacklist", shell=True)
def run(test, params, env):
    """
    Test command: virsh dump.

    This command can dump the core of a domain to a file for analysis.
    1. Positive testing
        1.1 Dump domain with valid options.
        1.2 Avoid file system cache when dumping.
        1.3 Compress the dump images to valid/invalid formats.
    2. Negative testing
        2.1 Dump domain to a non-exist directory.
        2.2 Dump domain with invalid option.
        2.3 Dump a shut-off domain.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    options = params.get("dump_options")
    dump_file = params.get("dump_file", "vm.core")
    dump_dir = params.get("dump_dir", data_dir.get_tmp_dir())
    # Was `is ""`: identity comparison of a string literal relies on
    # interning and warns on Python >= 3.8; use equality instead.
    if os.path.dirname(dump_file) == "":
        dump_file = os.path.join(dump_dir, dump_file)
    dump_image_format = params.get("dump_image_format")
    start_vm = params.get("start_vm") == "yes"
    paused_after_start_vm = params.get("paused_after_start_vm") == "yes"
    status_error = params.get("status_error", "no") == "yes"
    check_bypass_timeout = int(params.get("check_bypass_timeout", "120"))
    memory_dump_format = params.get("memory_dump_format", "")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    def check_flag(file_flags):
        """
        Check if file flag include O_DIRECT.

        :param file_flags: The flags of dumped file

        Note, O_DIRECT(direct disk access hint) is defined as:
        on x86_64:
        #define O_DIRECT        00040000
        on ppc64le or arch64:
        #define O_DIRECT        00200000
        """
        arch = platform.machine()
        file_flag_check = int('00040000', 16)
        if 'ppc64' in arch or 'aarch64' in arch:
            file_flag_check = int('00200000', 16)
        if int(file_flags, 16) & file_flag_check == file_flag_check:
            logging.info("File flags include O_DIRECT")
            return True
        else:
            logging.error("File flags doesn't include O_DIRECT")
            return False

    def check_bypass(dump_file, result_dict):
        """
        Get the file flags of domain core dump file and check it.

        Runs in a child process; the verdict is reported back through
        result_dict['bypass'] ('' means success).
        """
        error = ''
        cmd1 = "lsof -w %s" % dump_file
        while True:
            # Poll until the dump file exists and some process has it open.
            if not os.path.exists(dump_file) or process.system(cmd1):
                time.sleep(0.1)
                continue
            cmd2 = ("cat /proc/$(%s |awk '/libvirt_i/{print $2}')/fdinfo/1"
                    "|grep flags|awk '{print $NF}'" % cmd1)
            ret = process.run(cmd2, allow_output_check='combined', shell=True)
            status, output = ret.exit_status, ret.stdout_text.strip()
            if status:
                error = "Fail to get the flags of dumped file"
                logging.error(error)
                break
            if not len(output):
                continue
            try:
                logging.debug("The flag of dumped file: %s", output)
                if check_flag(output):
                    logging.info("Bypass file system cache "
                                 "successfully when dumping")
                    break
                else:
                    error = "Bypass file system cache fail when dumping"
                    logging.error(error)
                    break
            except (ValueError, IndexError) as detail:
                error = detail
                logging.error(error)
                break
        result_dict['bypass'] = error

    def check_domstate(actual, options):
        """
        Check the domain status according to dump options.
        """
        if options.find('live') >= 0:
            domstate = "running"
            if options.find('crash') >= 0 or options.find('reset') > 0:
                domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        elif options.find('crash') >= 0:
            domstate = "shut off"
            if options.find('reset') >= 0:
                domstate = "running"
        elif options.find('reset') >= 0:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        else:
            domstate = "running"
            if paused_after_start_vm:
                domstate = "paused"
        if not start_vm:
            domstate = "shut off"
        logging.debug("Domain should %s after run dump %s", domstate, options)
        return (domstate == actual)

    def check_dump_format(dump_image_format, dump_file):
        """
        Check the format of dumped file.

        If 'dump_image_format' is not specified or invalid in qemu.conf,
        then the file should be normal raw file, otherwise it should be
        compressed to the specified format; the supported compress formats
        are: lzop, gzip, bzip2, and xz.
        For memory-only dump, the default dump format is ELF, and it can
        also specify format by --format option, the result could be 'elf'
        or 'data'.
        """
        valid_format = ["lzop", "gzip", "bzip2", "xz", 'elf', 'data']
        if len(dump_image_format) == 0 or dump_image_format not in valid_format:
            logging.debug("No need check the dumped file format")
            return True
        else:
            file_cmd = "file %s" % dump_file
            ret = process.run(file_cmd, allow_output_check='combined',
                              shell=True)
            status, output = ret.exit_status, ret.stdout_text.strip()
            if status:
                logging.error("Fail to check dumped file %s", dump_file)
                return False
            logging.debug("Run file %s output: %s", dump_file, output)
            actual_format = output.split(" ")[1]
            if actual_format.lower() != dump_image_format.lower():
                logging.error("Compress dumped file to %s fail: %s" %
                              (dump_image_format, actual_format))
                return False
            else:
                return True

    # Configure dump_image_format in /etc/libvirt/qemu.conf.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Install lsof pkg if not installed
    if not utils_package.package_install("lsof"):
        test.cancel("Failed to install lsof in host\n")

    if len(dump_image_format):
        qemu_config.dump_image_format = dump_image_format
        libvirtd.restart()

    # Deal with memory-only dump format
    if len(memory_dump_format):
        # Make sure libvirt support this option
        if virsh.has_command_help_match("dump", "--format") is None:
            test.cancel("Current libvirt version doesn't support"
                        " --format option for dump command")
        # Make sure QEMU support this format
        query_cmd = '{"execute":"query-dump-guest-memory-capability"}'
        qemu_capa = virsh.qemu_monitor_command(vm_name, query_cmd).stdout
        if (memory_dump_format not in qemu_capa) and not status_error:
            test.cancel("Unsupported dump format '%s' for"
                        " this QEMU binary" % memory_dump_format)
        options += " --format %s" % memory_dump_format
        if memory_dump_format == 'elf':
            dump_image_format = 'elf'
        if memory_dump_format in ['kdump-zlib', 'kdump-lzo', 'kdump-snappy']:
            dump_image_format = 'data'

    # Back up xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    dump_guest_core = params.get("dump_guest_core", "")
    if dump_guest_core not in ["", "on", "off"]:
        test.error("invalid dumpCore value: %s" % dump_guest_core)

    try:
        # Set dumpCore in guest xml
        if dump_guest_core:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.dumpcore = dump_guest_core
            vmxml.sync()
            vm.start()
            # check qemu-kvm cmdline
            vm_pid = vm.get_pid()
            cmd = "cat /proc/%d/cmdline|xargs -0 echo" % vm_pid
            cmd += "|grep dump-guest-core=%s" % dump_guest_core
            result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("cmdline: %s" % result.stdout_text)
            if result.exit_status:
                test.fail("Not find dump-guest-core=%s in qemu cmdline"
                          % dump_guest_core)
            else:
                # Fixed log typo: was "qemum cmdline".
                logging.info("Find dump-guest-core=%s in qemu cmdline",
                             dump_guest_core)

        # Deal with bypass-cache option
        if options.find('bypass-cache') >= 0:
            vm.wait_for_login()
            # Watch the dump file flags from a separate process while the
            # dump is running.
            result_dict = multiprocessing.Manager().dict()
            child_process = multiprocessing.Process(target=check_bypass,
                                                    args=(dump_file,
                                                          result_dict))
            child_process.start()

        # Run virsh command
        cmd_result = virsh.dump(vm_name, dump_file, options,
                                unprivileged_user=unprivileged_user,
                                uri=uri,
                                ignore_status=True, debug=True)
        status = cmd_result.exit_status
        if 'child_process' in locals():
            child_process.join(timeout=check_bypass_timeout)
            params['bypass'] = result_dict['bypass']

        logging.info("Start check result")
        if not check_domstate(vm.state(), options):
            test.fail("Domain status check fail.")
        if status_error:
            if not status:
                test.fail("Expect fail, but run successfully")
        else:
            if status:
                test.fail("Expect succeed, but run fail")
            if not os.path.exists(dump_file):
                test.fail("Fail to find domain dumped file.")
            if check_dump_format(dump_image_format, dump_file):
                logging.info("Successfully dump domain to %s", dump_file)
            else:
                test.fail("The format of dumped file is wrong.")
        if params.get('bypass'):
            test.fail(params['bypass'])
    finally:
        backup_xml.sync()
        qemu_config.restore()
        libvirtd.restart()
        if os.path.isfile(dump_file):
            os.remove(dump_file)
def run(test, params, env):
    """
    KVM migration test:
    1) Get a live VM and clone it.
    2) Verify that the source VM supports migration. If it does, proceed with
            the test.
    3) Send a migration command to the source VM and wait until it's finished.
    4) Kill off the source VM.
    3) Log into the destination VM after the migration is finished.
    4) Compare the output of a reference command executed on the source with
            the output of the same command on the destination machine.

    :param test: QEMU test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    def guest_stress_start(guest_stress_test):
        """
        Start a stress test in guest, Could be 'iozone', 'dd', 'stress'

        :param type: type of stress test.
        """
        # Local import: only needed when a stress workload is requested.
        from generic.tests import autotest_control
        timeout = 0
        if guest_stress_test == "autotest":
            test_type = params.get("test_type")
            func = autotest_control.run
            new_params = params.copy()
            new_params["test_control_file"] = "%s.control" % test_type
            args = (test, new_params, env)
            # Autotest needs extra time to deploy inside the guest.
            timeout = 60
        elif guest_stress_test == "dd":
            # NOTE(review): env.get_vm usually takes only the VM name;
            # passing `env` as an extra first argument looks wrong — confirm
            # against the avocado-vt Env.get_vm signature.
            vm = env.get_vm(env, params.get("main_vm"))
            vm.verify_alive()
            session = vm.wait_for_login(timeout=login_timeout)
            func = session.cmd_output
            # Endless dd loop; killed later via stress_stop_cmd or VM teardown.
            args = ("for((;;)) do dd if=/dev/zero of=/tmp/test bs=5M "
                    "count=100; rm -f /tmp/test; done",
                    login_timeout, logging.info)
        logging.info("Start %s test in guest", guest_stress_test)
        bg = utils_test.BackgroundTest(func, args)
        # Handle is stashed in params so guest_stress_deamon() can watch it.
        params["guest_stress_test_pid"] = bg
        bg.start()
        if timeout:
            logging.info("sleep %ds waiting guest test start.", timeout)
            time.sleep(timeout)
        if not bg.is_alive():
            test.fail("Failed to start guest test!")

    def guest_stress_deamon():
        """
        This deamon will keep watch the status of stress in guest.
        If the stress program is finished before migration this will
        restart it.
        """
        # Loops until params["action"] is switched away from "run";
        # then it kills the stress process (best effort) and exits.
        while True:
            bg = params.get("guest_stress_test_pid")
            action = params.get("action")
            if action == "run":
                logging.debug("Check if guest stress is still running")
                guest_stress_test = params.get("guest_stress_test")
                if bg and not bg.is_alive():
                    logging.debug("Stress process finished, restart it")
                    guest_stress_start(guest_stress_test)
                    time.sleep(30)
                else:
                    logging.debug("Stress still on")
            else:
                if bg and bg.is_alive():
                    try:
                        stress_stop_cmd = params.get("stress_stop_cmd")
                        # NOTE(review): same suspicious extra `env` argument
                        # as in guest_stress_start() — confirm.
                        vm = env.get_vm(env, params.get("main_vm"))
                        vm.verify_alive()
                        session = vm.wait_for_login()
                        if stress_stop_cmd:
                            # logging.warn is a deprecated alias of warning.
                            logging.warn("Killing background stress process "
                                         "with cmd '%s', you would see some "
                                         "error message in client test result,"
                                         "it's harmless.", stress_stop_cmd)
                            session.cmd(stress_stop_cmd)
                        bg.join(10)
                    except Exception:
                        # Best-effort shutdown; ignore any failure here.
                        pass
                break
            time.sleep(10)

    def get_functions(func_names, locals_dict):
        """
        Find sub function(s) in this function with the given name(s).
        """
        if not func_names:
            return []
        funcs = []
        for f in func_names.split():
            f = locals_dict.get(f)
            # Silently skips names that are missing or not plain functions.
            if isinstance(f, types.FunctionType):
                funcs.append(f)
        return funcs

    def mig_set_speed():
        # Invoked by name through pre_migrate/post_migrate hooks.
        mig_speed = params.get("mig_speed", "1G")
        return vm.monitor.migrate_set_speed(mig_speed)

    def check_dma():
        # Invoked by name through pre_migrate/post_migrate hooks; verifies
        # DMA/PIO configuration via hdparm output plus dmesg inside the guest.
        dmesg_pattern = params.get("dmesg_pattern",
                                   "ata.*?configured for PIO")
        dma_pattern = params.get("dma_pattern", r"DMA.*?\(\?\)$")
        pio_pattern = params.get("pio_pattern", r"PIO.*?pio\d+\s+$")
        hdparm_cmd = params.get("hdparm_cmd",
                                "i=`ls /dev/[shv]da` ; hdparm -I $i")
        session_dma = vm.wait_for_login()
        hdparm_output = session_dma.cmd_output(hdparm_cmd)
        failed_msg = ""
        if not re.search(dma_pattern, hdparm_output, re.M):
            failed_msg += "Failed in DMA check from hdparm output.\n"
        if not re.search(pio_pattern, hdparm_output, re.M):
            failed_msg += "Failed in PIO check from hdparm output.\n"

        if failed_msg:
            failed_msg += "hdparm output is: %s\n" % hdparm_output

        dmesg = session_dma.cmd_output("dmesg")
        if not re.search(dmesg_pattern, dmesg):
            failed_msg += "Failed in dmesg check.\n"
            failed_msg += " dmesg from guest is: %s\n" % dmesg

        if failed_msg:
            test.fail(failed_msg)

    # --- Parameter parsing -------------------------------------------------
    login_timeout = int(params.get("login_timeout", 360))
    mig_timeout = float(params.get("mig_timeout", "3600"))
    mig_protocol = params.get("migration_protocol", "tcp")
    # 2 when mig_cancel == "yes", else 0 (int(bool) trick).
    mig_cancel_delay = int(params.get("mig_cancel") == "yes") * 2
    mig_exec_cmd_src = params.get("migration_exec_cmd_src")
    mig_exec_cmd_dst = params.get("migration_exec_cmd_dst")
    if mig_exec_cmd_src and "gzip" in mig_exec_cmd_src:
        # Randomize the exec-migration temp file so parallel runs don't clash.
        mig_exec_file = params.get("migration_exec_file", "/var/tmp/exec")
        mig_exec_file += "-%s" % utils_misc.generate_random_string(8)
        mig_exec_cmd_src = mig_exec_cmd_src % mig_exec_file
        mig_exec_cmd_dst = mig_exec_cmd_dst % mig_exec_file
    offline = params.get("offline", "no") == "yes"
    check = params.get("vmstate_check", "no") == "yes"
    living_guest_os = params.get("migration_living_guest", "yes") == "yes"
    deamon_thread = None

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    if living_guest_os:
        session = vm.wait_for_login(timeout=login_timeout)

        # Get the output of migration_test_command
        test_command = params.get("migration_test_command")
        reference_output = session.cmd_output(test_command)

        # Start some process in the background (and leave the session open)
        background_command = params.get("migration_bg_command", "")

        # check whether tcpdump is installed
        if "tcpdump" in background_command:
            if not utils_package.package_install("tcpdump", session):
                test.cancel("Please install tcpdump to proceed")
        session.sendline(background_command)
        time.sleep(5)

        # Start another session with the guest and make sure the background
        # process is running
        session2 = vm.wait_for_login(timeout=login_timeout)

        try:
            check_command = params.get("migration_bg_check_command", "")
            error_context.context("Checking the background command in the "
                                  "guest pre migration", logging.info)
            if session2.cmd_status(check_command, timeout=30) != 0:
                test.error("migration bg check command failed")
            session2.close()

            # run some functions before migrate start.
            pre_migrate = get_functions(params.get("pre_migrate"), locals())
            for func in pre_migrate:
                func()

            # Start stress test in guest.
            guest_stress_test = params.get("guest_stress_test")
            if guest_stress_test:
                guest_stress_start(guest_stress_test)
                params["action"] = "run"
                deamon_thread = utils_test.BackgroundTest(
                    guest_stress_deamon, ())
                deamon_thread.start()

            # Migration knobs come in as Python-literal strings in params.
            capabilities = ast.literal_eval(params.get("migrate_capabilities",
                                                       "{}"))
            inner_funcs = ast.literal_eval(params.get("migrate_inner_funcs",
                                                      "[]"))
            mig_parameters = ast.literal_eval(params.get("migrate_parameters",
                                                         "None"))
            target_mig_parameters = params.get("target_migrate_parameters",
                                               "None")
            target_mig_parameters = ast.literal_eval(target_mig_parameters)
            migrate_parameters = (mig_parameters, target_mig_parameters)

            # Migrate the VM
            # Odd rounds migrate back ("pong"); even rounds migrate out ("ping").
            ping_pong = params.get("ping_pong", 1)
            for i in range(int(ping_pong)):
                if i % 2 == 0:
                    logging.info("Round %s ping..." % str(i / 2))
                else:
                    logging.info("Round %s pong..." % str(i / 2))
                try:
                    vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay,
                               offline, check,
                               migration_exec_cmd_src=mig_exec_cmd_src,
                               migration_exec_cmd_dst=mig_exec_cmd_dst,
                               migrate_capabilities=capabilities,
                               mig_inner_funcs=inner_funcs,
                               env=env,
                               migrate_parameters=migrate_parameters)
                except qemu_monitor.MonitorNotSupportedMigCapError as e:
                    test.cancel("Unable to access capability: %s" % e)
                except:
                    # NOTE(review): bare `except: raise` is a no-op re-raise;
                    # it adds nothing and could be removed.
                    raise

            # Set deamon thread action to stop after migrate
            params["action"] = "stop"

            # run some functions after migrate finish.
            post_migrate = get_functions(params.get("post_migrate"), locals())
            for func in post_migrate:
                func()

            # Log into the guest again
            logging.info("Logging into guest after migration...")
            session2 = vm.wait_for_login(timeout=30)
            logging.info("Logged in after migration")

            # Make sure the background process is still running
            error_context.context("Checking the background command in the "
                                  "guest post migration", logging.info)
            session2.cmd(check_command, timeout=30)

            # Get the output of migration_test_command
            output = session2.cmd_output(test_command)

            # Compare output to reference output
            if output != reference_output:
                logging.info("Command output before migration differs from "
                             "command output after migration")
                logging.info("Command: %s", test_command)
                logging.info("Output before:" +
                             utils_misc.format_str_for_message(
                                 reference_output))
                logging.info("Output after:" +
                             utils_misc.format_str_for_message(output))
                test.fail("Command '%s' produced different output "
                          "before and after migration" % test_command)

        finally:
            # Kill the background process
            if session2 and session2.is_alive():
                bg_kill_cmd = params.get("migration_bg_kill_command", None)
                ignore_status = params.get("migration_bg_kill_ignore_status", 1)
                if bg_kill_cmd is not None:
                    try:
                        session2.cmd(bg_kill_cmd)
                    except aexpect.ShellCmdError as details:
                        # If the migration_bg_kill_command rc differs from
                        # ignore_status, it means the migration_bg_command is
                        # no longer alive. Let's ignore the failure here if
                        # that is the case.
                        if not int(details.status) == int(ignore_status):
                            raise
                    except aexpect.ShellTimeoutError:
                        logging.debug("Remote session not responsive, "
                                      "shutting down VM %s", vm.name)
                        vm.destroy(gracefully=True)

            if deamon_thread is not None:
                # Set deamon thread action to stop after migrate
                params["action"] = "stop"
                deamon_thread.join()
    else:
        # Just migrate without depending on a living guest OS
        vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay,
                   offline, check,
                   migration_exec_cmd_src=mig_exec_cmd_src,
                   migration_exec_cmd_dst=mig_exec_cmd_dst,
                   migrate_parameters=migrate_parameters)
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) check the environment
    3) Strat the VM and check whether the VM been started successfully
    4) Compare the Hugepage memory size to the Guest memory setted.
    5) Check the hugepage memory usage.
    6) Clean up

    :param test: test object (provides virtdir and failure reporting).
    :param params: dict with test parameters.
    :param env: dict with the test environment (VM registry).

    Fixes vs. previous revision:
    - Python 2 ``except X, e`` handlers converted to ``except X as e``
      (the old form is a SyntaxError on Python 3; the sibling test below
      already uses the Python 3 form).
    - The outer ``try:`` was left dangling with no handler, which does not
      parse; a best-effort ``finally`` now closes sessions and powers off
      started VMs.
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # Skip cases early
    vm_names = []
    if test_type == "contrast":
        # Contrast mode compares hugepage accounting between two guests.
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            raise error.TestNAError("This test requires two VMs")
        # confirm no VM running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            raise error.TestNAError("one or more VMs are alive")
        err_range = float(params.get("mem_error_range", 1.25))
    else:
        vm_names.append(params.get("main_vm"))
        if test_type == "stress":
            target_path = params.get("target_path", "/tmp/test.out")
        elif test_type == "unixbench":
            unixbench_control_file = params.get("unixbench_controle_file",
                                                "unixbench5.control")

    # backup orignal setting so the environment can be restored afterwards
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                         "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)

    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)

    utils_libvirtd.libvirtd_restart()
    # Free hugepages before any guest starts — baseline for usage checks.
    non_started_free = utils_memory.get_num_huge_pages_free()

    vms = []
    sessions = []
    try:
        for vm_name in vm_names:
            # try to start vm and login
            try:
                vm = env.get_vm(vm_name)
                vm.start()
            except VMError as e:
                if mb_enable and not tlbfs_enable:
                    # if hugetlbfs not be mounted,
                    # VM start with memoryBacking tag will fail
                    logging.debug(e)
                else:
                    error_msg = "Test failed in positive case. error: %s\n" % e
                    raise error.TestFail(error_msg)
            if vm.is_alive() is not True:
                break
            vms.append(vm)

            # try to login and run some program
            try:
                session = vm.wait_for_login()
            except (LoginError, ShellError) as e:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                raise error.TestFail(error_msg)
            sessions.append(session)

            if test_type == "stress":
                # prepare file for increasing stress
                stress_path = prepare_c_file()
                remote.scp_to_remote(vm.get_address(), 22,
                                     'root', params.get('password'),
                                     stress_path, "/tmp/")
                # Try to install gcc on guest first
                utils_package.package_install(["gcc"], session, 360)
                # increasing workload
                session.cmd("gcc %s -o %s" % (stress_path, target_path))
                session.cmd("%s &" % target_path)

            if test_type == "unixbench":
                params["main_vm"] = vm_name
                params["test_control_file"] = unixbench_control_file

                control_path = os.path.join(test.virtdir, "control",
                                            unixbench_control_file)
                # unixbench test need 'patch' and 'perl' commands installed
                utils_package.package_install(["patch", "perl"], session, 360)
                command = utils_test.run_autotest(vm, session, control_path,
                                                  None, None, params,
                                                  copy_only=True)
                session.cmd("%s &" % command, ignore_all_errors=True)

                # wait for autotest running on vm
                time.sleep(delay)

                def _is_unixbench_running():
                    cmd = "ps -ef | grep perl | grep Run"
                    return not session.cmd_status(cmd)
                if not utils_misc.wait_for(_is_unixbench_running, timeout=240):
                    raise error.TestNAError("Failed to run unixbench in guest,"
                                            " please make sure some necessary"
                                            " packages are installed in guest,"
                                            " such as gcc, tar, bzip2")
                logging.debug("Unixbench test is running in VM")
    finally:
        # Best-effort cleanup: close guest sessions and power off any VM
        # this test started (mirrors the teardown of the sibling test).
        for session in sessions:
            session.close()
        for vm in vms:
            if vm.is_alive():
                vm.destroy()
def run(test, params, env):
    """
    Test send-key command, include all types of codeset and sysrq

    For normal sendkey test, we create a file to check the command
    execute by send-key. For sysrq test, check the /var/log/messages
    and guest status

    :param test: test object used for cancel/fail reporting.
    :param params: dict with test parameters.
    :param env: dict with the test environment (VM registry).
    """
    if not virsh.has_help_command('send-key'):
        test.cancel("This version of libvirt does not support the send-key "
                    "test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    options = params.get("sendkey_options", "")
    sysrq_test = ("yes" == params.get("sendkey_sysrq", "no"))
    sleep_time = int(params.get("sendkey_sleeptime", 2))
    readonly = params.get("readonly", False)
    username = params.get("username")
    password = params.get("password")
    create_file = params.get("create_file_name")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            # NOTE(review): '******' looks like a redacted placeholder for a
            # real unprivileged account name — confirm against the cfg file.
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current libvirt "
                        "version.")

    def send_line(send_str):
        """
        send string to guest with send-key and end with Enter
        """
        # Types the string one keystroke at a time (KEY_<char>), then Enter.
        for send_ch in list(send_str):
            virsh.sendkey(vm_name, "KEY_%s" % send_ch.upper(),
                          ignore_status=False)
        virsh.sendkey(vm_name, "KEY_ENTER", ignore_status=False)

    vm = env.get_vm(vm_name)
    session = vm.wait_for_login()

    if sysrq_test:
        # Is 'rsyslog' installed on guest? It'll be what writes out
        # to /var/log/messages
        if not utils_package.package_install("rsyslog", session):
            test.fail("Fail to install rsyslog, make sure that you have "
                      "usable repo in guest")
        # clear messages, restart rsyslog, and make sure it's running
        session.cmd("echo '' > /var/log/messages")
        session.cmd("service rsyslog restart")
        ps_stat = session.cmd_status("ps aux |grep rsyslog")
        if ps_stat != 0:
            test.fail("rsyslog is not running in guest")

        # enable sysrq
        session.cmd("echo 1 > /proc/sys/kernel/sysrq")

    # make sure the environment is clear
    if create_file is not None:
        session.cmd("rm -rf %s" % create_file)

    try:
        # wait for tty started (up to 60s) so keystrokes land on a login prompt
        tty_stat = "ps aux|grep tty"
        timeout = 60
        while timeout >= 0 and \
                session.get_command_status(tty_stat) != 0:
            time.sleep(1)
            timeout = timeout - 1
        if timeout < 0:
            test.fail("Can not wait for tty started in 60s")

        # send user and passwd to guest to login via keystrokes on the console
        send_line(username)
        time.sleep(2)
        send_line(password)
        time.sleep(2)

        output = virsh.sendkey(vm_name, options, readonly=readonly,
                               unprivileged_user=unprivileged_user,
                               uri=uri)
        time.sleep(sleep_time)
        if output.exit_status != 0:
            if status_error:
                logging.info("Failed to sendkey to guest as expected, Error:"
                             "%s.", output.stderr)
                return
            else:
                test.fail("Failed to send key to guest, Error:%s." %
                          output.stderr)
        elif status_error:
            test.fail("Expect fail, but succeed indeed.")

        if create_file is not None:
            # check if created file exist
            cmd_ls = "ls %s" % create_file
            sec_status, sec_output = session.get_command_status_output(cmd_ls)
            if sec_status == 0:
                logging.info("Succeed to create file with send key")
            else:
                test.fail("Fail to create file with send key, Error:%s" %
                          sec_output)
        elif sysrq_test:
            # check /var/log/message info according to different key

            # Since there's no guarantee when messages will be written
            # we'll do a check and wait loop for up to 60 seconds
            timeout = 60
            while timeout >= 0:
                # NOTE(review): if options contains none of KEY_H/M/T/B,
                # get_status is never assigned and the references below
                # raise UnboundLocalError — confirm options are constrained
                # by the cfg file.
                if "KEY_H" in options:
                    get_status = session.cmd_status("cat /var/log/messages|"
                                                    "grep 'SysRq.*HELP'")
                elif "KEY_M" in options:
                    get_status = session.cmd_status("cat /var/log/messages|"
                                                    "grep 'SysRq.*Show Memory'")
                elif "KEY_T" in options:
                    get_status = session.cmd_status("cat /var/log/messages|"
                                                    "grep 'SysRq.*Show State'")
                elif "KEY_B" in options:
                    # SysRq-B reboots the guest; verify via domstate reason.
                    client_session = vm.wait_for_login()
                    result = virsh.domstate(vm_name, '--reason',
                                            ignore_status=True)
                    output = result.stdout.strip()
                    logging.debug("The guest state: %s", output)
                    if not output.count("booted"):
                        get_status = 1
                    else:
                        get_status = 0
                    client_session.close()

                if get_status == 0:
                    # Success: force loop exit.
                    timeout = -1
                else:
                    session.cmd("echo \"virsh sendkey waiting\" >> /var/log/messages")
                    time.sleep(1)
                    timeout = timeout - 1

            if get_status != 0:
                test.fail("SysRq does not take effect in guest, options is "
                          "%s" % options)
            else:
                logging.info("Succeed to send SysRq command")
        else:
            test.fail("Test cfg file invalid: either sysrq_params or "
                      "create_file_name must be defined")

    finally:
        # Remove the artifact file and close the guest session.
        if create_file is not None:
            session.cmd("rm -rf %s" % create_file)
        session.close()
def run(test, params, env):
    """
    Test extended TSEG on Q35 machine types
    <smm state='on'>
        <tseg unit='MiB'>48</tseg>
    </smm>

    Steps:
    1) Edit VM xml for smm or tseg sub element
    2) Verify if Guest can boot as expected
    3) On i440 machine types, the property does not support.
       On Q35 machine types, both Seabios and OVMF Guest can bootup

    :param test: test object used for cancel/fail reporting.
    :param params: dict with test parameters.
    :param env: dict with the test environment (VM registry).

    Fix vs. previous revision: the qemu command-line expectation used the
    raw ``size`` value instead of the MiB-normalized ``tseg_mbytes``;
    for any unit other than KiB/MiB (e.g. GiB) the wrong number was checked
    against ``-global mch.extended-tseg-mbytes``.
    """
    vm_name = params.get("main_vm", "")
    vm = env.get_vm(vm_name)
    smm_state = params.get("smm_state", "off")
    unit = params.get("tseg_unit")
    size = params.get("tseg_size")
    boot_type = params.get("boot_type", "")
    loader_type = params.get("loader_type")
    loader = params.get("loader")
    err_msg = params.get("error_msg", "")
    vm_arch_name = params.get("vm_arch_name", "x86_64")
    status_error = ("yes" == params.get("status_error", "no"))

    if not libvirt_version.version_compare(4, 5, 0):
        test.cancel("TSEG does not support in "
                    "current libvirt version")

    # Firmware packages must be present for the requested boot type.
    if (boot_type == "seabios" and
            not utils_package.package_install('seabios-bin')):
        test.cancel("Failed to install Seabios")

    if (boot_type == 'ovmf' and
            not utils_package.package_install('OVMF')):
        test.cancel("Failed to install OVMF")

    # Back VM XML
    v_xml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    v_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)

    try:
        # Specify boot loader for OVMF
        if boot_type == 'ovmf':
            os_xml = v_xml.os
            os_xml.loader_type = loader_type
            os_xml.loader = loader
            os_xml.loader_readonly = "yes"
            v_xml.os = os_xml

        try:
            features_xml = v_xml.features
        except xcepts.LibvirtXMLNotFoundError:
            if vm_arch_name == 'x86_64':
                # ACPI is required for UEFI on x86_64
                v_xml.xmltreefile.create_by_xpath("/features/acpi")
                features_xml = v_xml.features
            else:
                features_xml = vm_xml.VMFeaturesXML()

        features_xml.smm = smm_state
        if unit and size:
            features_xml.smm_tseg_unit = unit
            features_xml.smm_tseg = size
        v_xml.features = features_xml

        logging.debug("New VM XML is:\n%s", v_xml)
        ret = virsh.define(v_xml.xml)
        utlv.check_result(ret, expected_fails=err_msg)

        # Check result
        if not status_error:
            vm.start()
            if unit and size:
                # If tseg unit is KiB, convert it to MiB
                # as vm dumpxml convert it automatically
                if unit == 'KiB':
                    unit, size = unify_to_MiB(unit, size)
                expect_line = "<tseg unit=\"%s\">%s</tseg>" % (unit, size)
                utlv.check_dumpxml(vm, expect_line)

                # Qemu cmdline use mbytes unit
                unit, tseg_mbytes = unify_to_MiB(unit, size)
                # BUGFIX: compare against the MiB-normalized value, not the
                # raw configured size.
                expect_line = '-global mch.extended-tseg-mbytes=%s' % tseg_mbytes
                utlv.check_qemu_cmd_line(expect_line)
    finally:
        logging.debug("Restore the VM XML")
        if vm.is_alive():
            vm.destroy()
        # OVMF enable nvram by default
        v_xml_backup.sync(options="--nvram")
def run(test, params, env):
    """
    Test mtu feature from virtual network

    :param test: test object used for fail reporting.
    :param params: dict with test parameters.
    :param env: dict with the test environment (VM registry).
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    mtu_type = params.get('mtu_type')
    mtu_size = params.get('mtu_size', '')
    net = params.get('net', DEFAULT_NET)
    net_type = params.get('net_type', '')
    with_iface = 'yes' == params.get('with_iface', 'no')
    with_net = 'yes' == params.get('with_net', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    check = params.get('check', '')
    error_msg = params.get('error_msg', '')
    # Randomized names so repeated runs don't collide with leftovers.
    bridge_name = 'br_mtu' + utils_misc.generate_random_string(3)
    add_pkg = params.get('add_pkg', '')
    model = params.get('model', 'virtio')

    def set_network(size, net='default'):
        """
        Set mtu size to a certain network
        """
        logging.info('Set mtu size of network "%s" to %s', net, size)
        default_xml = NetworkXML.new_from_net_dumpxml(net)
        default_xml.mtu = size
        default_xml.sync()
        logging.debug(virsh.net_dumpxml(net))

    def set_interface(mtu_size='', source_network='default',
                      iface_type='network', iface_model='virtio'):
        """
        Set mtu size to a certain interface
        """
        # openvswitch interfaces are expressed as type 'bridge' in the XML.
        interface_type = 'bridge' if iface_type in ('bridge', 'openvswitch') else iface_type
        iface_dict = {
            'type': interface_type,
            'source': "{'%s': '%s'}" % (interface_type, source_network),
            'model': iface_model
        }

        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})

        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})

        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)

    def get_default_if():
        """
        Get default interface that is using by vm
        """
        # Picks the first host NIC reporting link-up via ethtool.
        ifaces = utils_net.get_sorted_net_if()
        logging.debug('Interfaces on host: %s', ifaces)
        for iface in ifaces[0]:
            if 'Link detected: yes' in process.run('ethtool %s' % iface).stdout_text:
                logging.debug('Found host interface "%s"', iface)
                return iface

    def create_bridge():
        """
        Create a bridge on host for test

        Returns (connection_name, bridge_name); the local bridge_name here
        intentionally shadows the outer one — callers rebind via the return
        value.
        """
        cmd_create_br = 'nmcli con add type bridge con-name %s ifname %s'
        con_name = 'con_' + utils_misc.generate_random_string(3)
        bridge_name = 'br_' + utils_misc.generate_random_string(3)
        process.run(cmd_create_br % (con_name, bridge_name), verbose=True)
        return con_name, bridge_name

    def create_network_xml(name, network_type, base_if='', **kwargs):
        """
        Create a network xml to be defined
        """
        m_net = NetworkXML(name)
        m_net.forward = {'mode': 'bridge'}
        if network_type in ('bridge', 'openvswitch'):
            m_net.bridge = {'name': kwargs['bridge_name']}
        elif network_type == 'macvtap':
            if base_if:
                m_net.forward_interface = [{'dev': base_if}]
        if network_type == 'openvswitch':
            m_net.virtualport_type = 'openvswitch'
        if 'mtu' in kwargs:
            m_net.mtu = kwargs['mtu']
        logging.debug(m_net)
        return m_net.xml

    def create_iface(iface_type, **kwargs):
        """
        Create a interface to be attached to vm
        """
        m_iface = Interface(iface_type)
        m_iface.mac_address = utils_net.generate_mac_address_simple()
        if 'base_if' in kwargs:
            m_iface.source = {'dev': kwargs['base_if'],
                              'mode': 'vepa'}
        if 'source_net' in kwargs:
            m_iface.source = {'network': kwargs['source_net']}
        if 'mtu' in kwargs:
            m_iface.mtu = {'size': kwargs['mtu']}
        if 'model_net' in kwargs:
            m_iface.model = kwargs['model_net']
        logging.debug(m_iface.get_xml())
        logging.debug(m_iface)
        return m_iface

    def check_mtu(mtu_size, qemu=False):
        """
        Check if mtu meets expectation on host
        """
        # Verifies the tap device MTU via ifconfig and, optionally, the
        # host_mtu property on the qemu-kvm command line.
        error = ''
        live_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = live_vmxml.get_devices('interface')[0]
        logging.debug(iface_xml.target)
        dev = iface_xml.target['dev']
        ifconfig_info = process.run('ifconfig|grep mtu|grep %s' % dev,
                                    shell=True, verbose=True).stdout_text
        if 'mtu %s' % mtu_size in ifconfig_info:
            logging.info('PASS on ifconfig check for vnet.')
        else:
            error += 'Fail on ifconfig check for vnet.'
        if qemu:
            qemu_mtu_info = process.run('ps aux|grep qemu-kvm',
                                        shell=True, verbose=True).stdout_text
            if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
                logging.info('PASS on qemu cmd line check.')
            else:
                error += 'Fail on qemu cmd line check.'
        if error:
            test.fail(error)

    def check_mtu_in_vm(fn_login, mtu_size):
        """
        Check if mtu meets expectations in vm
        """
        session = fn_login()
        check_cmd = 'ifconfig'
        output = session.cmd(check_cmd)
        session.close()
        logging.debug(output)
        if 'mtu %s' % mtu_size not in output:
            test.fail('MTU check inside vm failed.')
        else:
            logging.debug("MTU check inside vm passed.")

    try:
        # Back up VM and default-network XML for restoration in finally.
        bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)
        if add_pkg:
            add_pkg = add_pkg.split()
            utils_package.package_install(add_pkg)
            if 'openvswitch' in add_pkg:
                br = 'ovsbr0' + utils_misc.generate_random_string(3)
                process.run('systemctl start openvswitch.service', shell=True, verbose=True)
                process.run('ovs-vsctl add-br %s' % br, shell=True, verbose=True)
                process.run('ovs-vsctl show', shell=True, verbose=True)
        if not check or check in ['save', 'managedsave', 'hotplug_save']:
            # Create bridge or network and set mtu
            iface_type = 'network'
            if net_type in ('bridge', 'openvswitch'):
                if net_type == 'bridge':
                    params['con_name'], br = create_bridge()
                if mtu_type == 'network':
                    test_net = create_network_xml(
                        bridge_name, net_type,
                        bridge_name=br
                    )
                    virsh.net_create(test_net, debug=True)
                    virsh.net_dumpxml(bridge_name, debug=True)
                if mtu_type == 'interface':
                    iface_type = net_type
                    bridge_name = br
            elif net_type == 'network':
                if mtu_type == 'network':
                    set_network(mtu_size)

            iface_mtu = 0
            if mtu_type == 'interface':
                iface_mtu = mtu_size
            if mtu_type == 'network' and with_iface:
                # Interface MTU is halved so it differs from the network MTU.
                mtu_size = str(int(mtu_size)//2)
                iface_mtu = mtu_size

            source_net = bridge_name if net_type in ('bridge', 'openvswitch') else 'default'

            # set mtu in vm interface
            set_interface(iface_mtu, source_network=source_net,
                          iface_type=iface_type, iface_model=model)
            vm.start()
            # Bridged setups may break the NIC-based login; use serial console.
            vm_login = vm.wait_for_serial_login if net_type in ('bridge', 'openvswitch') else vm.wait_for_login
            vm_login().close()
            check_qemu = True if mtu_type == 'interface' else False

            # Test mtu after save vm
            if check in ('save', 'hotplug_save'):
                if check == 'hotplug_save':
                    iface = create_iface('network', source_net='default',
                                         mtu=mtu_size, model_net=model)
                    params['mac'] = iface.mac_address
                    virsh.attach_device(vm_name, iface.xml, debug=True)
                    virsh.dumpxml(vm_name, debug=True)
                    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    if params['mac'] not in str(dom_xml):
                        test.fail('Failed to attach interface with mtu')
                save_path = os.path.join(data_dir.get_tmp_dir(), vm_name + '.save')
                virsh.save(vm_name, save_path, debug=True)
                virsh.restore(save_path, debug=True)
            if check == 'managedsave':
                virsh.managedsave(vm_name, debug=True)
                virsh.start(vm_name, debug=True)

            # Check in both host and vm
            check_mtu(mtu_size, check_qemu)
            check_mtu_in_vm(vm_login, mtu_size)
            vm_login(timeout=60).close()

            if check == 'hotplug_save':
                virsh.detach_interface(vm_name, 'network %s' % params['mac'],
                                       debug=True)
                # Give the hot-unplug a moment to land in the live XML.
                time.sleep(5)
                dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if params['mac'] in str(dom_xml):
                    test.fail('Failed to detach interface with mtu after save-restore')

        else:
            # Negative / special-case branches: result is validated at the end.
            hotplug = 'yes' == params.get('hotplug', 'False')
            if check == 'net_update':
                result = virsh.net_update(
                    DEFAULT_NET, 'modify', 'mtu',
                    '''"<mtu size='%s'/>"''' % mtu_size,
                    debug=True
                )
            if check in ('macvtap', 'bridge_net', 'ovswitch_net'):
                base_if = get_default_if()
                macv_name = 'direct-macvtap' + utils_misc.generate_random_string(3)

                # Test mtu in different type of network
                if mtu_type == 'network':
                    if check == 'macvtap':
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if, mtu=mtu_size)
                    if check == 'bridge_net':
                        params['con_name'], br = create_bridge()
                        test_net = create_network_xml(
                            bridge_name, 'bridge', mtu=mtu_size,
                            bridge_name=br
                        )
                    if check == 'ovswitch_net':
                        # NOTE(review): relies on `br` created in the add_pkg
                        # openvswitch branch above — confirm cfg always sets
                        # add_pkg for this case.
                        test_net = create_network_xml(
                            bridge_name, 'openvswitch', mtu=mtu_size,
                            bridge_name=br
                        )
                    if 'net_create' in params['id']:
                        result = virsh.net_create(test_net, debug=True)
                    if 'net_define' in params['id']:
                        result = virsh.net_define(test_net, debug=True)

                # Test mtu with or without a binding network
                elif mtu_type == 'interface':
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    if with_net:
                        test_net = create_network_xml(macv_name, 'macvtap', base_if)
                        virsh.net_create(test_net, debug=True)
                        iface = create_iface('network', source_net=macv_name,
                                             mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            vmxml.sync()
                            result = virsh.start(vm_name)
                    else:
                        iface = create_iface('direct', base_if=base_if, mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            result = virsh.define(vmxml.xml, debug=True)
            if check == 'invalid_val':
                iface = create_iface('network', source_net='default', mtu=mtu_size)
                result = virsh.attach_device(vm_name, iface.xml, debug=True)

            # Check result
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])

    finally:
        bk_xml.sync()
        bk_netxml.sync()
        if 'test_net' in locals():
            # NOTE(review): in the macvtap paths the created network is named
            # macv_name, not bridge_name — confirm this destroy target.
            virsh.net_destroy(bridge_name, debug=True)
        if params.get('con_name'):
            process.run('nmcli con del %s' % params['con_name'], verbose=True)
        if add_pkg:
            # NOTE(review): `br` is only bound when 'openvswitch' was in
            # add_pkg — this line can raise NameError otherwise; confirm.
            process.run("ovs-vsctl del-br %s" % br, verbose=True)
            utils_package.package_remove(add_pkg)
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) check the environment
    3) Strat the VM and check whether the VM been started successfully
    4) Compare the Hugepage memory size to the Guest memory setted.
    5) Check the hugepage memory usage.
    6) Clean up

    :param test: test object (provides virtdir and cancel/fail reporting).
    :param params: dict with test parameters.
    :param env: dict with the test environment (VM registry).
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # Skip cases early
    vm_names = []
    if test_type == "contrast":
        # Contrast mode compares hugepage accounting between two guests.
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            test.cancel("This test requires two VMs")
        # confirm no VM running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            test.cancel("one or more VMs are alive")
        err_range = float(params.get("mem_error_range", 1.25))
    else:
        vm_names.append(params.get("main_vm"))
        if test_type == "stress":
            target_path = params.get("target_path", "/tmp/test.out")
        elif test_type == "unixbench":
            unixbench_control_file = params.get("unixbench_controle_file",
                                                "unixbench5.control")

    # backup orignal setting so the environment can be restored in finally
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                         "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)

    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)

    utils_libvirtd.libvirtd_restart()
    # Free hugepages before any guest starts — baseline for usage checks.
    non_started_free = utils_memory.get_num_huge_pages_free()

    vms = []
    sessions = []
    try:
        for vm_name in vm_names:
            # try to start vm and login
            try:
                vm = env.get_vm(vm_name)
                vm.start()
            except VMError as e:
                if mb_enable and not tlbfs_enable:
                    # if hugetlbfs not be mounted,
                    # VM start with memoryBacking tag will fail
                    logging.debug(e)
                else:
                    error_msg = "Test failed in positive case. error: %s\n" % e
                    test.fail(error_msg)
            # Expected-failure path above leaves the VM down: stop collecting.
            if vm.is_alive() is not True:
                break
            vms.append(vm)

            # try to login and run some program
            try:
                session = vm.wait_for_login()
            except (LoginError, ShellError) as e:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                test.fail(error_msg)
            sessions.append(session)

            if test_type == "stress":
                # prepare file for increasing stress
                stress_path = prepare_c_file()
                remote.scp_to_remote(vm.get_address(), 22,
                                     'root', params.get('password'),
                                     stress_path, "/tmp/")
                # Try to install gcc on guest first
                utils_package.package_install(["gcc"], session, 360)
                # increasing workload
                session.cmd("gcc %s -o %s" % (stress_path, target_path))
                session.cmd("%s &" % target_path)

            if test_type == "unixbench":
                params["main_vm"] = vm_name
                params["test_control_file"] = unixbench_control_file

                control_path = os.path.join(test.virtdir, "control",
                                            unixbench_control_file)
                # unixbench test need 'patch' and 'perl' commands installed
                utils_package.package_install(["patch", "perl"], session, 360)
                command = utils_test.run_autotest(vm, session, control_path,
                                                  None, None, params,
                                                  copy_only=True)
                session.cmd("%s &" % command, ignore_all_errors=True)

                # wait for autotest running on vm
                time.sleep(delay)

                def _is_unixbench_running():
                    cmd = "ps -ef | grep perl | grep Run"
                    return not session.cmd_status(cmd)
                if not utils_misc.wait_for(_is_unixbench_running, timeout=240):
                    test.cancel("Failed to run unixbench in guest,"
                                " please make sure some necessary"
                                " packages are installed in guest,"
                                " such as gcc, tar, bzip2")
                logging.debug("Unixbench test is running in VM")

        if test_type == "contrast":
            # wait for vm finish starting completely
            time.sleep(delay)

        # Skip the analysis when the start was expected to fail
        # (memoryBacking without hugetlbfs mounted).
        if not (mb_enable and not tlbfs_enable):
            logging.debug("starting analyzing the hugepage usage...")
            pid = vms[-1].get_pid()
            started_free = utils_memory.get_num_huge_pages_free()
            # Get the thp usage from /proc/pid/smaps
            started_anon = utils_memory.get_num_anon_huge_pages(pid)
            # Static hugepages consumed by guest startup, in pages and KiB.
            static_used = non_started_free - started_free
            hugepage_used = static_used * page_size

            if test_type == "contrast":
                # get qemu-kvm memory consumption by top
                cmd = "top -b -n 1|awk '$1 == %s {print $10}'" % pid
                rate = process.run(cmd, ignore_status=False,
                                   verbose=True, shell=True).stdout_text.strip()
                qemu_kvm_used = (utils_memory.memtotal() * float(rate)) / 100
                logging.debug("rate: %s, used-by-qemu-kvm: %f, used-by-vm: %d",
                              rate, qemu_kvm_used, hugepage_used)
                # Tolerance window is (err_range - 1) of the hugepage usage.
                if abs(qemu_kvm_used - hugepage_used) > hugepage_used * (err_range - 1):
                    test.fail("Error for hugepage usage")
            if test_type == "stress":
                if non_started_free <= started_free:
                    logging.debug("hugepage usage:%d -> %d", non_started_free,
                                  started_free)
                    test.fail("Error for hugepage usage with stress")
            if mb_enable is not True:
                # Without memoryBacking, the guest must not consume static
                # hugepages, nor THP when THP is disabled.
                if static_used > 0:
                    test.fail("VM use static hugepage without"
                              " memoryBacking element")
                if thp_enable is not True and started_anon > 0:
                    test.fail("VM use transparent hugepage, while"
                              " it's disabled")
            else:
                if tlbfs_enable is not True:
                    # memoryBacking without hugetlbfs: only THP can be used.
                    if static_used > 0:
                        test.fail("VM use static hugepage without tlbfs"
                                  " mounted")
                    if thp_enable and started_anon <= 0:
                        test.fail("VM doesn't use transparent"
                                  " hugepage")
                else:
                    # memoryBacking with hugetlbfs: static pool governs usage.
                    if shp_num > 0:
                        if static_used <= 0:
                            test.fail("VM doesn't use static"
                                      " hugepage")
                    else:
                        if static_used > 0:
                            test.fail("VM use static hugepage,"
                                      " while it's set to zero")
                    if thp_enable is not True:
                        if started_anon > 0:
                            test.fail("VM use transparent hugepage,"
                                      " while it's disabled")
                    else:
                        if shp_num == 0 and started_anon <= 0:
                            test.fail("VM doesn't use transparent"
                                      " hugepage, while static"
                                      " hugepage is disabled")
    finally:
        # end up session
        for session in sessions:
            session.close()

        for vm in vms:
            if vm.is_alive():
                vm.destroy()

        # Revert the memoryBacking tag changes made during setup.
        for vm_name in vm_names:
            if mb_enable:
                vm_xml.VMXML.del_memoryBacking_tag(vm_name)
            else:
                vm_xml.VMXML.set_memoryBacking_tag(vm_name)

        utils_libvirtd.libvirtd_restart()

        # Restore the original hugetlbfs mount state.
        if tlbfs_enable is True:
            if tlbfs_status is not True:
                utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        else:
            if tlbfs_status is True:
                utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

        utils_memory.set_num_huge_pages(shp_orig_num)
        utils_memory.set_transparent_hugepage(thp_orig_status)