def install_moongen(session, ip, user, port, password):
    """
    Install MoonGen on the remote MoonGen host.
    """
    # copy MoonGen.zip to remote moongen host
    moongen_pkg = params.get("moongen_pkg")
    local_path = os.path.join(data_dir.get_deps_dir(),
                              "performance/%s" % moongen_pkg)
    remote.scp_to_remote(ip, shell_port, username, password,
                         local_path, "/home")
    # install moongen
    cmd_str = "rm -rf /home/MoonGen"
    cmd_str += " && unzip /home/%s -d /home" % params.get("moongen_pkg")
    cmd_str += " && cd /home/MoonGen && ./build.sh"
    if session.cmd_status(cmd_str, timeout=300) != 0:
        test.error("Fail to install program on moongen host")
    # set hugepages
    session.cmd(params.get("generator_hugepages_cmd"), ignore_all_errors=True)
    # probe vfio and vfio-pci
    cmd_probe = "modprobe vfio; modprobe vfio-pci"
    session.cmd_status(cmd_probe, timeout=300)
    # bind nic
    moongen_dpdk_nic = params.get("moongen_dpdk_nic").split()
    for i in list(moongen_dpdk_nic):
        cmd_bind = "dpdk-devbind --bind=vfio-pci %s" % i
        if session.cmd_status(cmd_bind) != 0:
            test.error("Fail to bind nic %s on moongen host" % i)
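# A minimal usage sketch for install_moongen() above (not part of the original
# snippet).  It assumes the helper is nested inside a run() that already
# defines params, test, shell_port and username; the "generator_ip" and
# "generator_password" cfg keys are hypothetical.
generator_ip = params.get("generator_ip")
generator_password = params.get("generator_password")
# log in to the traffic-generator host and drive the install helper
session = remote.wait_for_login("ssh", generator_ip, shell_port, username,
                                generator_password, r"[\#\$]\s*$")
try:
    install_moongen(session, generator_ip, username, shell_port,
                    generator_password)
finally:
    session.close()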
def scp_benckmark(self, username, password, host_path, guest_path, port='22'):
    """
    Scp a benchmark tool from the local host to the guest.
    """
    scp_to_remote(self.vm.get_address(), port, username, password,
                  host_path, guest_path)
    self.env_files.append(guest_path)
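# A hypothetical call site for the helper above (not from the original code);
# the "unixbench.tar.gz" dependency name and the params keys are assumptions.
host_path = os.path.join(data_dir.get_deps_dir(), "unixbench.tar.gz")
self.scp_benckmark(params.get("username", "root"), params.get("password"),
                   host_path, "/tmp/unixbench.tar.gz",
                   port=params.get("file_transfer_port", "22"))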
def post_migration(
    self, vm, cancel_delay, mig_offline, dsthost, mig_exec_cmd,
    not_wait_for_migration, fd, mig_data
):
    if mig_data.params.get("host_mig_offline") == "yes":
        src_tmp = vm.params.get("migration_sfiles_path")
        dst_tmp = vm.params.get("migration_dfiles_path")
        username = vm.params.get("username")
        password = vm.params.get("password")
        remote.scp_to_remote(dsthost, "22", username, password,
                             src_tmp, dst_tmp)
def post_migration(self, vm, cancel_delay, mig_offline, dsthost,
                   mig_exec_cmd, not_wait_for_migration, fd, mig_data):
    if mig_data.params.get("host_mig_offline") == "yes":
        src_tmp = vm.params.get("migration_sfiles_path")
        dst_tmp = vm.params.get("migration_dfiles_path")
        username = vm.params.get("username")
        password = vm.params.get("password")
        remote.scp_to_remote(dsthost, "22", username, password,
                             src_tmp, dst_tmp)
def create_or_del_networks(pf_name, params, remote_virsh_session=None, is_del=False): """ Create or delete network on local or remote :param params: Dictionary with the test parameters :param pf_name: The name of PF :param remote_virsh_session: The virsh session object to the remote host :param is_del: Whether the networks should be deleted :raise: test.fail when fails to define/start network """ net_bridge_name = params.get("net_bridge_name", "host-bridge") net_bridge_fwd = params.get("net_bridge_fwd", '{"mode": "bridge"}') bridge_name = params.get("bridge_name", "br0") bridge_dict = {"net_name": net_bridge_name, "net_forward": net_bridge_fwd, "net_bridge": '{"name": "%s"}' % bridge_name} net_list = [bridge_dict] enable_hostdev_iface = "yes" == params.get("enable_hostdev_iface", "no") if enable_hostdev_iface: net_hostdev_name = params.get("net_hostdev_name", "hostdev-net") net_hostdev_fwd = params.get("net_hostdev_fwd", '{"mode": "hostdev", "managed": "yes"}') net_dict = {"net_name": net_hostdev_name, "net_forward": net_hostdev_fwd, "net_forward_pf": '{"dev": "%s"}' % pf_name} net_list.append(net_dict) if not is_del: for net_params in net_list: net_dev = libvirt.create_net_xml(net_params.get("net_name"), net_params) if not remote_virsh_session: if net_dev.get_active(): net_dev.undefine() net_dev.define() net_dev.start() else: remote.scp_to_remote(server_ip, '22', server_user, server_pwd, net_dev.xml, net_dev.xml, limit="", log_filename=None, timeout=600, interface=None) remote_virsh_session.net_define(net_dev.xml, **virsh_args) remote_virsh_session.net_start(net_params.get("net_name"), **virsh_args) else: virsh_session = virsh if remote_virsh_session: virsh_session = remote_virsh_session for nname in [n_dict.get("net_name") for n_dict in net_list]: if nname not in virsh_session.net_state_dict(): continue virsh_session.net_destroy(nname, debug=True, ignore_status=True) virsh_session.net_undefine(nname, debug=True, ignore_status=True)
def env_setup(session, ip, user, port, password):
    error.context("Setup env for %s" % ip)
    ssh_cmd(session, "service iptables stop")
    ssh_cmd(session, "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore")
    netperf_dir = os.path.join(os.environ['AUTODIR'], "tests/netperf2")
    for i in params.get("netperf_files").split():
        remote.scp_to_remote(ip, shell_port, username, password,
                             "%s/%s" % (netperf_dir, i), "/tmp/")
    ssh_cmd(session, params.get("setup_cmd"))
def create_or_del_network(net_dict, is_del=False, remote_args=None): """ Create or delete network on local or remote :param net_dict: Dictionary with the network parameters :param is_del: Whether the networks should be deleted :param remote_args: The parameters for remote """ remote_virsh_session = None if remote_args: remote_virsh_session = virsh.VirshPersistent(**remote_args) if not is_del: net_dev = libvirt.create_net_xml(net_dict.get("net_name"), net_dict) if not remote_virsh_session: if net_dev.get_active(): net_dev.undefine() net_dev.define() net_dev.start() else: remote_ip = remote_args.get("remote_ip") remote_user = remote_args.get("remote_user") remote_pwd = remote_args.get("remote_pwd") if not all([remote_ip, remote_user, remote_pwd]): raise exceptions.TestError( "remote_[ip|user|pwd] are necessary!") remote.scp_to_remote(remote_ip, '22', remote_user, remote_pwd, net_dev.xml, net_dev.xml, limit="", log_filename=None, timeout=600, interface=None) remote_virsh_session.net_define(net_dev.xml, debug=True) remote_virsh_session.net_start(net_dict.get("net_name"), debug=True) remote.run_remote_cmd("rm -rf %s" % net_dev.xml, remote_args) else: virsh_session = virsh if remote_virsh_session: virsh_session = remote_virsh_session if net_dict.get("net_name") in virsh_session.net_state_dict(): virsh_session.net_destroy(net_dict.get("net_name"), debug=True, ignore_status=True) virsh_session.net_undefine(net_dict.get("net_name"), debug=True, ignore_status=True) if remote_virsh_session: remote_virsh_session.close_session()
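# Hypothetical driver for create_or_del_network() above (not from the original
# test): define and start the bridge network on a remote host, then remove it
# during cleanup.  The params keys used to build remote_args are assumptions.
net_dict = {"net_name": "host-bridge",
            "net_forward": '{"mode": "bridge"}',
            "net_bridge": '{"name": "br0"}'}
remote_args = {"remote_ip": params.get("server_ip"),
               "remote_user": params.get("server_user", "root"),
               "remote_pwd": params.get("server_pwd")}
try:
    create_or_del_network(net_dict, remote_args=remote_args)
    # ... exercise the network here ...
finally:
    create_or_del_network(net_dict, is_del=True, remote_args=remote_args)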
def env_setup(session, ip_addr, username, shell_port, password):
    """
    Test env setup
    """
    error.context("Setup env for %s" % ip_addr)
    ssh_cmd(session, "service iptables stop; true")
    netperf_dir = os.path.join(data_dir.get_root_dir(), "shared/deps")
    for i in params.get("netperf_files").split():
        remote.scp_to_remote(ip_addr, shell_port, username, password,
                             "%s/%s" % (netperf_dir, i), "/tmp/")
    ssh_cmd(session, params.get("setup_cmd"))
def env_setup(session, ip, user, port, password):
    error.context("Setup env for %s" % ip)
    ssh_cmd(session, "service iptables stop")
    ssh_cmd(session, "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore")
    netperf_dir = os.path.join(data_dir.get_root_dir(), "shared/deps")
    for i in params.get("netperf_files").split():
        remote.scp_to_remote(ip, shell_port, username, password,
                             "%s/%s" % (netperf_dir, i), "/tmp/")
    ssh_cmd(session, params.get("setup_cmd"))
    agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py")
    remote.scp_to_remote(ip, shell_port, username, password,
                         agent_path, "/tmp/")
def env_setup(ip):
    logging.debug("Setup env for %s" % ip)
    ssh_key.setup_ssh_key(hostname=ip, user=username, port=shell_port,
                          password=password)
    ssh_cmd(ip, "service iptables stop")
    ssh_cmd(ip, "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore")
    netperf_dir = os.path.join(os.environ['AUTODIR'], "tests/netperf2")
    for i in params.get("netperf_files").split():
        remote.scp_to_remote(ip, shell_port, username, password,
                             "%s/%s" % (netperf_dir, i), "/tmp/")
    ssh_cmd(ip, params.get("setup_cmd"))
def env_setup(session, ip, user, port, password):
    error_context.context("Setup env for %s" % ip)
    if params.get("env_setup_cmd"):
        ssh_cmd(session, params.get("env_setup_cmd"), ignore_status=True)
    pkg = params["netperf_pkg"]
    pkg = os.path.join(data_dir.get_deps_dir(), pkg)
    remote.scp_to_remote(ip, shell_port, username, password, pkg, "/tmp")
    ssh_cmd(session, params.get("setup_cmd"))
    agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py")
    remote.scp_to_remote(ip, shell_port, username, password, agent_path, "/tmp")
def env_setup(session, ip, user, port, password):
    error.context("Setup env for %s" % ip)
    ssh_cmd(session, "service iptables stop; true")
    ssh_cmd(session, "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore")
    netperf_dir = os.path.join(data_dir.get_root_dir(), "shared/deps")
    for i in params.get("netperf_files").split():
        remote.scp_to_remote(ip, shell_port, username, password,
                             "%s/%s" % (netperf_dir, i), "/tmp/")
    ssh_cmd(session, params.get("setup_cmd"))
    agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py")
    remote.scp_to_remote(ip, shell_port, username, password,
                         agent_path, "/tmp/")
def env_setup(session, ip, user, port, password):
    error.context("Setup env for %s" % ip)
    ssh_cmd(session, "iptables -F; true")
    ssh_cmd(session, "service iptables stop; true")
    ssh_cmd(session, "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore")
    download_link = params.get("netperf_download_link")
    download_dir = data_dir.get_download_dir()
    md5sum = params.get("pkg_md5sum")
    pkg = utils.unmap_url_cache(download_dir, download_link, md5sum)
    remote.scp_to_remote(ip, shell_port, username, password, pkg, "/tmp")
    ssh_cmd(session, params.get("setup_cmd"))
    agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py")
    remote.scp_to_remote(ip, shell_port, username, password, agent_path, "/tmp")
def env_setup(session, ip, user, port, password):
    error_context.context("Setup env for %s" % ip)
    ssh_cmd(session, "iptables -F", ignore_status=True)
    ssh_cmd(session, "service iptables stop", ignore_status=True)
    ssh_cmd(session, "systemctl stop firewalld.service", ignore_status=True)
    ssh_cmd(session, "echo 2 > /proc/sys/net/ipv4/conf/all/arp_ignore")
    ssh_cmd(session, "echo 0 > /sys/kernel/mm/ksm/run", ignore_status=True)
    pkg = params["netperf_pkg"]
    pkg = os.path.join(data_dir.get_deps_dir(), pkg)
    remote.scp_to_remote(ip, shell_port, username, password, pkg, "/tmp")
    ssh_cmd(session, params.get("setup_cmd"))
    agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py")
    remote.scp_to_remote(ip, shell_port, username, password, agent_path, "/tmp")
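# The env_setup variants above all rely on an ssh_cmd() helper defined
# elsewhere in each test.  A minimal sketch of such a helper (an assumption,
# not the upstream implementation): run a command in the remote session and
# fail loudly unless told to ignore errors.
def ssh_cmd(session, cmd, timeout=60, ignore_status=False):
    status, output = session.cmd_status_output(cmd, timeout=timeout)
    if status != 0 and not ignore_status:
        raise RuntimeError("Command '%s' failed on remote host: %s"
                           % (cmd, output))
    return output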
def vm_sync(vmxml, vm_name=None, virsh_instance=virsh):
    """
    A wrapper to sync vm xml on localhost and remote host

    :param vmxml: domain VMXML instance
    :param vm_name: The name of VM
    :param virsh_instance: virsh instance object
    """
    if vm_name and virsh_instance != virsh:
        remote.scp_to_remote(server_ip, '22', server_user, server_pwd,
                             vmxml.xml, vmxml.xml)
        if virsh_instance.domain_exists(vm_name):
            if virsh_instance.is_alive(vm_name):
                virsh_instance.destroy(vm_name, ignore_status=True)
            virsh_instance.undefine(vmxml.xml, ignore_status=True)
        virsh_instance.define(vmxml.xml, debug=True)
    else:
        vmxml.sync()
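# Hypothetical use of vm_sync() above with a persistent remote virsh session
# (not from the original snippet).  server_ip/server_user/server_pwd are the
# same enclosing-scope names the helper itself reads; remote_uri, vm_name and
# vmxml are assumed to exist in the calling test.
remote_virsh = virsh.VirshPersistent(remote_ip=server_ip,
                                     remote_user=server_user,
                                     remote_pwd=server_pwd,
                                     uri=remote_uri)
try:
    vm_sync(vmxml, vm_name=vm_name, virsh_instance=remote_virsh)
finally:
    remote_virsh.close_session()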
def env_setup(session, ip_addr, username, shell_port, password):
    """
    Test env setup
    """
    error.context("Setup env for %s" % ip_addr)
    ssh_cmd(session, "service iptables stop; true")
    netperf_links = params["netperf_links"].split()
    remote_dir = params.get("remote_dir", "/var/tmp")
    for netperf_link in netperf_links:
        if utils.is_url(netperf_link):
            download_dir = data_dir.get_download_dir()
            md5sum = params.get("pkg_md5sum")
            netperf_dir = utils.unmap_url_cache(download_dir,
                                                netperf_link, md5sum)
        elif netperf_link:
            netperf_dir = os.path.join(data_dir.get_root_dir(),
                                       "shared/%s" % netperf_link)
        remote.scp_to_remote(ip_addr, shell_port, username, password,
                             netperf_dir, remote_dir)
    ssh_cmd(session, params.get("setup_cmd"))
def run(test, params, env): """ Run Pktgen test between host/guest 1) Boot the main vm, or just grab it if it's already booted. 2) Configure pktgen server(only linux) 3) Run pktgen test, finish when timeout or env["pktgen_run"] != True :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ login_timeout = float(params.get("login_timeout", 360)) error.context("Init the VM, and try to login", logging.info) external_host = params.get("external_host") if not external_host: get_host_cmd = "ip route | awk '/default/ {print $3}'" external_host = utils.system_output(get_host_cmd) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=login_timeout) error.context("Pktgen server environment prepare", logging.info) # pktgen server only support linux, since pktgen is a linux kernel module pktgen_server = params.get("pktgen_server", "localhost") params_server = params.object_params("pktgen_server") s_shell_client = params_server.get("shell_client", "ssh") s_shell_port = params_server.get("shell_port", "22") s_username = params_server.get("username", "root") s_passwd = params_server.get("password", "123456") s_shell_prompt = params_server.get("shell_prompt") server_session = "" # pktgen server is autotest virtual guest(only linux) if pktgen_server in params.get("vms", "vm1 vm2"): vm_pktgen = env.get_vm(pktgen_server) vm_pktgen.verify_alive() server_session = vm_pktgen.wait_for_login(timeout=login_timeout) runner = server_session.cmd_output_safe pktgen_ip = vm_pktgen.get_address() pktgen_mac = vm_pktgen.get_mac_address() server_interface = utils_net.get_linux_ifname(server_session, pktgen_mac) # pktgen server is a external host assigned elif re.match(r"((\d){1,3}\.){3}(\d){1,3}", pktgen_server): pktgen_ip = pktgen_server server_session = remote.wait_for_login(s_shell_client, pktgen_ip, s_shell_port, s_username, s_passwd, s_shell_prompt) runner = server_session.cmd_output_safe server_interface = params.get("server_interface") if not server_interface: raise error.TestNAError("Must config server interface before test") else: # using host as a pktgen server server_interface = params.get("netdst", "switch") host_nic = utils_net.Interface(server_interface) pktgen_ip = host_nic.get_ip() pktgen_mac = host_nic.get_mac() runner = utils.system # copy pktgen_test scipt to the test server. local_path = os.path.join(data_dir.get_root_dir(), "shared/scripts/pktgen.sh") remote_path = "/tmp/pktgen.sh" remote.scp_to_remote(pktgen_ip, s_shell_port, s_username, s_passwd, local_path, remote_path) error.context("Run pktgen test") run_threads = params.get("pktgen_threads", 1) pktgen_stress_timeout = float(params.get("pktgen_test_timeout", 600)) exec_cmd = "%s %s %s %s %s" % (remote_path, vm.get_address(), vm.get_mac_address(), server_interface, run_threads) try: env["pktgen_run"] = True try: # Set a run flag in env, when other case call this case as a sub # backgroud process, can set run flag to False to stop this case. 
start_time = time.time() stop_time = start_time + pktgen_stress_timeout while (env["pktgen_run"] and time.time() < stop_time): runner(exec_cmd, timeout=pktgen_stress_timeout) # using ping to kill the pktgen stress except aexpect.ShellTimeoutError: session.cmd("ping %s" % pktgen_ip, ignore_all_errors=True) finally: env["pktgen_run"] = False error.context("Verify Host and guest kernel no error and call trace", logging.info) vm.verify_kernel_crash() utils_misc.verify_host_dmesg() error.context("Ping external host after pktgen test", logging.info) status, output = utils_test.ping(dest=external_host, session=session, timeout=240, count=20) loss_ratio = utils_test.get_loss_ratio(output) if (loss_ratio > int(params.get("packet_lost_ratio", 5)) or loss_ratio == -1): logging.debug("Ping %s output: %s" % (external_host, output)) raise error.TestFail("Guest network connection unusable, " + "packet lost ratio is '%d%%'" % loss_ratio) if server_session: server_session.close() if session: session.close()
def run(test, params, env): """ Verify the QMP even with -device pvpanic when trigger crash,this case will: 1) Start VM with pvpanic device. 2) Check if pvpanic device exists in guest. 3) Trigger crash in guest. 4) Check vm status with QMP. :param test: QEMU test object :param params: Dictionary with the test parameters :param env: Dictionary with test environmen. """ stop_kdump_command = params["stop_kdump_command"] trigger_crash = params["trigger_crash"] qmp_check_info = params["qmp_check_info"] check_info = params.get("check_info") is_aarch64 = params.get("vm_arch_name") == "aarch64" check_pci_cmd = params.get("check_pci_cmd") check_capability_cmd = params.get("check_capability_cmd") # trigger kernel panic config trigger_kernel_panic = params.get("trigger_kernel_panic") username = params.get('username') password = params.get('password') port = params.get('file_transfer_port') guest_path = params.get('guest_path') depends_pkgs = params.get('depends_pkgs') cmd_make = params.get('cmd_make') io_timeout = params.get_numeric('io_timeout') vm = env.get_vm(params["main_vm"]) session = vm.wait_for_login() guest_addr = vm.get_address() if check_info: qtree_info = vm.monitor.info("qtree") if check_info not in qtree_info: test.fail("Not find pvpanic device in guest") if trigger_kernel_panic: host_path = os.path.join(data_dir.get_deps_dir(), 'trigger_panic_drive') scp_to_remote(guest_addr, port, username, password, host_path, guest_path) if not utils_package.package_install(depends_pkgs, session): test.cancel("Please install %s inside guest to proceed", depends_pkgs) session.cmd(cmd_make % guest_path, io_timeout) try: session.cmd(stop_kdump_command) if is_aarch64: pci = session.cmd_output(check_pci_cmd).strip() capability_info = session.cmd_output(check_capability_cmd % pci) test.log.info("The pvpanic capability info of guest: %s", capability_info) session.cmd(trigger_crash, timeout=5) except aexpect.ShellTimeoutError: pass else: test.fail("Guest should crash.") finally: output = vm.monitor.get_status() if qmp_check_info not in str(output): test.fail("Guest status is not guest-panicked") if session: session.close()
def run(test, params, env): """ Test virtio-fs by sharing the data between host and guest. Steps: 1. Create shared directories on the host. 2. Run virtiofsd daemons on the host. 3. Boot a guest on the host with virtiofs options. 4. Log into guest then mount the virtiofs targets. 5. Generate files or run stress on the mount points inside guest. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ def get_viofs_exe(session): """ Get viofs.exe from virtio win iso,such as E:\viofs\2k19\amd64 """ logging.info("Get virtiofs exe full path.") media_type = params["virtio_win_media_type"] try: get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type) get_product_dirname = getattr(virtio_win, "product_dirname_%s" % media_type) get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % media_type) except AttributeError: test.error("Not supported virtio win media type '%s'", media_type) viowin_ltr = get_drive_letter(session) if not viowin_ltr: test.error("Could not find virtio-win drive in guest") guest_name = get_product_dirname(session) if not guest_name: test.error("Could not get product dirname of the vm") guest_arch = get_arch_dirname(session) if not guest_arch: test.error("Could not get architecture dirname of the vm") exe_middle_path = ("{name}\\{arch}" if media_type == "iso" else "{arch}\\{name}").format(name=guest_name, arch=guest_arch) exe_file_name = "virtiofs.exe" exe_find_cmd = 'dir /b /s %s\\%s | findstr "\\%s\\\\"' exe_find_cmd %= (viowin_ltr, exe_file_name, exe_middle_path) exe_path = session.cmd(exe_find_cmd).strip() logging.info("Found exe file '%s'", exe_path) return exe_path # data io config cmd_dd = params.get('cmd_dd') cmd_md5 = params.get('cmd_md5') cmd_new_folder = params.get('cmd_new_folder') cmd_copy_file = params.get('cmd_copy_file') cmd_del_folder = params.get('cmd_del_folder') # pjdfs test config cmd_pjdfstest = params.get('cmd_pjdfstest') cmd_unpack = params.get('cmd_unpack') cmd_yum_deps = params.get('cmd_yum_deps') cmd_autoreconf = params.get('cmd_autoreconf') cmd_configure = params.get('cmd_configure') cmd_make = params.get('cmd_make') pjdfstest_pkg = params.get('pjdfstest_pkg') username = params.get('username') password = params.get('password') port = params.get('file_transfer_port') # fio config fio_options = params.get('fio_options') io_timeout = params.get_numeric('io_timeout') # xfstest config cmd_xfstest = params.get('cmd_xfstest') fs_dest_fs2 = params.get('fs_dest_fs2') cmd_download_xfstest = params.get('cmd_download_xfstest') cmd_yum_install = params.get('cmd_yum_install') cmd_make_xfs = params.get('cmd_make_xfs') cmd_setenv = params.get('cmd_setenv') cmd_setenv_nfs = params.get('cmd_setenv_nfs', '') cmd_useradd = params.get('cmd_useradd') fs_dest_fs1 = params.get('fs_dest_fs1') cmd_get_tmpfs = params.get('cmd_get_tmpfs') cmd_set_tmpfs = params.get('cmd_set_tmpfs') size_mem1 = params.get('size_mem1') # xfstest-nfs config setup_local_nfs = params.get('setup_local_nfs') if cmd_xfstest: # /dev/shm is the default memory-backend-file, the default value is the # half of the host memory. 
Increase it to guest memory size to avoid crash ori_tmpfs_size = process.run(cmd_get_tmpfs, shell=True).stdout_text.replace("\n", "") logging.debug("original tmpfs size is %s", ori_tmpfs_size) params["post_command"] = cmd_set_tmpfs % ori_tmpfs_size params["pre_command"] = cmd_set_tmpfs % size_mem1 if setup_local_nfs: for fs in params.objects("filesystems"): nfs_params = params.object_params(fs) params["export_dir"] = nfs_params.get("export_dir") params["nfs_mount_src"] = nfs_params.get("nfs_mount_src") params["nfs_mount_dir"] = nfs_params.get("fs_source_dir") nfs_local = nfs.Nfs(params) nfs_local.setup() params["start_vm"] = "yes" env_process.preprocess(test, params, env) os_type = params.get("os_type") vm = env.get_vm(params.get("main_vm")) vm.verify_alive() session = vm.wait_for_login() host_addr = vm.get_address() if os_type == "windows": cmd_timeout = params.get_numeric("cmd_timeout", 120) driver_name = params["driver_name"] install_path = params["install_path"] check_installed_cmd = params["check_installed_cmd"] % install_path # Check whether windows driver is running,and enable driver verifier session = utils_test.qemu.windrv_check_running_verifier(session, vm, test, driver_name) # install winfsp tool error_context.context("Install winfsp for windows guest.", logging.info) installed = session.cmd_status(check_installed_cmd) == 0 if installed: logging.info("Winfsp tool is already installed.") else: install_cmd = utils_misc.set_winutils_letter(session, params["install_cmd"]) session.cmd(install_cmd, cmd_timeout) if not utils_misc.wait_for(lambda: not session.cmd_status( check_installed_cmd), 60): test.error("Winfsp tool is not installed.") for fs in params.objects("filesystems"): fs_params = params.object_params(fs) fs_target = fs_params.get("fs_target") fs_dest = fs_params.get("fs_dest") fs_source = fs_params.get("fs_source_dir") base_dir = fs_params.get("fs_source_base_dir", data_dir.get_data_dir()) if not os.path.isabs(fs_source): fs_source = os.path.join(base_dir, fs_source) host_data = os.path.join(fs_source, 'fs_test') if os_type == "linux": error_context.context("Create a destination directory %s " "inside guest." % fs_dest, logging.info) utils_misc.make_dirs(fs_dest, session) if not cmd_xfstest: error_context.context("Mount virtiofs target %s to %s inside" " guest." 
% (fs_target, fs_dest), logging.info) if not utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session): test.fail('Mount virtiofs target failed.') else: error_context.context("Start virtiofs service in guest.", logging.info) viofs_sc_create_cmd = params["viofs_sc_create_cmd"] viofs_sc_start_cmd = params["viofs_sc_start_cmd"] viofs_sc_query_cmd = params["viofs_sc_query_cmd"] logging.info("Check if virtiofs service is registered.") status, output = session.cmd_status_output(viofs_sc_query_cmd) if "not exist as an installed service" in output: logging.info("Register virtiofs service in windows guest.") exe_path = get_viofs_exe(session) viofs_sc_create_cmd = viofs_sc_create_cmd % exe_path sc_create_s, sc_create_o = session.cmd_status_output(viofs_sc_create_cmd) if sc_create_s != 0: test.fail("Failed to register virtiofs service, output is %s" % sc_create_o) logging.info("Check if virtiofs service is started.") status, output = session.cmd_status_output(viofs_sc_query_cmd) if "RUNNING" not in output: logging.info("Start virtiofs service.") sc_start_s, sc_start_o = session.cmd_status_output(viofs_sc_start_cmd) if sc_start_s != 0: test.fail("Failed to start virtiofs service, output is %s" % sc_start_o) else: logging.info("Virtiofs service is running.") viofs_log_file_cmd = params.get("viofs_log_file_cmd") if viofs_log_file_cmd: error_context.context("Check if LOG file is created.", logging.info) log_dir_s = session.cmd_status(viofs_log_file_cmd) if log_dir_s != 0: test.fail("Virtiofs log is not created.") # get fs dest for vm virtio_fs_disk_label = fs_target error_context.context("Get Volume letter of virtio fs target, the disk" "lable is %s." % virtio_fs_disk_label, logging.info) vol_con = "VolumeName='%s'" % virtio_fs_disk_label vol_func = utils_misc.get_win_disk_vol(session, condition=vol_con) volume_letter = utils_misc.wait_for(lambda: vol_func, cmd_timeout) if volume_letter is None: test.fail("Could not get virtio-fs mounted volume letter.") fs_dest = "%s:" % volume_letter guest_file = os.path.join(fs_dest, 'fs_test') logging.info("The guest file in shared dir is %s", guest_file) try: if cmd_dd: error_context.context("Creating file under %s inside " "guest." % fs_dest, logging.info) session.cmd(cmd_dd % guest_file, io_timeout) if os_type == "linux": cmd_md5_vm = cmd_md5 % guest_file else: guest_file_win = guest_file.replace("/", "\\") cmd_md5_vm = cmd_md5 % (volume_letter, guest_file_win) md5_guest = session.cmd_output(cmd_md5_vm, io_timeout).strip().split()[0] logging.info(md5_guest) md5_host = process.run("md5sum %s" % host_data, io_timeout).stdout_text.strip().split()[0] if md5_guest != md5_host: test.fail('The md5 value of host is not same to guest.') if cmd_new_folder and cmd_copy_file and cmd_del_folder: error_context.context("Folder test under %s inside " "guest." % fs_dest, logging.info) session.cmd(cmd_new_folder % fs_dest) test_file = guest_file if os_type == "linux" \ else "%s:\\%s" % (volume_letter, 'fs_test') session.cmd(cmd_copy_file % (test_file, fs_dest)) session.cmd(cmd_del_folder % fs_dest) if fio_options: error_context.context("Run fio on %s." % fs_dest, logging.info) fio = generate_instance(params, vm, 'fio') try: fio.run(fio_options % guest_file, io_timeout) finally: fio.clean() vm.verify_dmesg() if cmd_pjdfstest: error_context.context("Run pjdfstest on %s." 
% fs_dest, logging.info) host_path = os.path.join(data_dir.get_deps_dir('pjdfstest'), pjdfstest_pkg) scp_to_remote(host_addr, port, username, password, host_path, fs_dest) session.cmd(cmd_unpack.format(fs_dest), 180) session.cmd(cmd_yum_deps, 180) session.cmd(cmd_autoreconf % fs_dest, 180) session.cmd(cmd_configure.format(fs_dest), 180) session.cmd(cmd_make % fs_dest, io_timeout) session.cmd(cmd_pjdfstest % fs_dest, io_timeout) if cmd_xfstest: error_context.context("Run xfstest on guest.", logging.info) utils_misc.make_dirs(fs_dest_fs2, session) if session.cmd_status(cmd_download_xfstest, 360): test.error("Failed to download xfstests-dev") session.cmd(cmd_yum_install, 180) session.cmd(cmd_make_xfs, 360) session.cmd(cmd_setenv, 180) session.cmd(cmd_setenv_nfs, 180) session.cmd(cmd_useradd, 180) try: output = session.cmd_output(cmd_xfstest, io_timeout) logging.info("%s", output) if 'Failed' in output: test.fail('The xfstest failed.') else: break except (aexpect.ShellStatusError, aexpect.ShellTimeoutError): test.fail('The xfstest failed.') finally: if os_type == "linux": utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) utils_misc.safe_rmdir(fs_dest, session=session) if setup_local_nfs: session.close() vm.destroy() for fs in params.objects("filesystems"): nfs_params = params.object_params(fs) params["export_dir"] = nfs_params.get("export_dir") params["nfs_mount_dir"] = nfs_params.get("fs_source_dir") params["rm_export_dir"] = nfs_params.get("export_dir") params["rm_mount_dir"] = nfs_params.get("fs_source_dir") nfs_local = nfs.Nfs(params) nfs_local.cleanup() utils_misc.safe_rmdir(params["export_dir"]) # during all virtio fs is mounted, reboot vm if params.get('reboot_guest', 'no') == 'yes': def get_vfsd_num(): """ Get virtiofsd daemon number during vm boot up. :return: virtiofsd daemon count. """ cmd_ps_virtiofsd = params.get('cmd_ps_virtiofsd') vfsd_num = 0 for device in vm.devices: if isinstance(device, qdevices.QVirtioFSDev): sock_path = device.get_param('sock_path') cmd_ps_virtiofsd = cmd_ps_virtiofsd % sock_path vfsd_ps = process.system_output(cmd_ps_virtiofsd, shell=True) vfsd_num += len(vfsd_ps.strip().splitlines()) return vfsd_num error_context.context("Check virtiofs daemon before reboot vm.", logging.info) vfsd_num_bf = get_vfsd_num() error_context.context("Reboot guest and check virtiofs daemon.", logging.info) vm.reboot() if not vm.is_alive(): test.fail("After rebooting vm quit unexpectedly.") vfsd_num_af = get_vfsd_num() if vfsd_num_bf != vfsd_num_af: test.fail("Virtiofs daemon is different before and after reboot.\n" "Before reboot: %s\n" "After reboot: %s\n", (vfsd_num_bf, vfsd_num_af))
if vm.is_alive() is not True:
    break
vms.append(vm)
# try to login and run some program
try:
    session = vm.wait_for_login()
except (LoginError, ShellError) as e:
    error_msg = "Test failed in positive case.\n error: %s\n" % e
    raise error.TestFail(error_msg)
sessions.append(session)

if test_type == "stress":
    # prepare file for increasing stress
    stress_path = prepare_c_file()
    remote.scp_to_remote(vm.get_address(), 22, 'root',
                         params.get('password'), stress_path, "/tmp/")
    # increasing workload
    session.cmd("gcc %s -o %s" % (stress_path, target_path))
    session.cmd("%s &" % target_path)

if test_type == "unixbench":
    params["main_vm"] = vm_name
    params["test_control_file"] = unixbench_control_file
    control_path = os.path.join(test.virtdir, "control",
                                unixbench_control_file)
    command = utils_test.run_autotest(vm, session, control_path,
                                      None, None, params, copy_only=True)
    session.cmd("%s &" % command)
    # wait for autotest running on vm
def run(test, params, env): """ Test virtio-fs by sharing the data between host and guest. Steps: 1. Create shared directories on the host. 2. Run virtiofsd daemons on the host. 3. Boot a guest on the host with virtiofs options. 4. Log into guest then mount the virtiofs targets. 5. Generate files or run stress on the mount points inside guest. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ cmd_dd = params.get('cmd_dd') cmd_md5 = params.get('cmd_md5') cmd_pjdfstest = params.get('cmd_pjdfstest') cmd_unpack = params.get('cmd_unpack') cmd_yum_deps = params.get('cmd_yum_deps') cmd_autoreconf = params.get('cmd_autoreconf') cmd_configure = params.get('cmd_configure') cmd_make = params.get('cmd_make') pjdfstest_pkg = params.get('pjdfstest_pkg') fio_options = params.get('fio_options') io_timeout = params.get_numeric('io_timeout') username = params.get('username') password = params.get('password') port = params.get('file_transfer_port') vm = env.get_vm(params.get("main_vm")) vm.verify_alive() session = vm.wait_for_login() host_addr = vm.get_address() for fs in params.objects("filesystems"): fs_params = params.object_params(fs) fs_target = fs_params.get("fs_target") fs_dest = fs_params.get("fs_dest") fs_source = fs_params.get("fs_source_dir") base_dir = fs_params.get("fs_source_base_dir", data_dir.get_data_dir()) if not os.path.isabs(fs_source): fs_source = os.path.join(base_dir, fs_source) guest_data = os.path.join(fs_dest, 'fs_test') host_data = os.path.join(fs_source, 'fs_test') error_context.context( "Create a destination directory %s " "inside guest." % fs_dest, logging.info) utils_misc.make_dirs(fs_dest, session) error_context.context( "Mount virtiofs target %s to %s inside guest." % (fs_target, fs_dest), logging.info) utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session) try: if cmd_dd: logging.info("Creating file under %s inside guest." % fs_dest) session.cmd(cmd_dd % guest_data, io_timeout) logging.info("Compare the md5 between guest and host.") md5_guest = session.cmd(cmd_md5 % guest_data, io_timeout).strip().split()[0] logging.info(md5_guest) md5_host = process.run( cmd_md5 % host_data, io_timeout).stdout_text.strip().split()[0] if md5_guest != md5_host: test.fail('The md5 value of host is not same to guest.') if fio_options: error_context.context("Run fio on %s." % fs_dest, logging.info) fio = generate_instance(params, vm, 'fio') try: fio.run(fio_options % guest_data, io_timeout) finally: fio.clean() vm.verify_dmesg() if cmd_pjdfstest: error_context.context("Run pjdfstest on %s." % fs_dest, logging.info) host_path = os.path.join(data_dir.get_deps_dir('pjdfstest'), pjdfstest_pkg) scp_to_remote(host_addr, port, username, password, host_path, fs_dest) session.cmd(cmd_unpack.format(fs_dest), 180) session.cmd(cmd_yum_deps, 180) session.cmd(cmd_autoreconf % fs_dest, 180) session.cmd(cmd_configure.format(fs_dest), 180) session.cmd(cmd_make % fs_dest, io_timeout) session.cmd(cmd_pjdfstest % fs_dest, io_timeout) finally: utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) utils_misc.safe_rmdir(fs_dest, session=session)
if vm.is_alive() is not True:
    break
vms.append(vm)
# try to login and run some program
try:
    session = vm.wait_for_login()
except (LoginError, ShellError) as e:
    error_msg = "Test failed in positive case.\n error: %s\n" % e
    raise error.TestFail(error_msg)
sessions.append(session)

if test_type == "stress":
    # prepare file for increasing stress
    stress_path = prepare_c_file()
    remote.scp_to_remote(vm.get_address(), 22, 'root',
                         params.get('password'), stress_path, "/tmp/")
    # Try to install gcc on guest first
    utils_misc.yum_install(["gcc"], session, 360)
    # increasing workload
    session.cmd("gcc %s -o %s" % (stress_path, target_path))
    session.cmd("%s &" % target_path)

if test_type == "unixbench":
    params["main_vm"] = vm_name
    params["test_control_file"] = unixbench_control_file
    control_path = os.path.join(test.virtdir, "control",
                                unixbench_control_file)
    command = utils_test.run_autotest(vm, session, control_path,
def run(test, params, env): """ Test steps: 1) Get the params from params. 2) check the environment 3) Strat the VM and check whether the VM been started successfully 4) Compare the Hugepage memory size to the Guest memory setted. 5) Check the hugepage memory usage. 6) Clean up """ test_type = params.get("test_type", 'normal') tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no') shp_num = int(params.get("static_hugepage_num", 1024)) thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no') mb_enable = 'yes' == params.get("mb_enable", 'yes') delay = int(params.get("delay_time", 10)) # Skip cases early vm_names = [] if test_type == "contrast": vm_names = params.get("vms").split()[:2] if len(vm_names) < 2: raise error.TestNAError("This test requires two VMs") # confirm no VM running allvms = virsh.dom_list('--name').stdout.strip() if allvms != '': raise error.TestNAError("one or more VMs are alive") err_range = float(params.get("mem_error_range", 1.25)) else: vm_names.append(params.get("main_vm")) if test_type == "stress": target_path = params.get("target_path", "/tmp/test.out") elif test_type == "unixbench": unixbench_control_file = params.get("unixbench_controle_file", "unixbench5.control") # backup orignal setting shp_orig_num = utils_memory.get_num_huge_pages() thp_orig_status = utils_memory.get_transparent_hugepage() page_size = utils_memory.get_huge_page_size() # mount/umount hugetlbfs tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages", "hugetlbfs") if tlbfs_enable is True: if tlbfs_status is not True: utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs") else: if tlbfs_status is True: utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs") # set static hugepage utils_memory.set_num_huge_pages(shp_num) # enable/disable transparent hugepage if thp_enable: utils_memory.set_transparent_hugepage('always') else: utils_memory.set_transparent_hugepage('never') # set/del memoryBacking tag for vm_name in vm_names: if mb_enable: vm_xml.VMXML.set_memoryBacking_tag(vm_name) else: vm_xml.VMXML.del_memoryBacking_tag(vm_name) utils_libvirtd.libvirtd_restart() non_started_free = utils_memory.get_num_huge_pages_free() vms = [] sessions = [] try: for vm_name in vm_names: # try to start vm and login try: vm = env.get_vm(vm_name) vm.start() except VMError, e: if mb_enable and not tlbfs_enable: # if hugetlbfs not be mounted, # VM start with memoryBacking tag will fail logging.debug(e) else: error_msg = "Test failed in positive case. 
error: %s\n" % e raise error.TestFail(error_msg) if vm.is_alive() is not True: break vms.append(vm) # try to login and run some program try: session = vm.wait_for_login() except (LoginError, ShellError), e: error_msg = "Test failed in positive case.\n error: %s\n" % e raise error.TestFail(error_msg) sessions.append(session) if test_type == "stress": # prepare file for increasing stress stress_path = prepare_c_file() remote.scp_to_remote(vm.get_address(), 22, 'root', params.get('password'), stress_path, "/tmp/") # Try to install gcc on guest first utils_package.package_install(["gcc"], session, 360) # increasing workload session.cmd("gcc %s -o %s" % (stress_path, target_path)) session.cmd("%s &" % target_path) if test_type == "unixbench": params["main_vm"] = vm_name params["test_control_file"] = unixbench_control_file control_path = os.path.join(test.virtdir, "control", unixbench_control_file) # unixbench test need 'patch' and 'perl' commands installed utils_package.package_install(["patch", "perl"], session, 360) command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True) session.cmd("%s &" % command, ignore_all_errors=True) # wait for autotest running on vm time.sleep(delay) def _is_unixbench_running(): cmd = "ps -ef | grep perl | grep Run" return not session.cmd_status(cmd) if not utils_misc.wait_for(_is_unixbench_running, timeout=240): raise error.TestNAError("Failed to run unixbench in guest," " please make sure some necessary" " packages are installed in guest," " such as gcc, tar, bzip2") logging.debug("Unixbench test is running in VM")
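# The stress branches above call a prepare_c_file() helper that is not shown
# in these snippets.  A minimal sketch of what such a helper could look like
# (an assumption, not the upstream implementation): it writes a small C
# program that keeps allocating and touching memory, and returns its path.
def prepare_c_file():
    content = """
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#define CHUNK (512 * 1024 * 1024)
int main(void)
{
    char *buf;
    while (1) {
        buf = malloc(CHUNK);
        if (buf != NULL)
            memset(buf, 0, CHUNK);
        sleep(2);
    }
    return 0;
}
"""
    path = "/tmp/stress_mem.c"
    with open(path, "w") as c_file:
        c_file.write(content)
    return path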
def scp_package(src, dst):
    """
    Copy file from the host to the guest.
    """
    scp_to_remote(vm.get_address(), '22', params.get('username'),
                  params.get('password'), src, dst)
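# Hypothetical call site for scp_package() above (not from the original
# snippet); the dependency file name is an assumption.
src_pkg = os.path.join(data_dir.get_deps_dir(), "stress-1.0.4.tar.gz")
scp_package(src_pkg, "/tmp/stress-1.0.4.tar.gz")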
def run(test, params, env): """ Run Pktgen test between host/guest 1) Boot the main vm, or just grab it if it's already booted. 2) Configure pktgen server(only linux) 3) Run pktgen test, finish when timeout or env["pktgen_run"] != True :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ login_timeout = float(params.get("login_timeout", 360)) error.context("Init the VM, and try to login", logging.info) external_host = params.get("external_host") if not external_host: get_host_cmd = "ip route | awk '/default/ {print $3}'" external_host = utils.system_output(get_host_cmd) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_login(timeout=login_timeout) error.context("Pktgen server environment prepare", logging.info) # pktgen server only support linux, since pktgen is a linux kernel module pktgen_server = params.get("pktgen_server", "localhost") params_server = params.object_params("pktgen_server") s_shell_client = params_server.get("shell_client", "ssh") s_shell_port = params_server.get("shell_port", "22") s_username = params_server.get("username", "root") s_passwd = params_server.get("password", "123456") s_shell_prompt = params_server.get("shell_prompt") server_session = "" # pktgen server is autotest virtual guest(only linux) if pktgen_server in params.get("vms", "vm1 vm2"): vm_pktgen = env.get_vm(pktgen_server) vm_pktgen.verify_alive() server_session = vm_pktgen.wait_for_login(timeout=login_timeout) runner = server_session.cmd_output_safe pktgen_ip = vm_pktgen.get_address() pktgen_mac = vm_pktgen.get_mac_address() server_interface = utils_net.get_linux_ifname(server_session, pktgen_mac) # pktgen server is a external host assigned elif re.match(r"((\d){1,3}\.){3}(\d){1,3}", pktgen_server): pktgen_ip = pktgen_server server_session = remote.wait_for_login( s_shell_client, pktgen_ip, s_shell_port, s_username, s_passwd, s_shell_prompt ) runner = server_session.cmd_output_safe server_interface = params.get("server_interface") if not server_interface: raise error.TestNAError("Must config server interface before test") else: # using host as a pktgen server server_interface = params.get("netdst", "switch") host_nic = utils_net.Interface(server_interface) pktgen_ip = host_nic.get_ip() pktgen_mac = host_nic.get_mac() runner = utils.system # copy pktgen_test scipt to the test server. local_path = os.path.join(data_dir.get_root_dir(), "shared/scripts/pktgen.sh") remote_path = "/tmp/pktgen.sh" remote.scp_to_remote(pktgen_ip, s_shell_port, s_username, s_passwd, local_path, remote_path) error.context("Run pktgen test", logging.info) run_threads = params.get("pktgen_threads", 1) pktgen_stress_timeout = float(params.get("pktgen_test_timeout", 600)) exec_cmd = "%s %s %s %s %s" % (remote_path, vm.get_address(), vm.get_mac_address(), server_interface, run_threads) try: env["pktgen_run"] = True try: # Set a run flag in env, when other case call this case as a sub # backgroud process, can set run flag to False to stop this case. 
start_time = time.time() stop_time = start_time + pktgen_stress_timeout while env["pktgen_run"] and time.time() < stop_time: runner(exec_cmd, timeout=pktgen_stress_timeout) # using ping to kill the pktgen stress except aexpect.ShellTimeoutError: session.cmd("ping %s" % pktgen_ip, ignore_all_errors=True) finally: env["pktgen_run"] = False error.context("Verify Host and guest kernel no error and call trace", logging.info) vm.verify_kernel_crash() utils_misc.verify_host_dmesg() error.context("Ping external host after pktgen test", logging.info) status, output = utils_test.ping(dest=external_host, session=session, timeout=240, count=20) loss_ratio = utils_test.get_loss_ratio(output) if loss_ratio > int(params.get("packet_lost_ratio", 5)) or loss_ratio == -1: logging.debug("Ping %s output: %s" % (external_host, output)) raise error.TestFail("Guest network connection unusable, " + "packet lost ratio is '%d%%'" % loss_ratio) if server_session: server_session.close() if session: session.close()
def run(test, params, env): """ Test virtio-fs by sharing the data between host and guest. Steps: 1. Create shared directories on the host. 2. Run virtiofsd daemons on the host. 3. Boot a guest on the host with virtiofs options. 4. Log into guest then mount the virtiofs targets. 5. Generate files or run stress on the mount points inside guest. :param test: QEMU test object. :param params: Dictionary with the test parameters. :param env: Dictionary with test environment. """ def get_viofs_exe(session): """ Get viofs.exe from virtio win iso,such as E:\viofs\2k19\amd64 """ media_type = params["virtio_win_media_type"] try: get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type) get_product_dirname = getattr(virtio_win, "product_dirname_%s" % media_type) get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % media_type) except AttributeError: test.error("Not supported virtio win media type '%s'", media_type) viowin_ltr = get_drive_letter(session) if not viowin_ltr: test.error("Could not find virtio-win drive in guest") guest_name = get_product_dirname(session) if not guest_name: test.error("Could not get product dirname of the vm") guest_arch = get_arch_dirname(session) if not guest_arch: test.error("Could not get architecture dirname of the vm") exe_middle_path = ("{name}\\{arch}" if media_type == "iso" else "{arch}\\{name}").format(name=guest_name, arch=guest_arch) exe_file_name = "virtiofs.exe" exe_find_cmd = 'dir /b /s %s\\%s | findstr "\\%s\\\\"' exe_find_cmd %= (viowin_ltr, exe_file_name, exe_middle_path) exe_path = session.cmd(exe_find_cmd).strip() logging.info("Found exe file '%s'", exe_path) return exe_path # data io config cmd_dd = params.get('cmd_dd') cmd_md5 = params.get('cmd_md5') # pjdfs test config cmd_pjdfstest = params.get('cmd_pjdfstest') cmd_unpack = params.get('cmd_unpack') cmd_yum_deps = params.get('cmd_yum_deps') cmd_autoreconf = params.get('cmd_autoreconf') cmd_configure = params.get('cmd_configure') cmd_make = params.get('cmd_make') pjdfstest_pkg = params.get('pjdfstest_pkg') username = params.get('username') password = params.get('password') port = params.get('file_transfer_port') # fio config fio_options = params.get('fio_options') io_timeout = params.get_numeric('io_timeout') os_type = params.get("os_type") vm = env.get_vm(params.get("main_vm")) vm.verify_alive() session = vm.wait_for_login() host_addr = vm.get_address() if os_type == "windows": cmd_timeout = params.get_numeric("cmd_timeout", 120) driver_name = params["driver_name"] install_path = params["install_path"] check_installed_cmd = params["check_installed_cmd"] % install_path # Check whether windows driver is running,and enable driver verifier session = utils_test.qemu.windrv_check_running_verifier(session, vm, test, driver_name) # install winfsp tool error_context.context("Install winfsp for windows guest.", logging.info) installed = session.cmd_status(check_installed_cmd) == 0 if installed: logging.info("Winfsp tool is already installed.") else: install_cmd = utils_misc.set_winutils_letter(session, params["install_cmd"]) session.cmd(install_cmd, cmd_timeout) if not utils_misc.wait_for(lambda: not session.cmd_status( check_installed_cmd), 60): test.error("Winfsp tool is not installed.") for fs in params.objects("filesystems"): fs_params = params.object_params(fs) fs_target = fs_params.get("fs_target") fs_dest = fs_params.get("fs_dest") fs_source = fs_params.get("fs_source_dir") base_dir = fs_params.get("fs_source_base_dir", data_dir.get_data_dir()) if not os.path.isabs(fs_source): 
fs_source = os.path.join(base_dir, fs_source) host_data = os.path.join(fs_source, 'fs_test') if os_type == "linux": error_context.context("Create a destination directory %s " "inside guest." % fs_dest, logging.info) utils_misc.make_dirs(fs_dest, session) error_context.context("Mount virtiofs target %s to %s inside" " guest." % (fs_target, fs_dest), logging.info) utils_disk.mount(fs_target, fs_dest, 'virtiofs', session=session) else: error_context.context("Start virtiofs service in guest.", logging.info) exe_path = get_viofs_exe(session) start_vfs_cmd = params["start_vfs_cmd"] % exe_path session.sendline(start_vfs_cmd) error_context.context("Check if virtiofs service is started.", logging.info) check_virtiofs_cmd = params["check_virtiofs_cmd"] if not utils_misc.wait_for(lambda: re.search("virtiofs", session.cmd_output( check_virtiofs_cmd), re.IGNORECASE), 30): test.fail("Virtiofs service is failed to start.") virtio_fs_disk_label = fs_target error_context.context("Get Volume letter of virtio fs target, the disk" "lable is %s." % virtio_fs_disk_label, logging.info) vol_con = "VolumeName='%s'" % virtio_fs_disk_label vol_func = utils_misc.get_win_disk_vol(session, condition=vol_con) volume_letter = utils_misc.wait_for(lambda: vol_func, cmd_timeout) fs_dest = "%s:" % volume_letter guest_file = os.path.join(fs_dest, 'fs_test') logging.info("The guest file in shared dir is %s" % guest_file) try: if cmd_dd: logging.info("Creating file under %s inside guest." % fs_dest) session.cmd(cmd_dd % guest_file, io_timeout) if os_type == "linux": cmd_md5_vm = cmd_md5 % guest_file else: guest_file_win = guest_file.replace("/", "\\") cmd_md5_vm = cmd_md5 % (volume_letter, guest_file_win) md5_guest = session.cmd_output(cmd_md5_vm, io_timeout).strip().split()[0] logging.info(md5_guest) md5_host = process.run("md5sum %s" % host_data, io_timeout).stdout_text.strip().split()[0] if md5_guest != md5_host: test.fail('The md5 value of host is not same to guest.') if fio_options: error_context.context("Run fio on %s." % fs_dest, logging.info) fio = generate_instance(params, vm, 'fio') try: fio.run(fio_options % guest_file, io_timeout) finally: fio.clean() vm.verify_dmesg() if cmd_pjdfstest: error_context.context("Run pjdfstest on %s." % fs_dest, logging.info) host_path = os.path.join(data_dir.get_deps_dir('pjdfstest'), pjdfstest_pkg) scp_to_remote(host_addr, port, username, password, host_path, fs_dest) session.cmd(cmd_unpack.format(fs_dest), 180) session.cmd(cmd_yum_deps, 180) session.cmd(cmd_autoreconf % fs_dest, 180) session.cmd(cmd_configure.format(fs_dest), 180) session.cmd(cmd_make % fs_dest, io_timeout) session.cmd(cmd_pjdfstest % fs_dest, io_timeout) finally: if os_type == "linux": utils_disk.umount(fs_target, fs_dest, 'virtiofs', session=session) utils_misc.safe_rmdir(fs_dest, session=session)
def run(test, params, env): """ Test steps: 1) Get the params from params. 2) check the environment 3) Strat the VM and check whether the VM been started successfully 4) Compare the Hugepage memory size to the Guest memory setted. 5) Check the hugepage memory usage. 6) Clean up """ test_type = params.get("test_type", 'normal') tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no') shp_num = int(params.get("static_hugepage_num", 1024)) thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no') mb_enable = 'yes' == params.get("mb_enable", 'yes') delay = int(params.get("delay_time", 10)) # Skip cases early vm_names = [] if test_type == "contrast": vm_names = params.get("vms").split()[:2] if len(vm_names) < 2: test.cancel("This test requires two VMs") # confirm no VM running allvms = virsh.dom_list('--name').stdout.strip() if allvms != '': test.cancel("one or more VMs are alive") err_range = float(params.get("mem_error_range", 1.25)) else: vm_names.append(params.get("main_vm")) if test_type == "stress": target_path = params.get("target_path", "/tmp/test.out") elif test_type == "unixbench": unixbench_control_file = params.get("unixbench_controle_file", "unixbench5.control") # backup orignal setting shp_orig_num = utils_memory.get_num_huge_pages() thp_orig_status = utils_memory.get_transparent_hugepage() page_size = utils_memory.get_huge_page_size() # mount/umount hugetlbfs tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages", "hugetlbfs") if tlbfs_enable is True: if tlbfs_status is not True: utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs") else: if tlbfs_status is True: utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs") # set static hugepage utils_memory.set_num_huge_pages(shp_num) # enable/disable transparent hugepage if thp_enable: utils_memory.set_transparent_hugepage('always') else: utils_memory.set_transparent_hugepage('never') # set/del memoryBacking tag for vm_name in vm_names: if mb_enable: vm_xml.VMXML.set_memoryBacking_tag(vm_name) else: vm_xml.VMXML.del_memoryBacking_tag(vm_name) utils_libvirtd.libvirtd_restart() non_started_free = utils_memory.get_num_huge_pages_free() vms = [] sessions = [] try: for vm_name in vm_names: # try to start vm and login try: vm = env.get_vm(vm_name) vm.start() except VMError as e: if mb_enable and not tlbfs_enable: # if hugetlbfs not be mounted, # VM start with memoryBacking tag will fail logging.debug(e) else: error_msg = "Test failed in positive case. 
error: %s\n" % e test.fail(error_msg) if vm.is_alive() is not True: break vms.append(vm) # try to login and run some program try: session = vm.wait_for_login() except (LoginError, ShellError) as e: error_msg = "Test failed in positive case.\n error: %s\n" % e test.fail(error_msg) sessions.append(session) if test_type == "stress": # prepare file for increasing stress stress_path = prepare_c_file() remote.scp_to_remote(vm.get_address(), 22, 'root', params.get('password'), stress_path, "/tmp/") # Try to install gcc on guest first utils_package.package_install(["gcc"], session, 360) # increasing workload session.cmd("gcc %s -o %s" % (stress_path, target_path)) session.cmd("%s &" % target_path) if test_type == "unixbench": params["main_vm"] = vm_name params["test_control_file"] = unixbench_control_file control_path = os.path.join(test.virtdir, "control", unixbench_control_file) # unixbench test need 'patch' and 'perl' commands installed utils_package.package_install(["patch", "perl"], session, 360) command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True) session.cmd("%s &" % command, ignore_all_errors=True) # wait for autotest running on vm time.sleep(delay) def _is_unixbench_running(): cmd = "ps -ef | grep perl | grep Run" return not session.cmd_status(cmd) if not utils_misc.wait_for(_is_unixbench_running, timeout=240): test.cancel("Failed to run unixbench in guest," " please make sure some necessary" " packages are installed in guest," " such as gcc, tar, bzip2") logging.debug("Unixbench test is running in VM") if test_type == "contrast": # wait for vm finish starting completely time.sleep(delay) if not (mb_enable and not tlbfs_enable): logging.debug("starting analyzing the hugepage usage...") pid = vms[-1].get_pid() started_free = utils_memory.get_num_huge_pages_free() # Get the thp usage from /proc/pid/smaps started_anon = utils_memory.get_num_anon_huge_pages(pid) static_used = non_started_free - started_free hugepage_used = static_used * page_size if test_type == "contrast": # get qemu-kvm memory consumption by top cmd = "top -b -n 1|awk '$1 == %s {print $10}'" % pid rate = process.run(cmd, ignore_status=False, verbose=True, shell=True).stdout_text.strip() qemu_kvm_used = (utils_memory.memtotal() * float(rate)) / 100 logging.debug("rate: %s, used-by-qemu-kvm: %f, used-by-vm: %d", rate, qemu_kvm_used, hugepage_used) if abs(qemu_kvm_used - hugepage_used) > hugepage_used * (err_range - 1): test.fail("Error for hugepage usage") if test_type == "stress": if non_started_free <= started_free: logging.debug("hugepage usage:%d -> %d", non_started_free, started_free) test.fail("Error for hugepage usage with stress") if mb_enable is not True: if static_used > 0: test.fail("VM use static hugepage without" " memoryBacking element") if thp_enable is not True and started_anon > 0: test.fail("VM use transparent hugepage, while" " it's disabled") else: if tlbfs_enable is not True: if static_used > 0: test.fail("VM use static hugepage without tlbfs" " mounted") if thp_enable and started_anon <= 0: test.fail("VM doesn't use transparent" " hugepage") else: if shp_num > 0: if static_used <= 0: test.fail("VM doesn't use static" " hugepage") else: if static_used > 0: test.fail("VM use static hugepage," " while it's set to zero") if thp_enable is not True: if started_anon > 0: test.fail("VM use transparent hugepage," " while it's disabled") else: if shp_num == 0 and started_anon <= 0: test.fail("VM doesn't use transparent" " hugepage, while static" " hugepage is disabled") 
finally: # end up session for session in sessions: session.close() for vm in vms: if vm.is_alive(): vm.destroy() for vm_name in vm_names: if mb_enable: vm_xml.VMXML.del_memoryBacking_tag(vm_name) else: vm_xml.VMXML.set_memoryBacking_tag(vm_name) utils_libvirtd.libvirtd_restart() if tlbfs_enable is True: if tlbfs_status is not True: utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs") else: if tlbfs_status is True: utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs") utils_memory.set_num_huge_pages(shp_orig_num) utils_memory.set_transparent_hugepage(thp_orig_status)
def scp_to_remote(local_path="", remote_path=""):
    remote.scp_to_remote(dsthost, 22, "root", passwd, local_path, remote_path)
    vm.copy_files_to(local_path, remote_path)
def prepare_ceph_disk(ceph_params, remote_virsh_dargs, test, runner_on_target): """ Prepare one image on remote ceph server with enabled or disabled auth And expose it to VM by network access :param ceph_params: parameter to setup ceph. :param remote_virsh_dargs: parameter to remote virsh. :param test: test itself. """ # Ceph server config parameters virsh_dargs = {'debug': True, 'ignore_status': True} prompt = ceph_params.get("prompt", r"[\#\$]\s*$") ceph_disk = "yes" == ceph_params.get("ceph_disk") mon_host = ceph_params.get('mon_host') client_name = ceph_params.get('client_name') client_key = ceph_params.get("client_key") vol_name = ceph_params.get("vol_name") disk_img = ceph_params.get("disk_img") key_file = ceph_params.get("key_file") disk_format = ceph_params.get("disk_format") key_opt = "" # Auth and secret config parameters. auth_user = ceph_params.get("auth_user") auth_key = ceph_params.get("auth_key") auth_type = ceph_params.get("auth_type") auth_usage = ceph_params.get("secret_usage") secret_uuid = ceph_params.get("secret_uuid") # Remote host parameters. remote_ip = ceph_params.get("server_ip") remote_user = ceph_params.get("server_user", "root") remote_pwd = ceph_params.get("server_pwd") # Clean up dirty secrets in test environments if there are. dirty_secret_list = get_secret_list() if dirty_secret_list: for dirty_secret_uuid in dirty_secret_list: virsh.secret_undefine(dirty_secret_uuid) # Install ceph-common package which include rbd command if utils_package.package_install(["ceph-common"]): if client_name and client_key: # Clean up dirty secrets on remote host. try: remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs) remote_dirty_secret_list = get_secret_list(remote_virsh) for dirty_secret_uuid in remote_dirty_secret_list: remote_virsh.secret_undefine(dirty_secret_uuid) except (process.CmdError, remote.SCPError) as detail: raise exceptions.TestError(detail) finally: logging.debug('clean up secret on remote host') remote_virsh.close_session() with open(key_file, 'w') as f: f.write("[%s]\n\tkey = %s\n" % (client_name, client_key)) key_opt = "--keyring %s" % key_file # Create secret xml sec_xml = secret_xml.SecretXML("no", "no") sec_xml.usage = auth_type sec_xml.usage_name = auth_usage sec_xml.uuid = secret_uuid sec_xml.xmltreefile.write() logging.debug("Secret xml: %s", sec_xml) ret = virsh.secret_define(sec_xml.xml) libvirt.check_exit_status(ret) secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+", ret.stdout.strip())[0].lstrip() logging.debug("Secret uuid %s", secret_uuid) if secret_uuid is None: test.fail("Failed to get secret uuid") # Set secret value ret = virsh.secret_set_value(secret_uuid, auth_key, **virsh_dargs) libvirt.check_exit_status(ret) # Create secret on remote host. 
local_path = sec_xml.xml remote_path = '/var/lib/libvirt/images/new_secret.xml' remote_folder = '/var/lib/libvirt/images' cmd = 'mkdir -p %s && chmod 777 %s && touch %s' % ( remote_folder, remote_folder, remote_path) cmd_result = remote.run_remote_cmd(cmd, ceph_params, runner_on_target) status, output = cmd_result.exit_status, cmd_result.stdout_text.strip( ) if status: test.fail("Failed to run '%s' on the remote: %s" % (cmd, output)) remote.scp_to_remote(remote_ip, '22', remote_user, remote_pwd, local_path, remote_path, limit="", log_filename=None, timeout=600, interface=None) cmd = "/usr/bin/virsh secret-define --file %s" % remote_path cmd_result = remote.run_remote_cmd(cmd, ceph_params, runner_on_target) status, output = cmd_result.exit_status, cmd_result.stdout_text.strip( ) if status: test.fail("Failed to run '%s' on the remote: %s" % (cmd, output)) # Set secret value on remote host. cmd = "/usr/bin/virsh secret-set-value --secret %s --base64 %s" % ( secret_uuid, auth_key) cmd_result = remote.run_remote_cmd(cmd, ceph_params, runner_on_target) status, output = cmd_result.exit_status, cmd_result.stdout_text.strip( ) if status: test.fail("Failed to run '%s' on the remote: %s" % (cmd, output)) # Delete the disk if it exists disk_src_name = "%s/%s" % (vol_name, disk_img) cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm " "{2}".format(mon_host, key_opt, disk_src_name)) process.run(cmd, ignore_status=True, shell=True) # Convert the disk format first_disk_device = ceph_params.get('first_disk') blk_source = first_disk_device['source'] disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host)) if auth_user and auth_key: disk_path += (":id=%s:key=%s" % (auth_user, auth_key)) disk_cmd = ("rbd -m %s %s info %s || qemu-img convert" " -O %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format, blk_source, disk_path)) process.run(disk_cmd, ignore_status=False, shell=True) return (key_opt, secret_uuid)
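The remote secret-define and secret-set-value steps above repeat the same run-and-check pattern three times. A small helper such as the hypothetical sketch below, built on the same remote.run_remote_cmd() call the snippet already uses (the virttest import path is assumed), would keep that pattern in one place.

from virttest import remote  # assumed import path for the module used above

def run_on_target_or_fail(test, cmd, ceph_params, runner_on_target):
    """Run a command on the remote host and fail the test on non-zero exit."""
    result = remote.run_remote_cmd(cmd, ceph_params, runner_on_target)
    output = result.stdout_text.strip()
    if result.exit_status:
        test.fail("Failed to run '%s' on the remote: %s" % (cmd, output))
    return output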
def run(test, params, env): """ Test basic QinQ - 10 * 4096 with bridge backend 1) Create a private bridge 2) Boot a VM over private bridge 3) Create interfaces in guest with qinq.sh 4) Set IP on guest L1 interface and bring this interface on 5) Create 802.1ad interface on host with the private bridge 6) Start tcpdump on host 7) Do ping test 8) Check tcpdump result with vlan tag and ethertype 9) Set IP on guest L2 interface and bring this interface on 10) Create 802.1q interface on host with the 802.1ad interface 11) Start tcpdump on host 12) Do ping test 13) Check tcpdump result with vlan tag and ethertype 14) SCP file transfer between host and guest :param test: KVM test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ def copy_qinq_file(vm, guest_qinq_dir): """ Copy qinq file from host to guest :param vm: guest vm :param guest_qinq_dir: qing script dir in guest """ error_context.context("Copy qinq script to guest", logging.info) host_qinq_dir = os.path.join(data_dir.get_deps_dir(), params.get("copy_qinq_script")) vm.copy_files_to(host_qinq_dir, guest_qinq_dir) def check_tcpdump_result(session, iface_name, ethertype, ethertype2=None, vlan_tag=None, vlan_tag2=None, enable_logging=False): """ Check tcpdump result. :param session: guest session :param iface_name: the tcpdump file of the interface :param ethertype: ethertype value need to be matched :param ethertype2: ethertype value 2 needed to be matched if not None :param vlan_tag: vlan tag value needed to be matched if not None :param vlan_tag2: vlan tag value 2 needed to be matched if not None :param enable_logging: whether to dump tcpdump results during test """ get_tcpdump_log_cmd = params["get_tcpdump_log_cmd"] % iface_name tcpdump_content = session.cmd_output(get_tcpdump_log_cmd, timeout=300).strip() lines = tcpdump_content.splitlines() sum = 0 for i in range(len(lines)): if enable_logging: logging.info("line %s: %s", i, lines[i]) if not ethertype2: if "ICMP echo re" in lines[i] and \ ethertype in lines[i-1]: sum += 1 if vlan_tag and vlan_tag not in lines[i - 1]: if "too much work for irq" in lines[i - 1]: continue else: test.fail( "in %s tcpdump log, there should be vlan " "tag %s" % (iface_name, vlan_tag)) elif not vlan_tag: if "vlan" in lines[i - 1]: test.fail("in %s tcpdump log, there should not be " "vlan tag" % iface_name) else: if "ICMP echo re" in lines[i] and \ ethertype in lines[i-1] and \ ethertype2 in lines[i-1]: sum += 1 if vlan_tag not in lines[i-1] or \ vlan_tag2 not in lines[i-1]: if "too much work for irq" in lines[i - 1]: continue else: test.fail( "in %s tcpdump log, there should be vlan " "tag %s" % (iface_name, vlan_tag)) if sum == 0: test.fail("in %s tcpdump log, ethertype is not %s" % (iface_name, ethertype)) def compare_host_guest_md5sum(): """ Compare md5 value of file on host and guest :param name: file name """ logging.info("Comparing md5sum on guest and host") host_result = crypto.hash_file(host_path, algorithm="md5") try: output = session.cmd_output("md5sum %s" % guest_path, 120).split()[0] guest_result = re.findall(r"\w+", output)[0] except IndexError: logging.error("Could not get file md5sum in guest") return False logging.debug("md5sum: guest(%s), host(%s)", guest_result, host_result) return guest_result == host_result if params["netdst"] not in utils_net.Bridge().list_br(): test.cancel("Only support Linux bridge") login_timeout = int(params.get("login_timeout", "600")) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = 
vm.wait_for_login(timeout=login_timeout) guest_qinq_dir = params["guest_qinq_dir"] copy_qinq_file(vm, guest_qinq_dir) session.close() vm.destroy(gracefully=True) brname = params.get("private_bridge", "tmpbr") host_bridges = utils_net.Bridge() if brname in host_bridges.list_br(): utils_net.Interface(brname).down() host_bridges.del_bridge(brname) set_ip_cmd = params["set_ip_cmd"] logging.debug("Create private bridge %s", brname) host_bridges.add_bridge(brname) host_bridge_iface = utils_net.Interface(brname) logging.debug("Bring up %s", brname) process.system(set_ip_cmd % ("192.168.1.1", brname)) host_bridge_iface.up() try: login_timeout = int(params.get("login_timeout", "600")) params['netdst'] = brname params["start_vm"] = "yes" params["image_snapshot"] = "yes" env_process.preprocess_vm(test, params, env, params["main_vm"]) vm = env.get_vm(params["main_vm"]) vm.verify_alive() session = vm.wait_for_serial_login(timeout=login_timeout) stop_NM_cmd = params.get("stop_NM_cmd") session.cmd(stop_NM_cmd, ignore_all_errors=True) mac = vm.get_mac_address() nic_name = utils_net.get_linux_ifname(session, mac) # Set first_nic IP in guest ip = params["ip_vm"] session.cmd_output(set_ip_cmd % (ip, nic_name)) # Create vlans via script qinq.sh output = session.cmd_output("sh %sqinq.sh %s" % (guest_qinq_dir, nic_name), timeout=300) logging.info("%s", output) # Set interface v1v10 IP in guest L1tag_iface = params["L1tag_iface"] L1tag_iface_ip = params["L1tag_iface_ip"] session.cmd_output(set_ip_cmd % (L1tag_iface_ip, L1tag_iface)) session.cmd("ip link set %s up" % L1tag_iface) output = session.cmd_output("ip addr show %s" % L1tag_iface, timeout=120) logging.info(output) # Start tcpdump on L1tag interface and first_nic in guest error_context.context("Start tcpdump in %s" % params["main_vm"], logging.info) L1tag_tcpdump_log = params.get("tcpdump_log") % L1tag_iface L1tag_tcpdump_cmd = params.get("tcpdump_cmd") % (L1tag_iface, L1tag_tcpdump_log) first_nic_tcpdump_log = params.get("tcpdump_log") % nic_name first_nic_tcpdump_cmd = params.get("tcpdump_cmd") % ( nic_name, first_nic_tcpdump_log) session.sendline(L1tag_tcpdump_cmd) time.sleep(2) session.sendline(first_nic_tcpdump_cmd) time.sleep(5) # Create 802.1ad vlan via bridge in host error_context.context("Create 802.1ad vlan via bridge %s" % brname, logging.info) advlan_ifname = params["advlan_name"] add_advlan_cmd = params["add_advlan_cmd"] process.system_output(add_advlan_cmd) advlan_iface = utils_net.Interface(advlan_ifname) advlan_iface.set_mac(params["advlan_mac"]) process.system(set_ip_cmd % (params["advlan_ip"], advlan_ifname)) advlan_iface.up() output = process.getoutput("ip addr show %s" % advlan_ifname) logging.info(output) # Ping guest from host via 802.1ad vlan interface error_context.context( "Start ping test from host to %s via %s" % (L1tag_iface_ip, advlan_ifname), logging.info) ping_count = int(params.get("ping_count")) status, output = utils_net.ping(L1tag_iface_ip, ping_count, interface=advlan_ifname, timeout=float(ping_count) * 1.5) if status != 0: test.fail("Ping returns non-zero value %s" % output) package_lost = utils_test.get_loss_ratio(output) if package_lost != 0: test.fail("%s packeage lost when ping guest ip %s " % (package_lost, L1tag_iface_ip)) # Stop tcpdump and check result session.cmd_output_safe("pkill tcpdump") check_tcpdump_result(session, L1tag_iface, "ethertype IPv4 (0x0800)") check_tcpdump_result(session, nic_name, "ethertype 802.1Q-QinQ (0x88a8)", vlan_tag="vlan 10,") # Set IP on L2 tag on the guest interface with vid 20 
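# At this point the outer 802.1ad service tag (ethertype 0x88a8, vlan 10) has
# been verified on the guest's first NIC. The steps below add the inner
# 802.1Q customer tag (ethertype 0x8100, vlan 20), so traffic to the L2
# interface should leave the first NIC double-tagged.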
L2tag_iface = params["L2tag_iface"] L2tag_iface_ip = params["L2tag_iface_ip"] session.cmd_output(set_ip_cmd % (L2tag_iface_ip, L2tag_iface)) session.cmd("ip link set %s up" % L2tag_iface) output = session.cmd_output("ip addr show %s" % L2tag_iface, timeout=120) logging.info(output) # Start tcpdump on L1tag and L2tag interfaces and first_nic in guest error_context.context("Start tcpdump in %s" % params["main_vm"], logging.info) L2tag_tcpdump_log = params.get("tcpdump_log") % L2tag_iface L2tag_tcpdump_cmd = params.get("tcpdump_cmd") % (L2tag_iface, L2tag_tcpdump_log) session.sendline(L1tag_tcpdump_cmd) time.sleep(2) session.sendline(L2tag_tcpdump_cmd) time.sleep(2) session.sendline(first_nic_tcpdump_cmd) time.sleep(5) # Create 802.1q vlan via 802.1ad vlan in host error_context.context( "Create 802.1q vlan via 802.1ad vlan %s" % advlan_ifname, logging.info) qvlan_ifname = params["qvlan_name"] add_qvlan_cmd = params["add_qvlan_cmd"] process.system_output(add_qvlan_cmd) qvlan_iface = utils_net.Interface(qvlan_ifname) process.system(set_ip_cmd % (params["qvlan_ip"], qvlan_ifname)) qvlan_iface.up() output = process.getoutput("ip addr show %s" % qvlan_ifname) logging.info(output) # Ping guest from host via 802.1q vlan interface error_context.context( "Start ping test from host to %s via %s" % (L2tag_iface_ip, qvlan_ifname), logging.info) status, output = utils_net.ping(L2tag_iface_ip, ping_count, interface=qvlan_ifname, timeout=float(ping_count) * 1.5) if status != 0: test.fail("Ping returns non-zero value %s" % output) package_lost = utils_test.get_loss_ratio(output) if package_lost >= 5: test.fail("%s packeage lost when ping guest ip %s " % (package_lost, L2tag_iface_ip)) # Stop tcpdump and check result session.cmd_output_safe("pkill tcpdump") check_tcpdump_result(session, L1tag_iface, "ethertype 802.1Q (0x8100)", vlan_tag="vlan 20,") check_tcpdump_result(session, L2tag_iface, "ethertype IPv4 (0x0800)") check_tcpdump_result(session, nic_name, ethertype="ethertype 802.1Q-QinQ (0x88a8)", ethertype2="ethertype 802.1Q", vlan_tag="vlan 10,", vlan_tag2="vlan 20,") # scp file to guest with L2 vlan tag file_size = int(params.get("file_size", "4096")) host_path = os.path.join(test.tmpdir, "transferred_file") guest_path = params.get("guest_path", "/var/tmp/transferred_file") transfer_timeout = int(params.get("transfer_timeout", 1000)) cmd = "dd if=/dev/zero of=%s bs=1M count=%d" % (host_path, file_size) error_context.context("Creating %dMB file on host" % file_size, logging.info) process.run(cmd) error_context.context( "Transferring file host -> guest, " "timeout: %ss" % transfer_timeout, logging.info) shell_port = int(params.get("shell_port", 22)) password = params["password"] username = params["username"] remote.scp_to_remote(L2tag_iface_ip, shell_port, username, password, host_path, guest_path) if not compare_host_guest_md5sum(): test.fail("md5sum mismatch on guest and host") finally: session.cmd("rm -rf %s" % guest_path) session.close() vm.destroy(gracefully=True) host_bridge_iface.down() host_bridges.del_bridge(brname)
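compare_host_guest_md5sum() above hashes the host copy with crypto.hash_file() and parses md5sum output from the guest. For reference, the host-side half can be done with the standard library alone, as in this sketch; the guest-side md5sum call stays framework specific.

import hashlib

def md5_of_file(path, chunk_size=1 << 20):
    """Return the hex md5 digest of a local file, read in 1 MiB chunks."""
    digest = hashlib.md5()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()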
def run(test, params, env): """ Test migration with special network settings 1) migrate guest with bridge type interface connected to ovs bridge 2) migrate guest with direct type interface when a macvtap device name exists on dest host :param test: test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ def check_vm_network_accessed(ping_dest, session=None): """ The operations to the VM need to be done before or after migration happens :param ping_dest: The destination to be ping :param session: The session object to the host :raise: test.fail when ping fails """ # Confirm local/remote VM can be accessed through network. logging.info("Check VM network connectivity") status, output = utils_net.ping(ping_dest, count=10, timeout=20, output_func=logging.debug, session=session) if status != 0: test.fail("Ping failed, status: %s, output: %s" % (status, output)) def vm_sync(vmxml, vm_name=None, virsh_instance=virsh): """ A wrapper to sync vm xml on localhost and remote host :param vmxml: domain VMXML instance :param vm_name: The name of VM :param virsh_instance: virsh instance object """ if vm_name and virsh_instance != virsh: remote.scp_to_remote(server_ip, '22', server_user, server_pwd, vmxml.xml, vmxml.xml) if virsh_instance.domain_exists(vm_name): if virsh_instance.is_alive(vm_name): virsh_instance.destroy(vm_name, ignore_status=True) virsh_instance.undefine(vmxml.xml, ignore_status=True) virsh_instance.define(vmxml.xml, debug=True) else: vmxml.sync() def update_iface_xml(vm_name, iface_dict, virsh_instance=virsh): """ Update interfaces for guest :param vm_name: The name of VM :param iface_dict: The interface configurations params :param virsh_instance: virsh instance object """ logging.debug("update iface xml") vmxml = vm_xml.VMXML.new_from_inactive_dumpxml( vm_name, virsh_instance=virsh_instance) vmxml.remove_all_device_by_type('interface') vm_sync(vmxml, vm_name, virsh_instance=virsh_instance) iface = interface.Interface('network') iface.xml = libvirt.modify_vm_iface(vm_name, "get_xml", iface_dict, virsh_instance=virsh_instance) vmxml.add_device(iface) vmxml.xmltreefile.write() vm_sync(vmxml, vm_name, virsh_instance=virsh_instance) logging.debug("VM XML after updating interface: %s" % vmxml) def update_net_dict(net_dict, runner=utils_net.local_runner): """ Update network dict :param net_dict: The network dict to be updated :param runner: Command runner :return: Updated network dict """ if net_dict.get("net_name", "") == "direct-macvtap": logging.info("Updating network iface name") iface_name = utils_net.get_net_if(runner=runner, state="UP")[0] net_dict.update({"forward_iface": iface_name}) else: # TODO: support other types logging.info("No need to update net_dict. We only support to " "update direct-macvtap type for now.") logging.debug("net_dict is %s" % net_dict) return net_dict def get_remote_direct_mode_vm_mac(vm_name, uri): """ Get mac of remote direct mode VM :param vm_name: The name of VM :param uri: The uri on destination :return: mac :raise: test.fail when the result of virsh domiflist is incorrect """ vm_mac = None res = virsh.domiflist( vm_name, uri=uri, ignore_status=False).stdout_text.strip().split("\n") if len(res) < 2: test.fail("Unable to get remote VM's mac: %s" % res) else: vm_mac = res[-1].split()[-1] return vm_mac def create_fake_tap(remote_session): """ Create a fake macvtap on destination host. :param remote_session: The session to the destination host. 
:return: The new tap device """ tap_cmd = "ls /dev/tap* |awk -F 'tap' '{print $NF}'" tap_idx = remote_session.cmd_output(tap_cmd).strip() if not tap_idx: test.fail("Unable to get tap index using %s." % tap_cmd) fake_tap_dest = 'tap'+str(int(tap_idx)+1) logging.debug("creating a fake tap %s...", fake_tap_dest) cmd = "touch /dev/%s" % fake_tap_dest remote_session.cmd(cmd) return fake_tap_dest migration_test = migration.MigrationTest() migration_test.check_parameters(params) libvirt_version.is_libvirt_feature_supported(params) # Params to update disk using shared storage params["disk_type"] = "file" params["disk_source_protocol"] = "netfs" params["mnt_path_name"] = params.get("nfs_mount_dir") # Local variables virsh_args = {"debug": True} server_ip = params.get("server_ip") server_user = params.get("server_user", "root") server_pwd = params.get("server_pwd") client_ip = params.get("client_ip") client_pwd = params.get("client_pwd") virsh_options = params.get("virsh_options", "") extra = params.get("virsh_migrate_extra") options = params.get("virsh_migrate_options", "--live --p2p --verbose") restart_dhclient = params.get("restart_dhclient", "dhclient -r; dhclient") ping_dest = params.get("ping_dest", "www.baidu.com") extra_args = migration_test.update_virsh_migrate_extra_args(params) migrate_vm_back = "yes" == params.get("migrate_vm_back", "no") target_vm_name = params.get("target_vm_name") direct_mode = "yes" == params.get("direct_mode", "no") check_macvtap_exists = "yes" == params.get("check_macvtap_exists", "no") create_fake_tap_dest = "yes" == params.get("create_fake_tap_dest", "no") macvtap_cmd = params.get("macvtap_cmd") modify_target_vm = "yes" == params.get("modify_target_vm", "no") ovs_bridge_name = params.get("ovs_bridge_name") network_dict = eval(params.get("network_dict", '{}')) iface_dict = eval(params.get("iface_dict", '{}')) remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user, 'remote_pwd': server_pwd, 'unprivileged_user': None, 'ssh_remote_auth': True} cmd_parms = {'server_ip': server_ip, 'server_user': server_user, 'server_pwd': server_pwd} virsh_session_remote = None libvirtd_conf = None mig_result = None target_org_xml = None target_vm_session = None target_vm = None exp_macvtap = [] fake_tap_dest = None # params for migration connection params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( params.get("migrate_dest_host")) params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri( params.get("migrate_source_host")) src_uri = params.get("virsh_migrate_connect_uri") dest_uri = params.get("virsh_migrate_desturi") vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) vm.verify_alive() bk_uri = vm.connect_uri postcopy_options = params.get("postcopy_options") action_during_mig = None if postcopy_options: extra = "%s %s" % (extra, postcopy_options) action_during_mig = virsh.migrate_postcopy # For safety reasons, we'd better back up xmlfile. 
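# Both the local guest XML (backed up just below) and, when a target VM
# exists on the destination host, its inactive XML are saved so that the
# finally block can restore the original configurations after the
# migration scenarios.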
new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) orig_config_xml = new_xml.copy() try: # Create a remote runner for later use runner_on_target = remote.RemoteRunner(host=server_ip, username=server_user, password=server_pwd) remote_session = remote.remote_login("ssh", server_ip, "22", server_user, server_pwd, r'[$#%]') virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs) if target_vm_name: target_vm = libvirt_vm.VM(target_vm_name, params, vm.root_dir, vm.address_cache) target_vm.connect_uri = dest_uri if not virsh_session_remote.domain_exists(target_vm_name): test.error("VM %s should be installed on %s." % (target_vm_name, server_ip)) # Backup guest's xml on remote target_org_xml = vm_xml.VMXML.new_from_inactive_dumpxml( target_vm_name, virsh_instance=virsh_session_remote) # Scp original xml to remote for restoration remote.scp_to_remote(server_ip, '22', server_user, server_pwd, target_org_xml.xml, target_org_xml.xml) logging.debug("target xml is %s" % target_org_xml) if ovs_bridge_name: status, stdout = utils_net.create_ovs_bridge(ovs_bridge_name) if status: test.fail("Failed to create ovs bridge on local. Status: %s" "Stdout: %s" % (status, stdout)) status, stdout = utils_net.create_ovs_bridge( ovs_bridge_name, session=remote_session) if status: test.fail("Failed to create ovs bridge on remote. Status: %s" "Stdout: %s" % (status, stdout)) if network_dict: update_net_dict(network_dict, runner=remote_session.cmd) libvirt_network.create_or_del_network( network_dict, remote_args=remote_virsh_dargs) logging.info("dest: network created") update_net_dict(network_dict) libvirt_network.create_or_del_network(network_dict) logging.info("localhost: network created") if target_vm_name: if modify_target_vm and iface_dict: logging.info("Updating remote VM's interface") update_iface_xml(target_vm_name, iface_dict, virsh_instance=virsh_session_remote) target_vm.start() target_vm_session = target_vm.wait_for_serial_login(timeout=240) check_vm_network_accessed(ping_dest, session=target_vm_session) if check_macvtap_exists and macvtap_cmd: # Get macvtap device's index on remote after target_vm started idx = remote_session.cmd_output(macvtap_cmd).strip() if not idx: test.fail("Unable to get macvtap index using %s." 
% macvtap_cmd) # Generate the expected macvtap devices' index list exp_macvtap = ['macvtap'+idx, 'macvtap'+str(int(idx)+1)] if create_fake_tap_dest: fake_tap_dest = create_fake_tap(remote_session) remote_session.close() # Change domain network xml if iface_dict: if "mac" not in iface_dict: mac = utils_net.generate_mac_address_simple() iface_dict.update({'mac': mac}) else: mac = iface_dict["mac"] update_iface_xml(vm_name, iface_dict) # Change the disk of the vm libvirt.set_vm_disk(vm, params) if not vm.is_alive(): try: vm.start() except virt_vm.VMStartError as err: test.fail("Failed to start VM: %s" % err) logging.debug("Guest xml after starting:\n%s", vm_xml.VMXML.new_from_dumpxml(vm_name)) # Check local guest network connection before migration if vm.serial_console is not None: vm.cleanup_serial_console() vm.create_serial_console() vm_session = vm.wait_for_serial_login(timeout=240) if not utils_package.package_install('dhcp-client', session=vm_session): test.error("Failed to install dhcp-client on guest.") utils_net.restart_guest_network(vm_session) vm_ip = utils_net.get_guest_ip_addr(vm_session, mac) logging.debug("VM IP Addr: %s", vm_ip) if direct_mode: check_vm_network_accessed(ping_dest, session=vm_session) else: check_vm_network_accessed(vm_ip) # Execute migration process vms = [vm] migration_test.do_migration(vms, None, dest_uri, 'orderly', options, thread_timeout=900, ignore_status=True, virsh_opt=virsh_options, func=action_during_mig, extra_opts=extra, **extra_args) mig_result = migration_test.ret # Check network accessibility after migration if int(mig_result.exit_status) == 0: vm.connect_uri = dest_uri if vm.serial_console is not None: vm.cleanup_serial_console() vm.create_serial_console() vm_session_after_mig = vm.wait_for_serial_login(timeout=240) vm_session_after_mig.cmd(restart_dhclient) check_vm_network_accessed(ping_dest, session=vm_session_after_mig) if check_macvtap_exists and macvtap_cmd: remote_session = remote.remote_login("ssh", server_ip, "22", server_user, server_pwd, r'[$#%]') # Check macvtap devices' index after migration idx = remote_session.cmd_output(macvtap_cmd) act_macvtap = ['macvtap'+i for i in idx.strip().split("\n")] if act_macvtap != exp_macvtap: test.fail("macvtap devices after migration are incorrect!" " Actual: %s, Expected: %s. 
" % (act_macvtap, exp_macvtap)) else: if fake_tap_dest: res = remote.run_remote_cmd("ls /dev/%s" % fake_tap_dest, params, runner_on_target) libvirt.check_exit_status(res) if target_vm_session: check_vm_network_accessed(ping_dest, session=target_vm_session) # Execute migration from remote if migrate_vm_back: ssh_connection = utils_conn.SSHConnection(server_ip=client_ip, server_pwd=client_pwd, client_ip=server_ip, client_pwd=server_pwd) try: ssh_connection.conn_check() except utils_conn.ConnectionError: ssh_connection.conn_setup() ssh_connection.conn_check() # Pre migration setup for local machine migration_test.migrate_pre_setup(src_uri, params) cmd = "virsh migrate %s %s %s" % (vm_name, options, src_uri) logging.debug("Start migration: %s", cmd) cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target) logging.info(cmd_result) if cmd_result.exit_status: test.fail("Failed to run '%s' on remote: %s" % (cmd, cmd_result)) logging.debug("VM is migrated back.") vm.connect_uri = bk_uri if vm.serial_console is not None: vm.cleanup_serial_console() vm.create_serial_console() vm_session_after_mig_bak = vm.wait_for_serial_login(timeout=240) vm_session_after_mig_bak.cmd(restart_dhclient) check_vm_network_accessed(ping_dest, vm_session_after_mig_bak) finally: logging.debug("Recover test environment") vm.connect_uri = bk_uri migration_test.cleanup_vm(vm, dest_uri) logging.info("Recovery VM XML configration") orig_config_xml.sync() remote_session = remote.remote_login("ssh", server_ip, "22", server_user, server_pwd, r'[$#%]') if target_vm and target_vm.is_alive(): target_vm.destroy(gracefully=False) if target_org_xml and target_vm_name: logging.info("Recovery XML configration for %s.", target_vm_name) virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs) vm_sync(target_org_xml, vm_name=target_vm_name, virsh_instance=virsh_session_remote) virsh_session_remote.close_session() if fake_tap_dest: remote_session.cmd_output_safe("rm -rf /dev/%s" % fake_tap_dest) if network_dict: libvirt_network.create_or_del_network( network_dict, is_del=True, remote_args=remote_virsh_dargs) libvirt_network.create_or_del_network(network_dict, is_del=True) if ovs_bridge_name: utils_net.delete_ovs_bridge(ovs_bridge_name) utils_net.delete_ovs_bridge(ovs_bridge_name, session=remote_session) remote_session.close() if target_vm_session: target_vm_session.close() if virsh_session_remote: virsh_session_remote.close_session() if migrate_vm_back: if 'ssh_connection' in locals(): ssh_connection.auto_recover = True migration_test.migrate_pre_setup(src_uri, params, cleanup=True) logging.info("Remove local NFS image") source_file = params.get("source_file") if source_file: libvirt.delete_local_disk("file", path=source_file)