def test(self):
    log_file_server = os.path.join(self.workdir, "file_server")
    self.configure_kdump()
    self.configure_nfs()
    session_check = remote.RemoteRunner("ssh", self.ip_server, 22,
                                        self.user_name_server,
                                        self.password_server,
                                        self.prompt_server, "\n",
                                        log_file_server, 100, 10, None)
    session_check.run("date +%s", 100, "True")
    time_init = self.run_cmd_out(
        "cat %s | tail -3 | head -1 | cut -d' ' -f3" % log_file_server).strip()
    session_crash = remote.remote_login("ssh", self.ip, 22, self.user_name,
                                        self.password, self.prompt, "\n",
                                        None, 100, None, None, False)
    session_crash.sendline('echo 1 > /proc/sys/kernel/sysrq;')
    session_crash.sendline('echo "c" > /proc/sysrq-trigger;')
    time.sleep(300)
    self.log.info("Connecting to nfs server")
    session_check = remote.RemoteRunner("ssh", self.ip_server, 22,
                                        self.user_name_server,
                                        self.password_server,
                                        self.prompt_server, "\n",
                                        log_file_server, 100, 10, None)
    if self.distro == "rhel":
        nfs_dir_path = os.path.join(self.nfs_path, "var", "crash")
        self.log.info(nfs_dir_path)
        session_check.run("ls -lrt %s;" % nfs_dir_path, 100, "True")
        crash_dir = self.run_cmd_out(
            "cat %s | grep drwxr | awk '{print $NF}' | tail -1"
            % log_file_server)
        path_crash_dir = os.path.join(nfs_dir_path, crash_dir)
        session_check.run("stat -c%%Z %s" % path_crash_dir, 100, "True")
        time_created = self.run_cmd_out(
            "cat %s | tail -3 | head -1 | cut -d' ' -f3"
            % log_file_server).strip()
        # Compare the epoch timestamps numerically, not as strings
        if int(time_created) < int(time_init):
            self.fail("Dump is not saved in nfs server")
        session_check.run("ls -lrt %s" % path_crash_dir, 100, "True")
        for files in self.file_list:
            if files not in open(log_file_server).read():
                self.fail("%s is not saved" % files)
def test(self):
    log_file = os.path.join(self.srcdir, "file")
    session_init = remote.RemoteRunner("ssh", self.ip, 22, self.user_name,
                                       self.password, self.prompt, "\n",
                                       log_file, 100, 10, None)
    session_init.run("cat /boot/config-`uname -r` | grep PSTORE", 600, "True")
    if not self.run_cmd_out("cat %s | grep -Eai 'CONFIG_PSTORE=y'" % log_file):
        self.fail("Pstore is not configured")
    session_init.run("mount", 600, "True")
    if not self.run_cmd_out(
            "cat %s | grep -Eai 'debugfs on /sys/kernel/debug'" % log_file):
        self.fail("debugfs is not mounted")
    session_init.run("ls -lrt /sys/fs/pstore", 100, "True")
    file_list = ['common-nvram', 'dmesg-nvram']
    for files in file_list:
        if files not in open(log_file).read():
            self.fail("%s is not saved" % files)
    process.run("echo '' > %s" % log_file, ignore_status=True,
                sudo=True, shell=True)
    session_init.run("date +%s", 100, "True")
    time_init = self.run_cmd_out(
        "cat %s | tail -3 | head -1 | cut -d' ' -f3" % log_file).strip()
    session1 = remote.remote_login("ssh", self.ip, 22, self.user_name,
                                   self.password, self.prompt, "\n",
                                   None, 100, None, None, False)
    session1.sendline('echo "c" > /proc/sysrq-trigger;')
    time.sleep(600)
    self.log.info("Connecting after reboot")
    session2 = remote.RemoteRunner("ssh", self.ip, 22, self.user_name,
                                   self.password, self.prompt, "\n",
                                   log_file, 100, 10, None)
    session2.run("ls -lrt /sys/fs/pstore", 100, "True")
    for files in file_list:
        if files not in open(log_file).read():
            self.fail("%s is not saved" % files)
        file_path = os.path.join('/sys/fs/pstore', "*%s*" % files)
        session2.run("stat -c%%Z %s" % file_path, 100, "True")
        time_created = self.run_cmd_out(
            "cat %s | tail -3 | head -1 | cut -d' ' -f3" % log_file).strip()
        if int(time_created) < int(time_init):
            self.fail("New %s is not saved" % files)
    process.run("echo '' > %s" % log_file, ignore_status=True,
                sudo=True, shell=True)
    session2.run("cat /etc/os-release", 600, "True")
    if "rhel" in open(log_file).read():
        session2.run("yum install -y sos", 600, "True")
    if "Ubuntu" in open(log_file).read():
        session2.run("apt-get install -y sosreport", 600, "True")
    session2.run("sosreport --no-report --batch --build", 100, "True")
    dir_name = self.run_cmd_out(
        "cat %s | grep located | cut -d':' -f2" % log_file).strip()
    # Join with a relative path so the result stays inside the sosreport
    # build tree (an absolute second argument would discard dir_name)
    sosreport_dir = os.path.join(dir_name, 'sys/fs/pstore')
    session2.run("ls -lrt %s" % sosreport_dir, 100, "True")
    for files in file_list:
        if files not in open(log_file).read():
            self.fail("%s is not saved" % files)
        file_path = os.path.join(sosreport_dir, "*%s*" % files)
        session2.run("stat -c%%Z %s" % file_path, 100, "True")
        time_created = self.run_cmd_out(
            "cat %s | tail -3 | head -1 | cut -d' ' -f3" % log_file).strip()
        if int(time_created) < int(time_init):
            self.fail("sosreport contains wrong %s file" % files)
def test(self):
    log_file = os.path.join(self.workdir, "file")
    session_int = remote.RemoteRunner("ssh", self.ip, 22, self.user_name,
                                      self.password, self.prompt, "\n",
                                      log_file, 100, 10, None)
    session_int.run("cat /etc/os-release", 600, "True")
    if "Ubuntu" in open(log_file).read():
        file_list = ['dmesg', 'dump']
        f_val = 12
        session_int.run("DEBIAN_FRONTEND=noninteractive apt-get install -y "
                        "linux-crashdump;", 600, "True")
        crashkernel_value = 'GRUB_CMDLINE_LINUX_DEFAULT="$GRUB_CMDLINE_LINUX_DEFAULT\ crashkernel=2G-4G:320M,4G-32G:512M,32G-64G:1024M,64G-128G:2048M,128G-:4096M"'
        cmd = "echo '%s' > /etc/default/grub.d/kexec-tools.cfg;" % crashkernel_value
        session_int.run(cmd, 600, "True")
        session_int.run("sudo update-grub;", 600, "True")
        session_reboot = remote.remote_login("ssh", self.ip, 22,
                                             self.user_name, self.password,
                                             self.prompt, "\n",
                                             None, 100, None, None, False)
        session_reboot.sendline('reboot;')
        time.sleep(600)
        self.log.info("Connecting after reboot")
        session_status = remote.RemoteRunner("ssh", self.ip, 22,
                                             self.user_name, self.password,
                                             self.prompt, "\n",
                                             log_file, 100, 10, None)
        session_status.run("kdump-config show", 600, "True")
        if self.run_cmd_out("cat %s | grep -Eai 'Not ready to'" % log_file):
            self.fail("Kdump is not operational")
        else:
            self.log.info("Kdump status is operational")
    if "rhel" in open(log_file).read():
        file_list = ['vmcore-dmesg.txt', 'vmcore']
        f_val = 11
        session_int.run("kdumpctl status", 600, "True")
        if self.run_cmd_out(
                "cat %s | grep -Eai 'Kdump is not operational'" % log_file):
            self.fail("Kdump is not operational")
        else:
            self.log.info("Kdump status is operational")
    session_crash = remote.remote_login("ssh", self.ip, 22, self.user_name,
                                        self.password, self.prompt, "\n",
                                        None, 100, None, None, False)
    session_crash.sendline('echo 1 > /proc/sys/kernel/sysrq;')
    session_crash.sendline('echo "c" > /proc/sysrq-trigger;')
    time.sleep(600)
    self.log.info("Connecting after reboot")
    session_check = remote.RemoteRunner("ssh", self.ip, 22, self.user_name,
                                        self.password, self.prompt, "\n",
                                        log_file, 100, 10, None)
    session_check.run("ls -lrt /var/crash", 100, "True")
    crash_dir = self.run_cmd_out(
        "cat %s | grep drwxr | tail -1 | cut -d' ' -f%s" % (log_file, f_val))
    path_crash_dir = os.path.join("/var/crash", crash_dir)
    self.log.info(path_crash_dir)
    session_check.run("ls -lrt %s" % path_crash_dir, 100, "True")
    for files in file_list:
        if files not in open(log_file).read():
            self.fail("%s is not saved" % files)
def configure_nfs(self):
    log_file_nfs = os.path.join(self.workdir, "file2")
    session_nfs = remote.RemoteRunner("ssh", self.ip, 22, self.user_name,
                                      self.password, self.prompt, "\n",
                                      log_file_nfs, 100, 10, None)
    session_nfs.run("cat /etc/os-release", 20, "True")
    if self.distro == "Ubuntu":
        session_nfs.run("apt-get install -y nfs-common;", 60, "True")
        session_nfs.run("apt-get install -y nfs-kernel-server;", 60, "True")
        session_nfs.run("service nfs-server start;", 60, "True")
        session_nfs.run("echo 'NFS=\"%s:%s\"' >> /etc/default/kdump-tools;"
                        % (self.ip_server, self.nfs_path), 60, "True")
        session_nfs.run("mount -t nfs %s:%s /var/crash;"
                        % (self.ip_server, self.nfs_path), 60, "True")
        session_nfs.run("kdump-config unload;", 60, "True")
        session_nfs.run("kdump-config load;", 60, "True")
        session_nfs.run("sed -i '$d' /etc/default/kdump-tools;", 60, "True")
        session_nfs.run("kdump-config show", 60, "True")
        if self.run_cmd_out("cat %s | grep -Eai 'Not ready to'"
                            % log_file_nfs):
            self.fail("Kdump is not operational after configuring nfs")
        else:
            self.log.info("Kdump status is operational after configuring nfs")
    if self.distro == "rhel":
        session_nfs.run("yum -y install nfs-utils;", 60, "True")
        session_nfs.run("service nfs start;", 60, "True")
        session_nfs.run("cp -f /etc/kdump.conf /etc/kdump.conf.tmp;",
                        60, "True")
        session_nfs.run("echo 'nfs %s:%s' >> /etc/kdump.conf;"
                        % (self.ip_server, self.nfs_path), 60, "True")
        session_nfs.run("sed -i 's/-l --message-level/-l -F --message-level/'"
                        " /etc/kdump.conf;", 60, "True")
        session_nfs.run("mount -t nfs %s:%s /var/crash;"
                        % (self.ip_server, self.nfs_path), 60, "True")
        session_nfs.run("kdumpctl restart;", 60, "True")
        session_nfs.run("mv -f /etc/kdump.conf.tmp /etc/kdump.conf;",
                        60, "True")
        session_nfs.run("kdumpctl status", 60, "True")
        if self.run_cmd_out("cat %s | grep -Eai 'Kdump is not operational'"
                            % log_file_nfs):
            self.fail("Kdump is not operational after configuring nfs")
        else:
            self.log.info("Kdump status is operational after configuring nfs")
def get_vddk_thumbprint(host, password, uri_type, prompt=r"[\#\$\[\]]"):
    """
    Get vddk thumbprint from VMware vCenter

    :param host: hostname or IP address
    :param password: Password
    :param uri_type: conversion source uri type
    :param prompt: Shell prompt (regular expression)
    """
    if uri_type == 'esx':
        cmd = 'openssl x509 -in /etc/vmware/ssl/rui.crt -fingerprint -sha1 -noout'
    else:
        cmd = 'openssl x509 -in /etc/vmware-vpx/ssl/rui.crt -fingerprint -sha1 -noout'
    r_runner = remote.RemoteRunner(
        host=host, password=password, prompt=prompt,
        preferred_authenticaton='password,keyboard-interactive')
    cmdresult = r_runner.run(cmd)
    logging.debug("vddk thumbprint:\n%s", cmdresult.stdout)
    vddk_thumbprint = cmdresult.stdout.strip().split('=')[1]
    return vddk_thumbprint
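# Hedged usage sketch (not from the source): the host, password, and uri_type
# values below are placeholders. The openssl output is assumed to look like
# "SHA1 Fingerprint=01:23:...", so split('=')[1] yields the bare thumbprint
# string that virt-v2v style vddk options expect.
thumbprint = get_vddk_thumbprint('esx.example.com', 'password', 'esx')
logging.debug("Use it as: -io vddk-thumbprint=%s", thumbprint)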
def test(self):
    log_file = os.path.join(self.srcdir, "file")
    session1 = remote.remote_login("ssh", self.ip, 22, self.user_name,
                                   self.password, self.prompt, "\n",
                                   None, 100, None, None, False)
    session1.sendline('echo "c" > /proc/sysrq-trigger;')
    time.sleep(600)
    self.log.info("Connecting after reboot")
    session2 = remote.RemoteRunner("ssh", self.ip, 22, self.user_name,
                                   self.password, self.prompt, "\n",
                                   log_file, 100, 10, None)
    session2.run("ls -lrt /sys/fs/pstore", 100, "True")
    file_list = ['common-nvram', 'dmesg-nvram']
    for files in file_list:
        if files not in open(log_file).read():
            self.fail("%s is not saved" % files)
    process.run("echo '' > %s" % log_file, ignore_status=True,
                sudo=True, shell=True)
    session2.run("cat /etc/os-release", 600, "True")
    if "rhel" in open(log_file).read():
        session2.run("yum install -y sos", 600, "True")
    if "Ubuntu" in open(log_file).read():
        session2.run("apt-get install -y sosreport", 600, "True")
    session2.run("sosreport --no-report --batch --build", 100, "True")
    dir_name = self.run_cmd_out(
        "cat %s | grep located | cut -d':' -f2" % log_file).strip()
    # Join with a relative path so the result stays inside the sosreport
    # build tree (an absolute second argument would discard dir_name)
    sosreport_dir = os.path.join(dir_name, 'sys/fs/pstore')
    session2.run("ls -lrt %s" % sosreport_dir, 100, "True")
    for files in file_list:
        if files not in open(log_file).read():
            self.fail("%s is not saved" % files)
def is_modular_daemon(session=None):
    """
    Check whether modular daemon is enabled

    :param session: A session to guest or remote host
    :return: True if modular daemon is enabled
    """
    if session:
        runner = remote.RemoteRunner(session=session).run
        host_key = runner('hostname').stdout_text.strip()
        if host_key not in IS_MODULAR_DAEMON:
            IS_MODULAR_DAEMON[host_key] = None
    else:
        runner = process.run
        host_key = "local"

    if IS_MODULAR_DAEMON[host_key] is None:
        daemons = ["virtqemud.socket", "virtinterfaced.socket",
                   "virtnetworkd.socket", "virtnodedevd.socket",
                   "virtnwfilterd.socket", "virtsecretd.socket",
                   "virtstoraged.socket", "virtproxyd.socket"]

        if any([service.Factory.create_service(d, run=runner).status()
                for d in daemons]):
            IS_MODULAR_DAEMON[host_key] = True
        else:
            IS_MODULAR_DAEMON[host_key] = False
    return IS_MODULAR_DAEMON[host_key]
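# Hedged usage sketch (assumptions: IS_MODULAR_DAEMON is a module-level cache
# dict such as {"local": None}, and `vm` is a running VM object from the test
# environment). A local check goes through process.run; passing a session
# routes the systemd status queries through remote.RemoteRunner instead, and
# the answer is cached per hostname.
if is_modular_daemon():
    logging.info("Host runs the modular (split) libvirt daemons")
session = vm.wait_for_login()
if is_modular_daemon(session=session):
    logging.info("Guest runs the modular (split) libvirt daemons")
session.close()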
def configure_kdump(self):
    log_file_init = os.path.join(self.workdir, "file1")
    session_int = remote.RemoteRunner("ssh", self.ip, 22, self.user_name,
                                      self.password, self.prompt, "\n",
                                      log_file_init, 100, 10, None)
    session_int.run("cat /etc/os-release", 20, "True")
    if "Ubuntu" in open(log_file_init).read():
        self.distro = "Ubuntu"
        self.file_list = ['dmesg', 'dump']
        session_int.run(
            "DEBIAN_FRONTEND=noninteractive apt-get install -y "
            "linux-crashdump;", 600, "True")
        crashkernel_value = 'GRUB_CMDLINE_LINUX_DEFAULT="$GRUB_CMDLINE_LINUX_DEFAULT\ crashkernel=2G-4G:320M,4G-32G:512M,32G-64G:1024M,64G-128G:2048M,128G-:4096M"'
        cmd = "echo '%s' > /etc/default/grub.d/kexec-tools.cfg;" % crashkernel_value
        session_int.run(cmd, 60, "True")
        session_int.run("sudo update-grub;", 600, "True")
        session_reboot = remote.remote_login("ssh", self.ip, 22,
                                             self.user_name, self.password,
                                             self.prompt, "\n",
                                             None, 100, None, None, False)
        session_reboot.sendline('reboot;')
        time.sleep(600)
        self.log.info("Connecting after reboot")
        session_status = remote.RemoteRunner("ssh", self.ip, 22,
                                             self.user_name, self.password,
                                             self.prompt, "\n",
                                             log_file_init, 100, 10, None)
        session_status.run("kdump-config show", 60, "True")
        if self.run_cmd_out("cat %s | grep -Eai 'Not ready to'"
                            % log_file_init):
            self.fail("Kdump is not operational")
        else:
            self.log.info("Kdump status is operational")
        session_status.session.kill()
    if "rhel" in open(log_file_init).read():
        self.distro = "rhel"
        self.file_list = ['vmcore-dmesg.txt', 'vmcore']
        session_int.run("kdumpctl status", 60, "True")
        if self.run_cmd_out(
                "cat %s | grep -Eai 'Kdump is not operational'"
                % log_file_init):
            self.fail("Kdump is not operational")
        else:
            self.log.info("Kdump status is operational")
def run(test, params, env):
    """
    Test stopping and starting a service on guest or host.

    1) Decide whether to use host/guest
    2) Check current service status
    3) Start (Stop) $service
    4) Check status of $service
    5) Stop (Start) $service
    6) Check service status

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    if params.get('test_on_guest') == "yes":
        # error_context.context() is a common method to log test steps,
        # used to verify what exactly was tested.
        error_context.context("Using guest.", logging.info)
        vm = env.get_vm(params["main_vm"])
        session = vm.wait_for_login()
        # RemoteRunner is an object which simulates the utils.run()
        # behavior on remote consoles
        runner = remote.RemoteRunner(session=session).run
    else:
        error_context.context("Using host", logging.info)
        runner = process.run

    error_context.context("Initialize service manager", logging.info)
    service = SpecificServiceManager(params["test_service"], runner)

    error_context.context("Testing service %s" % params["test_service"],
                          logging.info)
    original_status = service.status()
    logging.info("Original status=%s", original_status)

    if original_status is True:
        service.stop()
        time.sleep(5)
        if service.status() is not False:
            logging.error("Fail to stop service")
            service.start()
            raise exceptions.TestFail("Fail to stop service")
        service.start()
    else:
        service.start()
        time.sleep(5)
        if service.status() is not True:
            logging.error("Fail to start service")
            service.stop()
            raise exceptions.TestFail("Fail to start service")
        # Restore the original (stopped) state
        service.stop()

    time.sleep(5)
    if service.status() is not original_status:
        raise exceptions.TestFail("Fail to restore original status of the %s "
                                  "service" % params["test_service"])
def run_remote_cmd(cmd):
    """
    A function to run a command on remote host.

    :param cmd: the command to be executed
    :return: CmdResult object
    """
    remote_runner = remote.RemoteRunner(host=server_ip,
                                        username=server_user,
                                        password=server_pwd)
    cmdResult = remote_runner.run(cmd, ignore_status=True)
    if cmdResult.exit_status:
        test.fail("Failed to run '%s' on remote: %s"
                  % (cmd, results_stderr_52lts(cmdResult).strip()))
    return cmdResult
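# Hedged usage sketch (assumption: like the helper above, this runs inside a
# test function where server_ip/server_user/server_pwd and `test` are already
# in scope, and `vm_name` names the domain under test).
result = run_remote_cmd("virsh domstate %s" % vm_name)
logging.debug("Remote domain state: %s", result.stdout_text.strip())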
def server_config(self):
    """
    Configure the ntp server:
    1. ZONE = America/New_York;
    2. start ntpd service;
    3. restrict the host and guest
    """
    logging.info("waiting for login server.....")
    self.server_hostname = self.server_session.\
        cmd_output('hostname').strip()
    logging.debug("server hostname is %s", self.server_hostname)
    cmd = 'echo \'ZONE = "America/New_York"\' > /etc/sysconfig/clock'
    status = self.server_session.cmd_status(cmd)
    if status:
        self.test.error("set ZONE in server failed.")
    cmd_ln = 'ln -sf /usr/share/zoneinfo/America/New_York /etc/localtime'
    self.server_session.cmd_status(cmd_ln)

    # Add server of local clock
    output = self.server_session.cmd_output("grep '^server %s'"
                                            " /etc/ntp.conf"
                                            % self.local_clock).strip()
    if not output:
        status = self.server_session.cmd_status("echo 'server %s' >> "
                                                "/etc/ntp.conf"
                                                % self.local_clock)
        if status:
            self.test.error("config local_clock failed.")

    # Add host and guest in restrict
    output = self.server_session.cmd_output("grep '^restrict %s'"
                                            " /etc/ntp.conf"
                                            % self.net_range).strip()
    if not output:
        status = self.server_session.cmd_status(
            "echo 'restrict %s mask %s %s' >> /etc/ntp.conf"
            % (self.net_range, self.mask, self.restrict_option))
        if status:
            self.test.error("config restrict failed.")

    # Restart ntpd service
    server_run = remote.RemoteRunner(session=self.server_session)
    server_ntpd = service.Factory.create_service("ntpd",
                                                 run=server_run.run)
    server_ntpd.restart()
def run(test, params, env):
    """
    Check libvirt daemons are removed after removing libvirt pkgs.
    """
    daemons = params.get('daemons', "").split()
    require_modular_daemon = params.get('require_modular_daemon', "no") == "yes"

    utils_split_daemons.daemon_mode_check(require_modular_daemon)
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    session = None
    try:
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        if not utils_package.package_install("libvirt*", session):
            test.error("Failed to install libvirt package on guest")

        virsh.reboot(vm)
        if session is None:
            session = vm.wait_for_login()
        # Destroy the default network, otherwise the network daemon will
        # not be removed after the libvirt pkgs are removed
        cmd = "virsh net-destroy default"
        session.cmd(cmd, ignore_all_errors=True)

        runner = remote.RemoteRunner(session=session).run
        service.Factory.create_service('virtlogd', run=runner).start()
        if not utils_package.package_remove("libvirt*", session):
            test.error("Failed to remove libvirt packages on guest")

        for daemon in daemons:
            cmd = "systemctl -a | grep %s" % daemon
            if not session.cmd_status(cmd):
                test.fail("%s still exists after removing libvirt pkgs"
                          % daemon)
    finally:
        if session is not None:
            session.close()
        if vm.is_alive():
            vm.destroy()
def guest_config(self):
    """
    Configure the guest:
    1. ZONE = America/New_York;
    2. test the ntpdate;
    3. configure the ntp.conf;
    4. restart ntpd service
    """
    # Set the time zone to America/New_York
    cmd = 'echo \'ZONE = "America/New_York"\' > /etc/sysconfig/clock;'
    self.session.cmd(cmd)
    cmd_ln = 'ln -sf /usr/share/zoneinfo/America/New_York /etc/localtime'
    self.session.cmd(cmd_ln)

    # Timing by ntpdate
    guest_run = remote.RemoteRunner(session=self.session)
    guest_ntpd = service.Factory.create_service("ntpd", run=guest_run.run)
    guest_ntpd.stop()

    # ntpdate
    utils_test.ntpdate(self.server_ip, self.session)

    # Check the result of ntpdate
    server_date = utils_test.get_date(self.server_session)
    guest_date = utils_test.get_date(self.session)
    logging.info("server time is : %s", server_date)
    logging.info("guest time is : %s", guest_date)
    if not abs(int(server_date) - int(guest_date)) < 2:
        self.test.fail("timing by ntpdate on guest failed!!")

    # Delete server of local clock if it is present
    output = self.session.cmd_output("grep '%s' /etc/ntp.conf"
                                     % self.local_clock).strip()
    if output:
        self.session.cmd("sed -i '/%s/d' /etc/ntp.conf" % self.local_clock)

    # Check the ntp.conf and add server ip into it
    output = self.session.cmd_output("grep '^server %s' /etc/ntp.conf"
                                     % self.server_ip)
    if not output:
        cmd = "echo 'server %s' >> /etc/ntp.conf" % self.server_ip
        status = self.session.cmd_status(cmd)
        if status:
            self.test.fail("config /etc/ntp.conf on guest failed!!")

    # Start the ntpd service
    guest_ntpd.start()
def run_migration_back(params, test):
    """
    Execute migration back from target host to source host

    :param params: dict, test parameters
    :param test: test object
    """
    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    vm_name = params.get("migrate_main_vm")
    options = params.get("virsh_migrate_options", "--live --verbose")
    if migrate_vm_back:
        ssh_connection = utils_conn.SSHConnection(
            server_ip=params.get("client_ip"),
            server_pwd=params.get("local_pwd"),
            client_ip=params.get("server_ip"),
            client_pwd=params.get("server_pwd"))
        try:
            ssh_connection.conn_check()
        except utils_conn.ConnectionError:
            ssh_connection.conn_setup()
            ssh_connection.conn_check()

        # Pre migration setup for local machine
        src_full_uri = libvirt_vm.complete_uri(
            params.get("migrate_source_host"))

        migration_test = migration.MigrationTest()
        migration_test.migrate_pre_setup(src_full_uri, params)
        cmd = "virsh migrate %s %s %s" % (vm_name, options, src_full_uri)
        test.log.debug("Start migration: %s", cmd)
        runner_on_target = remote.RemoteRunner(
            host=params.get("remote_ip"),
            username=params.get("remote_user"),
            password=params.get("remote_pwd"))
        cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
        test.log.info(cmd_result)
        if cmd_result.exit_status:
            destroy_cmd = "virsh destroy %s" % vm_name
            remote.run_remote_cmd(destroy_cmd, params, runner_on_target,
                                  ignore_status=False)
            test.fail("Failed to run '%s' on remote: %s" % (cmd, cmd_result))
    else:
        test.log.debug("No need to migrate back")
def __init__(self, session=None):
    """
    Initialize a service object for libvirtd.

    :param session: A session to guest or remote host.
    """
    self.session = session
    if self.session:
        self.remote_runner = remote.RemoteRunner(session=self.session)
        runner = self.remote_runner.run
    else:
        runner = utils.run

    if LIBVIRTD is None:
        logging.warning("Libvirtd service is not available in host, "
                        "utils_libvirtd module will not function normally")
    self.libvirtd = service.Factory.create_service(LIBVIRTD, run=runner)
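# Hedged usage sketch (assumption: the __init__ above belongs to a Libvirtd
# wrapper class, e.g. utils_libvirtd.Libvirtd, and `vm_session` is an open
# guest session). Without a session the service is driven via utils.run on
# the host; with one, commands go through remote.RemoteRunner instead.
host_libvirtd = Libvirtd()
host_libvirtd.libvirtd.restart()
guest_libvirtd = Libvirtd(session=vm_session)
logging.info("Guest libvirtd active: %s", guest_libvirtd.libvirtd.status())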
def get_vddk_thumbprint(host, password, prompt=r"[\#\$]"):
    """
    Get vddk thumbprint from VMware vCenter

    :param host: hostname or IP address
    :param password: Password
    :param prompt: Shell prompt (regular expression)
    """
    cmd = 'openssl x509 -in /etc/vmware-vpx/ssl/rui.crt -fingerprint -sha1 -noout'
    r_runner = remote.RemoteRunner(host=host, password=password,
                                   prompt=prompt)
    cmdresult = r_runner.run(cmd)
    logging.debug("vddk thumbprint:\n%s", cmdresult.stdout)
    vddk_thumbprint = cmdresult.stdout.strip().split('=')[1]
    return vddk_thumbprint
def network_restart(params):
    """
    Restart remote network
    """
    time_out = int(params.get('time_out'))
    remote_ip = params.get('remote_ip')
    remote_user = params.get('remote_user')
    remote_pwd = params.get('remote_pwd')
    session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                  remote_pwd, "#")
    runner = remote.RemoteRunner(session=session)
    net_service = service.Factory.create_service("network", runner.run)
    net_service.restart()
    session.close()
    try:
        remote.wait_for_login("ssh", remote_ip, "22", remote_user,
                              remote_pwd, "#", timeout=time_out)
    except remote.LoginTimeoutError as detail:
        raise error.TestError(str(detail))
def __init__(self, daemon_name='', session=None):
    """
    Initialize a service object for virt daemons.

    :param daemon_name: daemon name, such as virtqemud, virtnetworkd, etc.
    :param session: A session to guest or remote host.
    """
    if daemon_name:
        self.daemon_name = daemon_name
    self.session = session
    if self.session:
        self.remote_runner = remote.RemoteRunner(session=self.session)
        runner = self.remote_runner.run
    else:
        runner = process.run
    if not self.daemon_name:
        logging.warning("libvirt split daemon service is not available in "
                        "host, utils_daemons module will not function "
                        "normally")
    self.virtdaemon = service.Factory.create_service(self.daemon_name,
                                                     run=runner)
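# Hedged usage sketch (assumption: the __init__ above belongs to a
# split-daemon wrapper class, e.g. utils_daemons.VirtDaemon, whose class body
# presumably declares a default daemon_name, since __init__ only overrides it
# when an argument is passed; `vm_session` is an open guest session).
virtqemud = VirtDaemon('virtqemud')
virtqemud.virtdaemon.restart()
remote_virtnetworkd = VirtDaemon('virtnetworkd', session=vm_session)
logging.info("virtnetworkd active: %s",
             remote_virtnetworkd.virtdaemon.status())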
def get_screenshot(self):
    """
    Do virsh screenshot of the vm and fetch the image if the VM is
    on a remote host.
    """
    sshot_file = os.path.join(data_dir.get_tmp_dir(), "vm_screenshot.ppm")
    if self.target == "ovirt":
        # Note: This is a screenshot path on a remote host
        vm_sshot = os.path.join("/tmp", "vm_screenshot.ppm")
    else:
        vm_sshot = sshot_file
    virsh.screenshot(self.name, vm_sshot, session_id=self.virsh_session_id)
    if self.target == "ovirt":
        remote_ip = self.params.get("remote_ip")
        remote_user = self.params.get("remote_user")
        remote_pwd = self.params.get("remote_pwd")
        remote.scp_from_remote(remote_ip, '22', remote_user,
                               remote_pwd, vm_sshot, sshot_file)
        r_runner = remote.RemoteRunner(host=remote_ip,
                                       username=remote_user,
                                       password=remote_pwd)
        r_runner.run("rm -f %s" % vm_sshot)
    return sshot_file
def verify_test_mem_device(vm, params, test):
    """
    Verify steps for memory device

    :param vm: VM object
    :param params: dict, test parameters
    :param test: test object
    """
    verify_test_default(vm, params, test)
    remote_ip = params.get("remote_ip")
    remote_user = params.get("remote_user")
    remote_pwd = params.get("remote_pwd")
    # Create a remote runner for later use
    runner_on_target = remote.RemoteRunner(host=remote_ip,
                                           username=remote_user,
                                           password=remote_pwd)
    qemu_checks = params.get('qemu_checks', '').split('`')
    test.log.debug("qemu_checks:%s" % qemu_checks[0])
    for qemu_check in qemu_checks:
        libvirt.check_qemu_cmd_line(qemu_check, False, params,
                                    runner_on_target)
def run(test, params, env):
    """
    Test migration with glusterfs.
    """
    def create_or_clean_backend_dir(g_uri, params, session=None,
                                    is_clean=False):
        """
        Create/cleanup backend directory

        :params g_uri: glusterfs uri
        :params params: the parameters to be checked
        :params session: VM/remote session object
        :params is_cleanup: True for cleanup backend directory;
                            False for create one.
        :return: gluster_img if is_clean is equal to True
        """
        mount_point = params.get("gluster_mount_dir")
        is_symlink = params.get("gluster_create_symlink") == "yes"
        symlink_name = params.get("gluster_symlink")
        gluster_img = None
        if not is_clean:
            if not utils_misc.check_exists(mount_point, session):
                utils_misc.make_dirs(mount_point, session)

            if gluster.glusterfs_is_mounted(mount_point, session):
                gluster.glusterfs_umount(g_uri, mount_point, session)
            gluster.glusterfs_mount(g_uri, mount_point, session)

            gluster_img = os.path.join(mount_point, disk_img)
            if is_symlink:
                utils_misc.make_symlink(mount_point, symlink_name)
                utils_misc.make_symlink(mount_point, symlink_name,
                                        remote_session)
                gluster_img = os.path.join(symlink_name, disk_img)
            return gluster_img
        else:
            if is_symlink:
                utils_misc.rm_link(symlink_name, session)

            gluster.glusterfs_umount(g_uri, mount_point, session)
            if utils_misc.check_exists(mount_point, session):
                utils_misc.safe_rmdir(gluster_mount_dir, session=session)

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters

        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line
        :return: CmdResult object
        """
        # Migrate the guest.
        virsh_args.update({"ignore_status": True})
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        if not result:
            test.error("No migration result is returned.")

        logging.info("Migration out: %s", result.stdout_text.strip())
        logging.info("Migration error: %s", result.stderr_text.strip())

        if status_error:  # Migration should fail
            if err_msg:   # Special error messages are expected
                if not re.search(err_msg, result.stderr_text.strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'"
                              % (err_msg, result.stderr_text.strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(result.stderr_text.strip())

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    virsh_options = params.get("virsh_options", "--verbose --live")

    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    gluster_mount_dir = params.get("gluster_mount_dir")
    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")
    host_ip = params.get("gluster_server_ip", "")
    migr_vm_back = params.get("migrate_vm_back", "no") == "yes"

    selinux_local = params.get('set_sebool_local', 'yes') == "yes"
    selinux_remote = params.get('set_sebool_remote', 'no') == "yes"
    sebool_fusefs_local = params.get('set_sebool_fusefs_local', 'yes')
    sebool_fusefs_remote = params.get('set_sebool_fusefs_remote', 'yes')
    test_dict = dict(params)
    test_dict["local_boolean_varible"] = "virt_use_fusefs"
    test_dict["remote_boolean_varible"] = "virt_use_fusefs"

    remove_pkg = False
    seLinuxBool = None
    seLinuxfusefs = None
    gluster_uri = None
    mig_result = None

    # Make sure all of the parameters are assigned a valid value
    check_parameters(test, params)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    # For --postcopy enable
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        virsh_options = "%s %s" % (virsh_options, postcopy_options)
        params['virsh_options'] = virsh_options

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Back up xml file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    migrate_setup = libvirt.MigrationTest()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Configure selinux
        if selinux_local or selinux_remote:
            seLinuxBool = utils_misc.SELinuxBoolean(params)
            seLinuxBool.setup()
        if sebool_fusefs_local or sebool_fusefs_remote:
            seLinuxfusefs = utils_misc.SELinuxBoolean(test_dict)
            seLinuxfusefs.setup()

        # Setup glusterfs and disk xml.
        disk_img = "gluster.%s" % disk_format
        params['disk_img'] = disk_img
        libvirt.set_vm_disk(vm, params)

        vm_xml_cxt = virsh.dumpxml(vm_name).stdout_text.strip()
        logging.debug("The VM XML with gluster disk source: \n%s", vm_xml_cxt)

        # Check if gluster server is deployed locally
        if not host_ip:
            logging.debug("Enable port 24007 and 49152:49216")
            migrate_setup.migrate_pre_setup(src_uri, params, ports="24007")
            migrate_setup.migrate_pre_setup(src_uri, params)
            gluster_uri = "{}:{}".format(client_ip, vol_name)
        else:
            gluster_uri = "{}:{}".format(host_ip, vol_name)

        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")

        if gluster_mount_dir:
            # The package 'glusterfs-fuse' is not installed on target
            # which makes issue when trying to 'mount -t glusterfs'
            pkg_name = 'glusterfs-fuse'
            logging.debug("Check if glusterfs-fuse is installed")
            pkg_mgr = utils_package.package_manager(remote_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("glusterfs-fuse will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)
                else:
                    remove_pkg = True

            gluster_img = create_or_clean_backend_dir(gluster_uri, params)
            create_or_clean_backend_dir(gluster_uri, params, remote_session)
            logging.debug("Gluster Image is %s", gluster_img)

            gluster_backend_disk = {'disk_source_name': gluster_img}
            # Update disk xml with gluster image in backend dir
            libvirt.set_vm_disk(vm, gluster_backend_disk)
        remote_session.close()

        mig_result = do_migration(vm, dest_uri, options, extra)
        check_migration_res(mig_result)

        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.info("Recover test environment")
        orig_config_xml.sync()

        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)

        # Clean up SELinux configuration
        if seLinuxBool:
            seLinuxBool.cleanup()
        if seLinuxfusefs:
            seLinuxfusefs.cleanup()

        # Disable ports 24007 and 49152:49216
        if not host_ip:
            logging.debug("Disable 24007 and 49152:49216 in Firewall")
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True,
                                            ports="24007")
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)

        gluster.setup_or_cleanup_gluster(False, **params)

        # Cleanup backend directory/symlink
        if gluster_mount_dir and gluster_uri:
            remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            create_or_clean_backend_dir(gluster_uri, params, is_clean=True)
            create_or_clean_backend_dir(gluster_uri, params, remote_session,
                                        True)
            if remove_pkg:
                pkg_mgr = utils_package.package_manager(remote_session,
                                                        pkg_name)
                if pkg_mgr.is_installed(pkg_name):
                    logging.debug("glusterfs-fuse will be uninstalled")
                    if not pkg_mgr.remove():
                        logging.error("Package '%s' un-installation fails",
                                      pkg_name)
            remote_session.close()
def run(test, params, env):
    """
    Test migration with glusterfs.
    """
    def create_or_clean_backend_dir(g_uri, params, session=None,
                                    is_clean=False):
        """
        Create/cleanup backend directory

        :params g_uri: glusterfs uri
        :params params: the parameters to be checked
        :params session: VM/remote session object
        :params is_cleanup: True for cleanup backend directory;
                            False for create one.
        :return: gluster_img if is_clean is equal to True
        """
        mount_point = params.get("gluster_mount_dir")
        is_symlink = params.get("gluster_create_symlink") == "yes"
        symlink_name = params.get("gluster_symlink")
        gluster_img = None
        if not is_clean:
            if not utils_misc.check_exists(mount_point, session):
                utils_misc.make_dirs(mount_point, session)

            if gluster.glusterfs_is_mounted(mount_point, session):
                gluster.glusterfs_umount(g_uri, mount_point, session)
            gluster.glusterfs_mount(g_uri, mount_point, session)

            gluster_img = os.path.join(mount_point, disk_img)
            if is_symlink:
                utils_misc.make_symlink(mount_point, symlink_name)
                utils_misc.make_symlink(mount_point, symlink_name,
                                        remote_session)
                gluster_img = os.path.join(symlink_name, disk_img)
            return gluster_img
        else:
            if is_symlink:
                utils_misc.rm_link(symlink_name, session)

            gluster.glusterfs_umount(g_uri, mount_point, session)
            if utils_misc.check_exists(mount_point, session):
                utils_misc.safe_rmdir(gluster_mount_dir, session=session)

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    virsh_options = params.get("virsh_options", "")

    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    gluster_mount_dir = params.get("gluster_mount_dir")
    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")
    host_ip = params.get("gluster_server_ip", "")
    migrate_vm_back = params.get("migrate_vm_back", "no") == "yes"

    selinux_local = params.get('set_sebool_local', 'yes') == "yes"
    selinux_remote = params.get('set_sebool_remote', 'no') == "yes"
    sebool_fusefs_local = params.get('set_sebool_fusefs_local', 'yes')
    sebool_fusefs_remote = params.get('set_sebool_fusefs_remote', 'yes')
    test_dict = dict(params)
    test_dict["local_boolean_varible"] = "virt_use_fusefs"
    test_dict["remote_boolean_varible"] = "virt_use_fusefs"
    remote_dargs = {'server_ip': server_ip, 'server_user': server_user,
                    'server_pwd': server_pwd,
                    'file_path': "/etc/libvirt/libvirt.conf"}

    remove_pkg = False
    seLinuxBool = None
    seLinuxfusefs = None
    gluster_uri = None
    mig_result = None
    remove_dict = {}
    remote_libvirt_file = None
    src_libvirt_file = None

    # Make sure all of the parameters are assigned a valid value
    migrate_test = migration.MigrationTest()
    migrate_test.check_parameters(params)
    extra_args = migrate_test.update_virsh_migrate_extra_args(params)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    # For --postcopy enable
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (virsh_options, postcopy_options)
        func_name = virsh.migrate_postcopy

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Back up xml file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Configure selinux
        if selinux_local or selinux_remote:
            seLinuxBool = utils_misc.SELinuxBoolean(params)
            seLinuxBool.setup()
        if sebool_fusefs_local or sebool_fusefs_remote:
            seLinuxfusefs = utils_misc.SELinuxBoolean(test_dict)
            seLinuxfusefs.setup()

        # Setup glusterfs
        disk_img = "gluster.%s" % disk_format
        params['disk_img'] = disk_img
        host_ip = gluster.setup_or_cleanup_gluster(is_setup=True, **params)
        logging.debug("host ip: %s ", host_ip)

        # Check if gluster server is deployed locally
        if not host_ip:
            logging.debug("Enable port 24007 and 49152:49216")
            migrate_test.migrate_pre_setup(src_uri, params, ports="24007")
            migrate_test.migrate_pre_setup(src_uri, params)
            gluster_uri = "{}:{}".format(client_ip, vol_name)
        else:
            gluster_uri = "{}:{}".format(host_ip, vol_name)

        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")

        if gluster_mount_dir:
            # The package 'glusterfs-fuse' is not installed on target
            # which makes issue when trying to 'mount -t glusterfs'
            pkg_name = 'glusterfs-fuse'
            logging.debug("Check if glusterfs-fuse is installed")
            pkg_mgr = utils_package.package_manager(remote_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("glusterfs-fuse will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)
                else:
                    remove_pkg = True

            gluster_img = create_or_clean_backend_dir(gluster_uri, params)
            create_or_clean_backend_dir(gluster_uri, params, remote_session)

            # Get the image path
            image_source = vm.get_first_disk_devices()['source']
            image_info = utils_misc.get_image_info(image_source)
            if image_info["format"] == disk_format:
                disk_cmd = "cp -f %s %s" % (image_source, gluster_img)
            else:
                # Convert the disk format
                disk_cmd = ("qemu-img convert -f %s -O %s %s %s"
                            % (image_info["format"], disk_format,
                               image_source, gluster_img))
            process.run("%s; chmod a+rw %s" % (disk_cmd, gluster_mount_dir),
                        shell=True)

            logging.debug("Gluster Image is %s", gluster_img)
            gluster_backend_disk = {'disk_source_name': gluster_img}
            # Update disk xml with gluster image in backend dir
            libvirt.set_vm_disk(vm, gluster_backend_disk)
        remote_session.close()

        vm_xml_cxt = virsh.dumpxml(vm_name).stdout_text.strip()
        logging.debug("The VM XML with gluster disk source: \n%s", vm_xml_cxt)

        vm.wait_for_login().close()
        migrate_test.ping_vm(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        vms = [vm]
        migrate_test.do_migration(vms, None, dest_uri, 'orderly', options,
                                  thread_timeout=900, ignore_status=True,
                                  virsh_opt=virsh_options, extra_opts=extra,
                                  **extra_args)
        migrate_test.ping_vm(vm, params, dest_uri)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migrate_test.migrate_pre_setup(src_uri, params)
            remove_dict = {"do_search": ('{"%s": "ssh:/"}' % src_uri)}
            remote_libvirt_file = libvirt_config\
                .remove_key_for_modular_daemon(remove_dict, remote_dargs)

            cmd = "virsh migrate %s %s %s %s" % (vm_name, options,
                                                 virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))
    finally:
        logging.info("Recover test environment")
        migrate_test.cleanup_vm(vm, dest_uri)
        orig_config_xml.sync()

        if src_libvirt_file:
            src_libvirt_file.restore()
        if remote_libvirt_file:
            del remote_libvirt_file

        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True)

        # Clean up SELinux configuration
        if seLinuxBool:
            seLinuxBool.cleanup()
        if seLinuxfusefs:
            seLinuxfusefs.cleanup()

        # Disable ports 24007 and 49152:49216
        if not host_ip:
            logging.debug("Disable 24007 and 49152:49216 in Firewall")
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True,
                                           ports="24007")
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True)

        gluster.setup_or_cleanup_gluster(False, **params)

        # Cleanup backend directory/symlink
        if gluster_mount_dir and gluster_uri:
            remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            create_or_clean_backend_dir(gluster_uri, params, is_clean=True)
            create_or_clean_backend_dir(gluster_uri, params, remote_session,
                                        True)
            if remove_pkg:
                pkg_mgr = utils_package.package_manager(remote_session,
                                                        pkg_name)
                if pkg_mgr.is_installed(pkg_name):
                    logging.debug("glusterfs-fuse will be uninstalled")
                    if not pkg_mgr.remove():
                        logging.error("Package '%s' un-installation fails",
                                      pkg_name)
            remote_session.close()
def run(test, params, env):
    """
    Test virsh migrate command.
    """

    def check_vm_network_accessed(session=None, ping_dest="www.baidu.com"):
        """
        The operations to the VM need to be done before or after migration
        happens

        :param session: The session object to the host
        :param ping_dest: The destination to be ping
        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        status, output = utils_test.ping(ping_dest,
                                         count=10,
                                         timeout=20,
                                         output_func=logging.debug,
                                         session=session)
        if status != 0:
            test.fail("Ping failed, status: %s, output: %s"
                      % (status, output))

    def get_vm_ifaces(session=None):
        """
        Get interfaces of vm

        :param session: The session object to the host
        :return: interfaces
        """
        p_iface, v_iface = utils_net.get_remote_host_net_ifs(session)
        return p_iface

    def check_vm_iface_num(iface_list, exp_num=3):
        """
        Check the number of interfaces

        :param iface_list: The interface list
        :param exp_num: The expected number
        :raise: test.fail when interfaces' number is not equal to exp_num
        """
        if len(iface_list) != exp_num:
            test.fail("%d interfaces should be found on the vm, "
                      "but find %s." % (exp_num, iface_list))

    def create_or_del_networks(pf_name, params, remote_virsh_session=None,
                               is_del=False):
        """
        Create or delete network on local or remote

        :param params: Dictionary with the test parameters
        :param pf_name: The name of PF
        :param remote_virsh_session: The virsh session object to the
                                     remote host
        :param is_del: Whether the networks should be deleted
        :raise: test.fail when fails to define/start network
        """
        net_hostdev_name = params.get("net_hostdev_name", "hostdev-net")
        net_hostdev_fwd = params.get("net_hostdev_fwd",
                                     '{"mode": "hostdev", "managed": "yes"}')
        net_bridge_name = params.get("net_bridge_name", "host-bridge")
        net_bridge_fwd = params.get("net_bridge_fwd", '{"mode": "bridge"}')
        bridge_name = params.get("bridge_name", "br0")

        net_dict = {"net_name": net_hostdev_name,
                    "net_forward": net_hostdev_fwd,
                    "net_forward_pf": '{"dev": "%s"}' % pf_name}
        bridge_dict = {"net_name": net_bridge_name,
                       "net_forward": net_bridge_fwd,
                       "net_bridge": '{"name": "%s"}' % bridge_name}

        if not is_del:
            for net_params in (net_dict, bridge_dict):
                net_dev = libvirt.create_net_xml(net_params.get("net_name"),
                                                 net_params)
                if not remote_virsh_session:
                    if net_dev.get_active():
                        net_dev.undefine()
                    net_dev.define()
                    net_dev.start()
                else:
                    remote.scp_to_remote(server_ip, '22', server_user,
                                         server_pwd, net_dev.xml,
                                         net_dev.xml, limit="",
                                         log_filename=None, timeout=600,
                                         interface=None)
                    remote_virsh_session.net_define(net_dev.xml, **virsh_args)
                    remote_virsh_session.net_start(
                        net_params.get("net_name"), **virsh_args)
        else:
            virsh_session = virsh
            if remote_virsh_session:
                virsh_session = remote_virsh_session
            for nname in (net_hostdev_name, net_bridge_name):
                if nname not in virsh_session.net_state_dict():
                    continue
                virsh_session.net_destroy(nname, debug=True,
                                          ignore_status=True)
                virsh_session.net_undefine(nname, debug=True,
                                           ignore_status=True)

    def check_vm_network_connection(net_name, expected_conn=0):
        """
        Check network connections in network xml

        :param net_name: The network to be checked
        :param expected_conn: The expected value
        :raise: test.fail when fails
        """
        output = virsh.net_dumpxml(net_name, debug=True).stdout_text
        if expected_conn == 0:
            reg_pattern = r"<network>"
        else:
            reg_pattern = r"<network connections='(\d)'>"
        res = re.findall(reg_pattern, output, re.I)
        if not res:
            test.fail("Unable to find expected connection in %s." % net_name)
        if expected_conn != 0:
            if expected_conn != int(res[0]):
                test.fail("Unable to get expected connection number."
                          "Expected: %s, Actual %s"
                          % (expected_conn, int(res[0])))

    def get_hostdev_addr_from_xml():
        """
        Get VM hostdev address

        :return: pci driver id
        """
        address_dict = {}
        for ifac in vm_xml.VMXML.new_from_dumpxml(
                vm_name).devices.by_device_tag("interface"):
            if ifac.type_name == "hostdev":
                address_dict = ifac.hostdev_address.attrs

        return libvirt.pci_info_from_address(address_dict, 16, "id")

    def check_vfio_pci(pci_path, status_error=False):
        """
        Check if vf driver is vfio-pci

        :param pci_path: The absolute path of pci device
        :param status_error: Whether the driver should be vfio-pci
        """
        cmd = "readlink %s/driver | awk -F '/' '{print $NF}'" % pci_path
        output = process.run(cmd, shell=True,
                             verbose=True).stdout_text.strip()
        if (output == "vfio-pci") == status_error:
            test.fail("Get incorrect driver %s, it should%s be vfio-pci."
                      % (output, ' not' if status_error else ''))

    def update_iface_xml(vmxml):
        """
        Update interfaces for guest

        :param vmxml: vm_xml.VMXML object
        """
        vmxml.remove_all_device_by_type('interface')
        vmxml.sync()

        iface_dict = {"type": "network",
                      "source": "{'network': 'host-bridge'}",
                      "mac": mac_addr, "model": "virtio",
                      "teaming": '{"type":"persistent"}',
                      "alias": '{"name": "ua-backup0"}',
                      "inbound": '{"average":"5"}',
                      "outbound": '{"average":"5"}'}

        iface_dict2 = {"type": "network",
                       "source": "{'network': 'hostdev-net'}",
                       "mac": mac_addr, "model": "virtio",
                       "teaming":
                       '{"type":"transient", "persistent": "ua-backup0"}'}

        iface = interface.Interface('network')
        for ifc in (iface_dict, iface_dict2):
            iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", ifc)
            vmxml.add_device(iface)
        vmxml.sync()

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    bridge_name = params.get("bridge_name", "br0")
    net_hostdev_name = params.get("net_hostdev_name", "hostdev-net")
    net_bridge_name = params.get("net_bridge_name", "host-bridge")
    driver = params.get("driver", "ixgbe")
    vm_tmp_file = params.get("vm_tmp_file", "/tmp/test.txt")
    cmd_during_mig = params.get("cmd_during_mig")
    net_failover_test = "yes" == params.get("net_failover_test", "no")
    cancel_migration = "yes" == params.get("cancel_migration", "no")
    try:
        vf_no = int(params.get("vf_no", "4"))
    except ValueError as e:
        test.error(e)

    migr_vm_back = "yes" == params.get("migrate_vm_back", "no")

    err_msg = params.get("err_msg")
    status_error = "yes" == params.get("status_error", "no")
    cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                 'server_pwd': server_pwd}
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd,
                          'unprivileged_user': None,
                          'ssh_remote_auth': True}

    destparams_dict = copy.deepcopy(params)

    remote_virsh_session = None
    vm_session = None
    vm = None
    mig_result = None
    func_name = None
    extra_args = {}
    default_src_vf = 0
    default_dest_vf = 0
    default_src_rp_filter = 1
    default_dest_rp_filter = 1

    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("This libvirt version doesn't support migration with "
                    "net failover devices.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")

        if net_failover_test:
            src_pf, src_pf_pci = utils_sriov.find_pf(driver)
            logging.debug("src_pf is %s. src_pf_pci: %s", src_pf, src_pf_pci)
            params['pf_name'] = src_pf
            dest_pf, dest_pf_pci = utils_sriov.find_pf(driver, server_session)
            logging.debug("dest_pf is %s. dest_pf_pci: %s", dest_pf,
                          dest_pf_pci)
            destparams_dict['pf_name'] = dest_pf

            src_pf_pci_path = utils_misc.get_pci_path(src_pf_pci)
            dest_pf_pci_path = utils_misc.get_pci_path(dest_pf_pci,
                                                       server_session)

            cmd = "cat %s/sriov_numvfs" % (src_pf_pci_path)
            default_src_vf = process.run(cmd, shell=True,
                                         verbose=True).stdout_text

            cmd = "cat %s/sriov_numvfs" % (dest_pf_pci_path)
            status, default_dest_vf = utils_misc.cmd_status_output(
                cmd, shell=True, session=server_session)
            if status:
                test.error("Unable to get default sriov_numvfs on target!"
                           "status: %s, output: %s"
                           % (status, default_dest_vf))

            if not utils_sriov.set_vf(src_pf_pci_path, vf_no):
                test.error("Failed to set vf on source.")
            if not utils_sriov.set_vf(dest_pf_pci_path, vf_no,
                                      session=server_session):
                test.error("Failed to set vf on target.")

            # Create PF and bridge connection on source and target host
            cmd = 'cat /proc/sys/net/ipv4/conf/all/rp_filter'
            default_src_rp_filter = process.run(cmd, shell=True,
                                                verbose=True).stdout_text
            status, default_dest_rp_filter = utils_misc.cmd_status_output(
                cmd, shell=True, session=server_session)
            if status:
                test.error("Unable to get default rp_filter on target!"
                           "status: %s, output: %s"
                           % (status, default_dest_rp_filter))

            cmd = 'echo 0 >/proc/sys/net/ipv4/conf/all/rp_filter'
            process.run(cmd, shell=True, verbose=True)
            utils_misc.cmd_status_output(cmd, shell=True,
                                         session=server_session)
            utils_sriov.add_or_del_connection(params, is_del=False)
            utils_sriov.add_or_del_connection(destparams_dict, is_del=False,
                                              session=server_session)

            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            create_or_del_networks(
                dest_pf, params, remote_virsh_session=remote_virsh_session)
            remote_virsh_session.close_session()
            create_or_del_networks(src_pf, params)

            # Change network interface xml
            mac_addr = utils_net.generate_mac_address_simple()
            update_iface_xml(new_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        # Check local guest network connection before migration
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)

        if net_failover_test:
            utils_net.restart_guest_network(vm_session)
        iface_list = get_vm_ifaces(vm_session)

        vm_ipv4, vm_ipv6 = utils_net.get_linux_ipaddr(vm_session,
                                                      iface_list[0])
        check_vm_network_accessed(ping_dest=vm_ipv4)

        if net_failover_test:
            check_vm_iface_num(iface_list)
            check_vm_network_connection(net_hostdev_name, 1)
            check_vm_network_connection(net_bridge_name, 1)

            hostdev_pci_id = get_hostdev_addr_from_xml()
            vf_path = utils_misc.get_pci_path(hostdev_pci_id)
            check_vfio_pci(vf_path)

        if cmd_during_mig:
            s, o = utils_misc.cmd_status_output(cmd_during_mig, shell=True,
                                                session=vm_session)
            if s:
                test.fail("Failed to run %s in vm." % cmd_during_mig)

        if extra.count("--postcopy"):
            func_name = virsh.migrate_postcopy
            extra_args.update({'func_params': params})
        if cancel_migration:
            func_name = migration_test.do_cancel

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=func_name, extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session, vm_ipv4)
            server_session.close()
            if net_failover_test:
                # Check network connection
                check_vm_network_connection(net_hostdev_name)
                check_vm_network_connection(net_bridge_name)
                # VF driver should not be vfio-pci
                check_vfio_pci(vf_path, True)

                cmd_parms.update({'vm_ip': vm_ipv4,
                                  'vm_pwd': params.get("password")})
                vm_after_mig = remote.VMManager(cmd_parms)
                vm_after_mig.setup_ssh_auth()
                cmd = "ip link"
                cmd_result = vm_after_mig.run_command(cmd)
                libvirt.check_result(cmd_result)
                p_iface = re.findall(r"\d+:\s+(\w+):\s+.*",
                                     cmd_result.stdout_text)
                p_iface = [x for x in p_iface if x != 'lo']
                check_vm_iface_num(p_iface)

                # Check the output of ping command
                cmd = 'cat %s' % vm_tmp_file
                cmd_result = vm_after_mig.run_command(cmd)
                libvirt.check_result(cmd_result)

                if re.findall('Destination Host Unreachable',
                              cmd_result.stdout_text, re.M):
                    test.fail("The network does not work well during "
                              "the migration period. ping output: %s"
                              % cmd_result.stdout_text)

            # Execute migration from remote
            if migr_vm_back:
                ssh_connection = utils_conn.SSHConnection(
                    server_ip=client_ip, server_pwd=client_pwd,
                    client_ip=server_ip, client_pwd=server_pwd)
                try:
                    ssh_connection.conn_check()
                except utils_conn.ConnectionError:
                    ssh_connection.conn_setup()
                    ssh_connection.conn_check()

                # Pre migration setup for local machine
                migration_test.migrate_pre_setup(src_uri, params)
                cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options,
                                                  src_uri)
                logging.debug("Start migration: %s", cmd)
                cmd_result = remote.run_remote_cmd(cmd, params,
                                                   runner_on_target)
                logging.info(cmd_result)
                if cmd_result.exit_status:
                    test.fail("Failed to run '%s' on remote: %s"
                              % (cmd, cmd_result))
                logging.debug("migration back done")
                check_vm_network_accessed(ping_dest=vm_ipv4)
                if net_failover_test:
                    if vm_session:
                        vm_session.close()
                    vm_session = vm.wait_for_login()
                    iface_list = get_vm_ifaces(vm_session)
                    check_vm_iface_num(iface_list)
        else:
            check_vm_network_accessed(ping_dest=vm_ipv4)
            if net_failover_test:
                iface_list = get_vm_ifaces(vm_session)
                check_vm_iface_num(iface_list)
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        logging.info("Recover VM XML configuration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s",
                      orig_config_xml.xmltreefile)

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        if 'src_pf' in locals():
            cmd = ('echo %s >/proc/sys/net/ipv4/conf/all/rp_filter'
                   % default_src_rp_filter)
            process.run(cmd, shell=True, verbose=True)
            utils_sriov.add_or_del_connection(params, is_del=True)
            create_or_del_networks(src_pf, params, is_del=True)

        if 'dest_pf' in locals():
            cmd = ('echo %s >/proc/sys/net/ipv4/conf/all/rp_filter'
                   % default_dest_rp_filter)
            utils_misc.cmd_status_output(cmd, shell=True,
                                         session=server_session)
            utils_sriov.add_or_del_connection(destparams_dict,
                                              session=server_session,
                                              is_del=True)
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            create_or_del_networks(dest_pf, params, remote_virsh_session,
                                   is_del=True)
            remote_virsh_session.close_session()

        if 'dest_pf_pci_path' in locals() and default_dest_vf != vf_no:
            utils_sriov.set_vf(dest_pf_pci_path, default_dest_vf,
                               server_session)
        if 'src_pf_pci_path' in locals() and default_src_vf != vf_no:
            utils_sriov.set_vf(src_pf_pci_path, default_src_vf)

        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params, cleanup=True)

        server_session.close()
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        if source_file:
            libvirt.delete_local_disk("file", path=source_file)
def run(test, params, env): """ Test migration with special network settings 1) migrate guest with bridge type interface connected to ovs bridge :param test: test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ def check_vm_network_accessed(ping_dest, session=None): """ The operations to the VM need to be done before or after migration happens :param ping_dest: The destination to be ping :param session: The session object to the host :raise: test.fail when ping fails """ # Confirm local/remote VM can be accessed through network. logging.info("Check VM network connectivity") status, output = utils_net.ping(ping_dest, count=10, timeout=20, output_func=logging.debug, session=session) if status != 0: test.fail("Ping failed, status: %s, output: %s" % (status, output)) def update_iface_xml(vm_name, iface_dict): """ Update interfaces for guest :param vm_name: The name of VM :param iface_dict: The interface configurations params """ logging.debug("update iface xml") vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vmxml.remove_all_device_by_type('interface') vmxml.sync() iface = interface.Interface('network') iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", iface_dict) libvirt.add_vm_device(vmxml, iface) migration_test = migration.MigrationTest() migration_test.check_parameters(params) # Params for NFS shared storage shared_storage = params.get("migrate_shared_storage", "") if shared_storage == "": default_guest_asset = defaults.get_default_guest_os_info()['asset'] default_guest_asset = "%s.qcow2" % default_guest_asset shared_storage = os.path.join(params.get("nfs_mount_dir"), default_guest_asset) logging.debug("shared_storage:%s", shared_storage) # Params to update disk using shared storage params["disk_type"] = "file" params["disk_source_protocol"] = "netfs" params["mnt_path_name"] = params.get("nfs_mount_dir") # Local variables virsh_args = {"debug": True} server_ip = params.get("server_ip") server_user = params.get("server_user", "root") server_pwd = params.get("server_pwd") client_ip = params.get("client_ip") client_pwd = params.get("client_pwd") virsh_options = params.get("virsh_options", "") extra = params.get("virsh_migrate_extra") options = params.get("virsh_migrate_options", "--live --p2p --verbose") func_params_exists = "yes" == params.get("func_params_exists", "no") migr_vm_back = "yes" == params.get("migr_vm_back", "no") ovs_bridge_name = params.get("ovs_bridge_name") network_dict = eval(params.get("network_dict", '{}')) iface_dict = eval(params.get("iface_dict", '{}')) remote_virsh_dargs = { 'remote_ip': server_ip, 'remote_user': server_user, 'remote_pwd': server_pwd, 'unprivileged_user': None, 'ssh_remote_auth': True } func_name = None libvirtd_conf = None mig_result = None # params for migration connection params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( params.get("migrate_dest_host")) params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri( params.get("migrate_source_host")) src_uri = params.get("virsh_migrate_connect_uri") dest_uri = params.get("virsh_migrate_desturi") vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) vm.verify_alive() extra_args = {} if func_params_exists: extra_args.update({'func_params': params}) postcopy_options = params.get("postcopy_options") if postcopy_options: extra = "%s %s" % (extra, postcopy_options) func_name = virsh.migrate_postcopy # For safety reasons, we'd better back up xmlfile. 
new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) orig_config_xml = new_xml.copy() try: # Create a remote runner for later use runner_on_target = remote.RemoteRunner(host=server_ip, username=server_user, password=server_pwd) remote_session = remote.remote_login("ssh", server_ip, "22", server_user, server_pwd, r'[$#%]') if ovs_bridge_name: status, stdout = utils_net.create_ovs_bridge(ovs_bridge_name) if status: test.fail("Failed to create ovs bridge on local. Status: %s, " "Stdout: %s" % (status, stdout)) status, stdout = utils_net.create_ovs_bridge( ovs_bridge_name, session=remote_session) if status: test.fail("Failed to create ovs bridge on remote. Status: %s, " "Stdout: %s" % (status, stdout)) if network_dict: libvirt_network.create_or_del_network( network_dict, remote_args=remote_virsh_dargs) libvirt_network.create_or_del_network(network_dict) remote_session.close() # Change domain network xml if iface_dict: if "mac" not in iface_dict: mac = utils_net.generate_mac_address_simple() iface_dict.update({'mac': mac}) else: mac = iface_dict["mac"] update_iface_xml(vm_name, iface_dict) # Change the disk of the vm libvirt.set_vm_disk(vm, params) if not vm.is_alive(): try: vm.start() except virt_vm.VMStartError as err: test.fail("Failed to start VM: %s" % err) logging.debug("Guest xml after starting:\n%s", vm_xml.VMXML.new_from_dumpxml(vm_name)) # Check local guest network connection before migration if vm.serial_console is not None: vm.cleanup_serial_console() vm.create_serial_console() vm_session = vm.wait_for_serial_login(timeout=240) utils_net.restart_guest_network(vm_session) vm_ip = utils_net.get_guest_ip_addr(vm_session, mac) logging.debug("VM IP Addr: %s", vm_ip) check_vm_network_accessed(vm_ip) # Execute migration process vms = [vm] migration_test.do_migration(vms, None, dest_uri, 'orderly', options, thread_timeout=900, ignore_status=True, virsh_opt=virsh_options, func=func_name, extra_opts=extra, **extra_args) mig_result = migration_test.ret migration_test.check_result(mig_result, params) if int(mig_result.exit_status) == 0: remote_session = remote.remote_login("ssh", server_ip, "22", server_user, server_pwd, r'[$#%]') check_vm_network_accessed(vm_ip, session=remote_session) remote_session.close() # Execute migration from remote if migr_vm_back: ssh_connection = utils_conn.SSHConnection(server_ip=client_ip, server_pwd=client_pwd, client_ip=server_ip, client_pwd=server_pwd) try: ssh_connection.conn_check() except utils_conn.ConnectionError: ssh_connection.conn_setup() ssh_connection.conn_check() # Pre migration setup for local machine migration_test.migrate_pre_setup(src_uri, params) cmd = "virsh migrate %s %s %s" % (vm_name, options, src_uri) logging.debug("Start migration: %s", cmd) cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target) logging.info(cmd_result) if cmd_result.exit_status: test.fail("Failed to run '%s' on remote: %s" % (cmd, cmd_result)) logging.debug("VM is migrated back.") check_vm_network_accessed(vm_ip) finally: logging.debug("Recover test environment") # Clean VM on destination and source try: migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri) except Exception as err: logging.error(err) if vm.is_alive(): vm.destroy(gracefully=False) logging.info("Recovering VM XML configuration") orig_config_xml.sync() remote_session = remote.remote_login("ssh", server_ip, "22", server_user, server_pwd, r'[$#%]') if network_dict: libvirt_network.create_or_del_network( network_dict, is_del=True, remote_args=remote_virsh_dargs) 
libvirt_network.create_or_del_network(network_dict, is_del=True) if ovs_bridge_name: utils_net.delete_ovs_bridge(ovs_bridge_name) utils_net.delete_ovs_bridge(ovs_bridge_name, session=remote_session) remote_session.close() if migr_vm_back: if 'ssh_connection' in locals(): ssh_connection.auto_recover = True migration_test.migrate_pre_setup(src_uri, params, cleanup=True) logging.info("Remove local NFS image") source_file = params.get("source_file") if source_file: libvirt.delete_local_disk("file", path=source_file)
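# --- Hedged sketch (not part of the test above) ---
# The test pairs utils_net.create_ovs_bridge() with delete_ovs_bridge()
# on both hosts; a context manager makes that setup/cleanup symmetry
# explicit. The function signatures are taken from the test body itself;
# the helper name is illustrative only.
import contextlib

@contextlib.contextmanager
def _ovs_bridge(bridge_name, remote_session=None):
    """Create an OVS bridge locally (and remotely when a session is
    given), and always delete it again on exit."""
    status, stdout = utils_net.create_ovs_bridge(bridge_name)
    if status:
        raise RuntimeError("Failed to create ovs bridge: %s" % stdout)
    if remote_session:
        status, stdout = utils_net.create_ovs_bridge(bridge_name,
                                                     session=remote_session)
        if status:
            utils_net.delete_ovs_bridge(bridge_name)
            raise RuntimeError("Failed to create remote ovs bridge: %s"
                               % stdout)
    try:
        yield bridge_name
    finally:
        utils_net.delete_ovs_bridge(bridge_name)
        if remote_session:
            utils_net.delete_ovs_bridge(bridge_name, session=remote_session)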
def run(test, params, env): """ Run the test :param test: test object :param params: Dictionary with the test parameters :param env: Dictionary with test environment. """ libvirt_version.is_libvirt_feature_supported(params) vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) vm.verify_alive() bk_uri = vm.connect_uri migration_test = migration.MigrationTest() migration_test.check_parameters(params) extra_args = migration_test.update_virsh_migrate_extra_args(params) extra = params.get("virsh_migrate_extra") postcopy_options = params.get("postcopy_options") if postcopy_options: extra = "%s %s" % (extra, postcopy_options) params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( params.get("migrate_dest_host")) dest_uri = params.get("virsh_migrate_desturi") options = params.get("virsh_migrate_options", "--live --p2p --persistent --verbose") virsh_options = params.get("virsh_options", "") stress_package = params.get("stress_package") action_during_mig = params.get("action_during_mig") migrate_speed = params.get("migrate_speed") migrate_speed_again = params.get("migrate_speed_again") migrate_again = "yes" == params.get("migrate_again", "no") vm_state_after_abort = params.get("vm_state_after_abort") return_port = "yes" == params.get("return_port", "no") params['server_pwd'] = params.get("migrate_dest_pwd") params['server_ip'] = params.get("migrate_dest_host") params['server_user'] = params.get("remote_user", "root") is_storage_migration = True if extra.count('--copy-storage-all') else False setup_tls = "yes" == params.get("setup_tls", "no") qemu_conf_dest = params.get("qemu_conf_dest", "{}") migrate_tls_force_default = "yes" == params.get("migrate_tls_force_default", "no") poweroff_src_vm = "yes" == params.get("poweroff_src_vm", "no") check_port = "yes" == params.get("check_port", "no") server_ip = params.get("migrate_dest_host") server_user = params.get("remote_user", "root") server_pwd = params.get("migrate_dest_pwd") server_params = {'server_ip': server_ip, 'server_user': server_user, 'server_pwd': server_pwd} qemu_conf_list = eval(params.get("qemu_conf_list", "[]")) qemu_conf_path = params.get("qemu_conf_path") min_port = params.get("min_port") vm_session = None qemu_conf_remote = None (remove_key_local, remove_key_remote) = (None, None) # For safety reasons, we'd better back up xmlfile. 
new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) orig_config_xml = new_xml.copy() local_both_conf_obj = None remote_file_list = [] conn_obj_list = [] try: # Setup default value for migrate_tls_force if migrate_tls_force_default: value_list = ["migrate_tls_force"] # Setup migrate_tls_force default value on remote server_params['file_path'] = "/etc/libvirt/qemu.conf" remove_key_remote = libvirt_config.remove_key_in_conf(value_list, "qemu", remote_params=server_params) # Setup migrate_tls_force default value on local remove_key_local = libvirt_config.remove_key_in_conf(value_list, "qemu") if check_port: server_params['file_path'] = qemu_conf_path remove_key_remote = libvirt_config.remove_key_in_conf(qemu_conf_list, "qemu", remote_params=server_params) # Update only remote qemu conf if qemu_conf_dest: qemu_conf_remote = libvirt_remote.update_remote_file( server_params, qemu_conf_dest, "/etc/libvirt/qemu.conf") # Update local or both sides configuration files local_both_conf_obj = update_local_or_both_conf_file(params) # Setup TLS if setup_tls: conn_obj_list.append(migration_base.setup_conn_obj('tls', params, test)) # Update guest disk xml if not is_storage_migration: libvirt.set_vm_disk(vm, params) else: remote_file_list.append(libvirt_disk.create_remote_disk_by_same_metadata(vm, params)) if check_port: # Create a remote runner runner_on_target = remote_old.RemoteRunner(host=server_ip, username=server_user, password=server_pwd) cmd = "nc -l -p %s &" % min_port remote_old.run_remote_cmd(cmd, params, runner_on_target, ignore_status=False) if not vm.is_alive(): vm.start() logging.debug("Guest xml after starting:\n%s", vm_xml.VMXML.new_from_dumpxml(vm_name)) vm_session = vm.wait_for_login() if action_during_mig: if poweroff_src_vm: params.update({'vm_session': vm_session}) action_during_mig = migration_base.parse_funcs(action_during_mig, test, params) if stress_package: migration_test.run_stress_in_vm(vm, params) mode = 'both' if postcopy_options and '--postcopy' in postcopy_options else 'precopy' if migrate_speed: migration_test.control_migrate_speed(vm_name, int(migrate_speed), mode) # Execute migration process migration_base.do_migration(vm, migration_test, None, dest_uri, options, virsh_options, extra, action_during_mig, extra_args) func_returns = dict(migration_test.func_ret) migration_test.func_ret.clear() logging.debug("Migration returns function results:%s", func_returns) if return_port: port_used = get_used_port(func_returns) if check_port: port_used = get_used_port(func_returns) if int(port_used) != int(min_port) + 1: test.fail("Wrong port for migration.") if vm_state_after_abort: check_vm_state_after_abort(vm_name, vm_state_after_abort, bk_uri, dest_uri, test) if migrate_again: if not vm.is_alive(): vm.start() vm_session = vm.wait_for_login() action_during_mig = migration_base.parse_funcs(params.get('action_during_mig_again'), test, params) extra_args['status_error'] = params.get("migrate_again_status_error", "no") if params.get("virsh_migrate_extra_mig_again"): extra = params.get("virsh_migrate_extra_mig_again") if params.get('scp_list_client_again'): params['scp_list_client'] = params.get('scp_list_client_again') # Recreate tlsconnection object using new parameter values conn_obj_list.append(migration_base.setup_conn_obj('tls', params, test)) if migrate_speed_again: migration_test.control_migrate_speed(vm_name, int(migrate_speed_again), mode) migration_base.do_migration(vm, migration_test, None, dest_uri, options, virsh_options, extra, action_during_mig, extra_args) if return_port: func_returns = 
dict(migration_test.func_ret) logging.debug("Migration returns function " "results:%s", func_returns) port_second = get_used_port(func_returns) if port_used != port_second: test.fail("Expected the same port '{}' to be used as the previous " "one, but found new one '{}'".format(port_used, port_second)) else: logging.debug("Same port '%s' was used as " "expected", port_second) if int(migration_test.ret.exit_status) == 0: migration_test.post_migration_check([vm], params, uri=dest_uri) finally: logging.info("Recover test environment") vm.connect_uri = bk_uri if vm_session: vm_session.close() # Clean VM on destination and source migration_test.cleanup_vm(vm, dest_uri) # Restore remote qemu conf and restart libvirtd if qemu_conf_remote: logging.debug("Recover remote qemu configurations") del qemu_conf_remote # Restore local or both sides conf and restart libvirtd recover_config_file(local_both_conf_obj, params) if remove_key_remote: del remove_key_remote if remove_key_local: libvirt.customize_libvirt_config(None, config_object=remove_key_local, config_type='qemu') # Clean up connection object, like TLS migration_base.cleanup_conn_obj(conn_obj_list, test) for one_file in remote_file_list: if one_file: remote_old.run_remote_cmd("rm -rf %s" % one_file, params) orig_config_xml.sync()
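# --- Hedged sketch (not part of the test above) ---
# With "nc -l -p <min_port>" already holding min_port on the target, the
# migration is expected to fall through to min_port + 1. Assuming
# get_used_port() (defined elsewhere in this module) returns the port
# actually used, the check reduces to:
def _assert_migration_port_skipped(func_returns, min_port):
    """Fail when the migration did not skip the occupied min_port."""
    port_used = int(get_used_port(func_returns))
    expected = int(min_port) + 1
    if port_used != expected:
        raise AssertionError("Expected migration port %d, got %d"
                             % (expected, port_used))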
def run(test, params, env): """ Test virsh migrate command. """ def check_vm_network_accessed(session=None): """ The operations to the VM need to be done before or after migration happens :param session: The session object to the host :raise: test.error when ping fails """ # Confirm local/remote VM can be accessed through network. logging.info("Check VM network connectivity") s_ping, _ = utils_test.ping(vm.get_address(), count=10, timeout=20, output_func=logging.debug, session=session) if s_ping != 0: if session: session.close() test.fail("%s did not respond after %d sec." % (vm.name, 20)) def check_migration_res(result): """ Check if the migration result is as expected :param result: the output of migration :raise: test.fail if test is failed """ if not result: test.error("No migration result is returned.") logging.info("Migration out: %s", result.stdout_text.strip()) logging.info("Migration error: %s", result.stderr_text.strip()) if status_error: # Migration should fail if err_msg: # Special error messages are expected if not re.search(err_msg, result.stderr_text.strip()): test.fail("Can not find the expected patterns '%s' in " "output '%s'" % (err_msg, result.stderr_text.strip())) else: logging.debug("It is the expected error message") else: if int(result.exit_status) != 0: logging.debug("Migration failure is expected result") else: test.fail("Migration success is unexpected result") else: if int(result.exit_status) != 0: test.fail(result.stderr_text.strip()) check_parameters(test, params) # Params for NFS shared storage shared_storage = params.get("migrate_shared_storage", "") if shared_storage == "": default_guest_asset = defaults.get_default_guest_os_info()['asset'] default_guest_asset = "%s.qcow2" % default_guest_asset shared_storage = os.path.join(params.get("nfs_mount_dir"), default_guest_asset) logging.debug("shared_storage:%s", shared_storage) # Params to update disk using shared storage params["disk_type"] = "file" params["disk_source_protocol"] = "netfs" params["mnt_path_name"] = params.get("nfs_mount_dir") # Local variables virsh_args = {"debug": True} virsh_options = params.get("virsh_options", "") server_ip = params.get("server_ip") server_user = params.get("server_user", "root") server_pwd = params.get("server_pwd") client_ip = params.get("client_ip") client_pwd = params.get("client_pwd") extra = params.get("virsh_migrate_extra") options = params.get("virsh_migrate_options") guest_src_url = params.get("guest_src_url") guest_src_path = params.get("guest_src_path", "/var/lib/libvirt/images/guest.img") check_disk = "yes" == params.get("check_disk") disk_model = params.get("disk_model") disk_target = params.get("disk_target", "vda") controller_model = params.get("controller_model") check_interface = "yes" == params.get("check_interface") iface_type = params.get("iface_type", "network") iface_model = params.get("iface_model", "virtio") iface_params = { 'type': iface_type, 'model': iface_model, 'del_addr': True, 'source': '{"network": "default"}' } check_memballoon = "yes" == params.get("check_memballoon") membal_model = params.get("membal_model") check_rng = "yes" == params.get("check_rng") rng_model = params.get("rng_model") migr_vm_back = "yes" == params.get("migrate_vm_back", "no") status_error = "yes" == params.get("status_error", "no") remote_virsh_dargs = { 'remote_ip': server_ip, 'remote_user': server_user, 'remote_pwd': server_pwd, 'unprivileged_user': None, 'ssh_remote_auth': True } xml_check_after_mig = params.get("guest_xml_check_after_mig") err_msg = 
params.get("err_msg") vm_session = None remote_virsh_session = None vm = None mig_result = None if not libvirt_version.version_compare(5, 0, 0): test.cancel("This libvirt version doesn't support " "virtio-transitional model.") # Make sure all of parameters are assigned a valid value check_parameters(test, params) # params for migration connection params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( params.get("migrate_dest_host")) params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri( params.get("migrate_source_host")) src_uri = params.get("virsh_migrate_connect_uri") dest_uri = params.get("virsh_migrate_desturi") vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) vm.verify_alive() # For safety reasons, we'd better back up xmlfile. new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) orig_config_xml = new_xml.copy() migration_test = migration.MigrationTest() try: # Create a remote runner for later use runner_on_target = remote.RemoteRunner(host=server_ip, username=server_user, password=server_pwd) # download guest source and update interface model to keep guest up if guest_src_url: blk_source = download.get_file(guest_src_url, guest_src_path) if not blk_source: test.error("Fail to download image.") params["blk_source_name"] = blk_source if (not check_interface) and iface_model: iface_dict = {'model': iface_model} libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict) if not check_disk: params["disk_model"] = "virtio-transitional" if check_interface: libvirt.modify_vm_iface(vm_name, "update_iface", iface_params) if check_memballoon: membal_dict = {'membal_model': membal_model} dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) libvirt.update_memballoon_xml(dom_xml, membal_dict) if check_rng: rng_dict = {'rng_model': rng_model} rng_xml = libvirt.create_rng_xml(rng_dict) dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) libvirt.add_vm_device(dom_xml, rng_xml) # Change the disk of the vm libvirt.set_vm_disk(vm, params) if not vm.is_alive(): vm.start() logging.debug("Guest xml after starting:\n%s", vm_xml.VMXML.new_from_dumpxml(vm_name)) # Check local guest network connection before migration vm_session = vm.wait_for_login(restart_network=True) check_vm_network_accessed() # Execute migration process vms = [vm] migration_test.do_migration(vms, None, dest_uri, 'orderly', options, thread_timeout=900, ignore_status=True, virsh_opt=virsh_options, extra_opts=extra) mig_result = migration_test.ret check_migration_res(mig_result) if int(mig_result.exit_status) == 0: server_session = remote.wait_for_login('ssh', server_ip, '22', server_user, server_pwd, r"[\#\$]\s*$") check_vm_network_accessed(server_session) server_session.close() if xml_check_after_mig: if not remote_virsh_session: remote_virsh_session = virsh.VirshPersistent( **remote_virsh_dargs) target_guest_dumpxml = (remote_virsh_session.dumpxml( vm_name, debug=True, ignore_status=True).stdout_text.strip()) if check_disk: check_str = disk_model if disk_model else controller_model if check_interface: check_str = iface_model if check_memballoon: check_str = membal_model if check_rng: check_str = rng_model xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str) if not re.search(xml_check_after_mig, target_guest_dumpxml): test.fail("Fail to search '%s' in target guest XML:\n%s" % (xml_check_after_mig, target_guest_dumpxml)) remote_virsh_session.close_session() # Execute migration from remote if migr_vm_back: ssh_connection = utils_conn.SSHConnection(server_ip=client_ip, 
server_pwd=client_pwd, client_ip=server_ip, client_pwd=server_pwd) try: ssh_connection.conn_check() except utils_conn.ConnectionError: ssh_connection.conn_setup() ssh_connection.conn_check() # Pre migration setup for local machine migration_test.migrate_pre_setup(src_uri, params) cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri) logging.debug("Start migration: %s", cmd) cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target) logging.info(cmd_result) if cmd_result.exit_status: test.fail("Failed to run '%s' on remote: %s" % (cmd, cmd_result)) finally: logging.debug("Recover test environment") # Clean VM on destination vm.connect_uri = '' migration_test.cleanup_dest_vm(vm, src_uri, dest_uri) logging.info("Recovering VM XML configuration") orig_config_xml.sync() logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile) # Clean up of pre migration setup for local machine if migr_vm_back: if 'ssh_connection' in locals(): ssh_connection.auto_recover = True migration_test.migrate_pre_setup(src_uri, params, cleanup=True) if remote_virsh_session: remote_virsh_session.close_session() logging.info("Remove local NFS image") source_file = params.get("source_file") libvirt.delete_local_disk("file", path=source_file) if guest_src_url and blk_source: libvirt.delete_local_disk("file", path=blk_source)
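# --- Hedged sketch (not part of the test above) ---
# The post-migration XML check above appends the expected model in single
# quotes to the configured pattern and searches the XML dumped through the
# remote virsh session. A minimal standalone version:
import re as _re

def _model_in_target_xml(base_pattern, model, target_xml):
    """Return True when "<base_pattern>'<model>'" occurs in target_xml."""
    pattern = "%s'%s'" % (base_pattern, model)
    return _re.search(pattern, target_xml) is not None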
def run(test, params, env): """ Test migration of multi vms. """ vm_names = params.get("migrate_vms").split() if len(vm_names) < 2: raise exceptions.TestSkipError("No multi vms provided.") # Prepare parameters method = params.get("virsh_migrate_method") jobabort = "yes" == params.get("virsh_migrate_jobabort", "no") options = params.get("virsh_migrate_options", "") status_error = "yes" == params.get("status_error", "no") remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM") local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM") host_user = params.get("host_user", "root") host_passwd = params.get("host_password", "PASSWORD") nfs_shared_disk = params.get("nfs_shared_disk", True) migration_type = params.get("virsh_migration_type", "simultaneous") migrate_timeout = int(params.get("virsh_migrate_thread_timeout", 900)) migration_time = int(params.get("virsh_migrate_timeout", 60)) # Params for NFS and SSH setup params["server_ip"] = params.get("migrate_dest_host") params["server_user"] = "******" params["server_pwd"] = params.get("migrate_dest_pwd") params["client_ip"] = params.get("migrate_source_host") params["client_user"] = "******" params["client_pwd"] = params.get("migrate_source_pwd") params["nfs_client_ip"] = params.get("migrate_dest_host") params["nfs_server_ip"] = params.get("migrate_source_host") desturi = libvirt_vm.get_uri_with_transport(transport="ssh", dest_ip=remote_host) srcuri = libvirt_vm.get_uri_with_transport(transport="ssh", dest_ip=local_host) # Don't allow the defaults. if srcuri.count('///') or srcuri.count('EXAMPLE'): raise exceptions.TestSkipError("The srcuri '%s' is invalid" % srcuri) if desturi.count('///') or desturi.count('EXAMPLE'): raise exceptions.TestSkipError("The desturi '%s' is invalid" % desturi) # Config ssh autologin for remote host ssh_key.setup_remote_ssh_key(remote_host, host_user, host_passwd, port=22, public_key="rsa") # Prepare local session and remote session localrunner = remote.RemoteRunner(host=remote_host, username=host_user, password=host_passwd) remoterunner = remote.RemoteRunner(host=remote_host, username=host_user, password=host_passwd) # Configure NFS in remote host if nfs_shared_disk: nfs_client = nfs.NFSClient(params) nfs_client.setup() # Prepare MigrationHelper instance vms = [] for vm_name in vm_names: vm = env.get_vm(vm_name) vms.append(vm) try: option = make_migration_options(method, options, migration_time) # make sure cache=none if "unsafe" not in options: device_target = params.get("virsh_device_target", "sda") for vm in vms: if vm.is_alive(): vm.destroy() for each_vm in vm_names: logging.info("configure cache=none") vmxml = vm_xml.VMXML.new_from_dumpxml(each_vm) device_source = str( vmxml.get_disk_attr(each_vm, device_target, 'source', 'file')) ret_detach = virsh.detach_disk(each_vm, device_target, "--config") status = ret_detach.exit_status output = ret_detach.stdout.strip() logging.info("Status:%s", status) logging.info("Output:\n%s", output) if not ret_detach: raise exceptions.TestError("Detach disks fails") subdriver = utils_test.get_image_info(device_source)['format'] ret_attach = virsh.attach_disk( each_vm, device_source, device_target, "--driver qemu " "--config --cache none " "--subdriver %s" % subdriver) status = ret_attach.exit_status output = ret_attach.stdout.strip() logging.info("Status:%s", status) logging.info("Output:\n%s", output) if not ret_attach: raise exceptions.TestError("Attach disks fails") for vm in vms: if vm.is_dead(): vm.start() vm.wait_for_login() multi_migration(vms, 
srcuri, desturi, option, migration_type, migrate_timeout, jobabort, lrunner=localrunner, rrunner=remoterunner, status_error=status_error) except Exception as info: logging.error("Test failed: %s", info) flag_migration = False # NFS cleanup if nfs_shared_disk: logging.info("NFS cleanup") nfs_client.cleanup(ssh_auto_recover=False) localrunner.session.close() remoterunner.session.close() if not (ret_migration or flag_migration): if not status_error: raise exceptions.TestFail("Migration test failed") if not ret_jobabort: if not status_error: raise exceptions.TestFail("Abort migration failed") if not ret_downtime_tolerable: raise exceptions.TestFail("Downtime during migration is intolerable")
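# --- Hedged sketch (not part of the test above) ---
# virsh.detach_disk()/virsh.attach_disk() return CmdResult objects, which
# are always truthy, so success has to be judged by exit_status (as the
# checks above do). A helper making that explicit; the name is
# illustrative only:
def _run_virsh_checked(virsh_func, *args, **kwargs):
    """Run a virsh.* helper and raise on a non-zero exit status."""
    result = virsh_func(*args, **kwargs)
    if result.exit_status != 0:
        raise exceptions.TestError("virsh call failed with exit status %s"
                                   % result.exit_status)
    return result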
def run(test, params, env): """ Test virsh migrate command. """ def set_feature(vmxml, feature, value): """ Set guest features for PPC :param state: the htm status :param vmxml: guest xml """ features_xml = vm_xml.VMFeaturesXML() if feature == 'hpt': features_xml.hpt_resizing = value elif feature == 'htm': features_xml.htm = value vmxml.features = features_xml vmxml.sync() def trigger_hpt_resize(session): """ Check the HPT order file and dmesg :param session: the session to guest :raise: test.fail if required message is not found """ hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order" hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip() hpt_order = int(hpt_order) logging.info('Current hpt_order is %d', hpt_order) hpt_order += 1 cmd = 'echo %d > %s' % (hpt_order, hpt_order_path) cmd_result = session.cmd_status_output(cmd) result = process.CmdResult(stderr=cmd_result[1], stdout=cmd_result[1], exit_status=cmd_result[0]) libvirt.check_exit_status(result) dmesg = session.cmd('dmesg') dmesg_content = params.get('dmesg_content').split('|') for content in dmesg_content: if content % hpt_order not in dmesg: test.fail("'%s' is missing in dmesg" % (content % hpt_order)) else: logging.info("'%s' is found in dmesg", content % hpt_order) def check_vm_network_accessed(session=None): """ The operations to the VM need to be done before or after migration happens :param session: The session object to the host :raise: test.error when ping fails """ # Confirm local/remote VM can be accessed through network. logging.info("Check VM network connectivity") s_ping, _ = utils_test.ping(vm.get_address(), count=10, timeout=20, output_func=logging.debug, session=session) if s_ping != 0: if session: session.close() test.fail("%s did not respond after %d sec." % (vm.name, 20)) def check_virsh_command_and_option(command, option=None): """ Check if virsh command exists :param command: the command to be checked :param option: the command option to be checked """ msg = "This version of libvirt does not support " if not virsh.has_help_command(command): test.cancel(msg + "virsh command '%s'" % command) if option and not virsh.has_command_help_match(command, option): test.cancel(msg + "virsh command '%s' with option '%s'" % (command, option)) def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"): """ Add multiple devices :param dev_type: the type of the device to be added :param dev_index: the maximum index of the device to be added :param dev_model: the model of the device to be added """ for inx in range(0, int(dev_index) + 1): newcontroller = Controller("controller") newcontroller.type = dev_type newcontroller.index = inx newcontroller.model = dev_model logging.debug("New device is added:\n%s", newcontroller) vm_xml.add_device(newcontroller) vm_xml.sync() def do_migration(vm, dest_uri, options, extra): """ Execute the migration with given parameters :param vm: the guest to be migrated :param dest_uri: the destination uri for migration :param options: options next to 'migrate' command :param extra: options in the end of the migrate command line :return: CmdResult object """ logging.info("Sleeping 10 seconds before migration") time.sleep(10) # Migrate the guest. 
virsh_args.update({"ignore_status": True}) migration_res = vm.migrate(dest_uri, options, extra, **virsh_args) if int(migration_res.exit_status) != 0: logging.error("Migration failed for %s.", vm_name) return migration_res if vm.is_alive(): # vm.connect_uri was updated logging.info("VM is alive on destination %s.", dest_uri) else: test.fail("VM is not alive on destination %s" % dest_uri) # Throws exception if console shows panic message vm.verify_kernel_crash() return migration_res def cleanup_libvirtd_log(log_file): """ Remove existing libvirtd log file on source and target host. :param log_file: log file with absolute path """ if os.path.exists(log_file): logging.debug("Delete local libvirt log file '%s'", log_file) os.remove(log_file) cmd = "rm -f %s" % log_file logging.debug("Delete remote libvirt log file '%s'", log_file) cmd_parms = { 'server_ip': server_ip, 'server_user': server_user, 'server_pwd': server_pwd } remote.run_remote_cmd(cmd, cmd_parms, runner_on_target) def cleanup_dest(vm): """ Clean up the destination host environment when doing the uni-direction migration. :param vm: the guest to be cleaned up """ logging.info("Cleaning up VMs on %s", vm.connect_uri) try: if virsh.domain_exists(vm.name, uri=vm.connect_uri): vm_state = vm.state() if vm_state == "paused": vm.resume() elif vm_state == "shut off": vm.start() vm.destroy(gracefully=False) if vm.is_persistent(): vm.undefine() except Exception as detail: logging.error("Cleaning up destination failed.\n%s", detail) def run_stress_in_vm(): """ The function to load stress in VM """ stress_args = params.get( "stress_args", "--cpu 8 --io 4 " "--vm 2 --vm-bytes 128M " "--timeout 20s") try: vm_session.cmd('stress %s' % stress_args) except Exception as detail: logging.debug(detail) def control_migrate_speed(to_speed=1): """ Control migration duration :param to_speed: the speed value in Mbps to be set for migration :return int: the new migration speed after setting """ virsh_args.update({"ignore_status": False}) old_speed = virsh.migrate_getspeed(vm_name, **virsh_args) logging.debug("Current migration speed is %s MiB/s\n", old_speed.stdout.strip()) logging.debug("Set migration speed to %d MiB/s\n", to_speed) cmd_result = virsh.migrate_setspeed(vm_name, to_speed, "", **virsh_args) actual_speed = virsh.migrate_getspeed(vm_name, **virsh_args) logging.debug("New migration speed is %s MiB/s\n", actual_speed.stdout.strip()) return int(actual_speed.stdout.strip()) def check_setspeed(params): """ Set/get migration speed :param params: the parameters used :raise: test.fail if speed set does not take effect """ expected_value = int(params.get("migrate_speed", '41943040')) // (1024 * 1024) actual_value = control_migrate_speed(to_speed=expected_value) params.update({'compare_to_value': actual_value}) if actual_value != expected_value: test.fail( "Migration speed is expected to be '%d MiB/s', but '%d MiB/s' " "found" % (expected_value, actual_value)) def check_domjobinfo(params, option=""): """ Check given item in domjobinfo of the guest is as expected :param params: the parameters used :param option: options for domjobinfo :raise: test.fail if the value of given item is unexpected """ def search_jobinfo(jobinfo): """ Find value of given item in domjobinfo :param jobinfo: cmdResult object :raise: test.fail if not found """ for item in jobinfo.stdout.splitlines(): if item.count(jobinfo_item): groups = re.findall(r'[0-9.]+', item.strip()) logging.debug("In '%s' search '%s'\n", item, groups[0]) if (math.fabs(float(groups[0]) - 
float(compare_to_value)) // float(compare_to_value) > diff_rate): test.fail("{} {} has too much difference from " "{}".format(jobinfo_item, groups[0], compare_to_value)) break jobinfo_item = params.get("jobinfo_item") compare_to_value = params.get("compare_to_value") logging.debug("compare_to_value:%s", compare_to_value) diff_rate = float(params.get("diff_rate", "0")) if not jobinfo_item or not compare_to_value: return vm_ref = '{}{}'.format(vm_name, option) jobinfo = virsh.domjobinfo(vm_ref, **virsh_args) search_jobinfo(jobinfo) check_domjobinfo_remote = params.get("check_domjobinfo_remote") if check_domjobinfo_remote: remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) jobinfo = remote_virsh_session.domjobinfo(vm_ref, **virsh_args) search_jobinfo(jobinfo) remote_virsh_session.close_session() def check_maxdowntime(params): """ Set/get migration maxdowntime :param params: the parameters used :raise: test.fail if maxdowntime set does not take effect """ expected_value = int( float(params.get("migrate_maxdowntime", '0.3')) * 1000) virsh_args.update({"ignore_status": False}) old_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip()) logging.debug("Current migration maxdowntime is %d ms", old_value) logging.debug("Set migration maxdowntime to %d ms", expected_value) virsh.migrate_setmaxdowntime(vm_name, expected_value, **virsh_args) actual_value = int( virsh.migrate_getmaxdowntime(vm_name).stdout.strip()) logging.debug("New migration maxdowntime is %d ms", actual_value) if actual_value != expected_value: test.fail( "Migration maxdowntime is expected to be '%d ms', but '%d ms' " "found" % (expected_value, actual_value)) params.update({'compare_to_value': actual_value}) def do_actions_during_migrate(params): """ The entry point to execute action list during migration :param params: the parameters used """ actions_during_migration = params.get("actions_during_migration") if not actions_during_migration: return for action in actions_during_migration.split(","): if action == 'setspeed': check_setspeed(params) elif action == 'domjobinfo': check_domjobinfo(params) elif action == 'setmaxdowntime': check_maxdowntime(params) time.sleep(3) def attach_channel_xml(): """ Create channel xml and attach it to guest configuration """ # Check if pty channel exists already for elem in new_xml.devices.by_device_tag('channel'): if elem.type_name == channel_type_name: logging.debug( "{0} channel already exists in guest. " "No need to add new one".format(channel_type_name)) return params = { 'channel_type_name': channel_type_name, 'target_type': target_type, 'target_name': target_name } channel_xml = libvirt.create_channel_xml(params) virsh.attach_device(domain_opt=vm_name, file_opt=channel_xml.xml, flagstr="--config", ignore_status=False) logging.debug("New VMXML with channel:\n%s", virsh.dumpxml(vm_name)) def check_timeout_postcopy(params): """ Check the vm state on target host after timeout when --postcopy and --timeout-postcopy are used. The vm state is expected as running. 
:param params: the parameters used """ timeout = int(params.get("timeout_postcopy", 10)) time.sleep(timeout + 1) remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) vm_state = results_stdout_52lts( remote_virsh_session.domstate(vm_name)).strip() if vm_state != "running": remote_virsh_session.close_session() test.fail( "After timeout '%s' seconds, " "the vm state on target host should " "be 'running', but '%s' found", timeout, vm_state) remote_virsh_session.close_session() def get_usable_compress_cache(pagesize): """ Get a number which is bigger than pagesize and is power of two. :param pagesize: the given integer :return: an integer satisfying the criteria """ def calculate(num): result = num & (num - 1) return (result == 0) item = pagesize found = False while (not found): item += 1 found = calculate(item) logging.debug( "%d is smallest one that is bigger than '%s' and " "is power of 2", item, pagesize) return item def check_migration_res(result): """ Check if the migration result is as expected :param result: the output of migration :raise: test.fail if test is failed """ logging.info("Migration out: %s", results_stdout_52lts(result).strip()) logging.info("Migration error: %s", results_stderr_52lts(result).strip()) if status_error: # Migration should fail if err_msg: # Special error messages are expected if not re.search(err_msg, results_stderr_52lts(result).strip()): test.fail("Can not find the expected patterns '%s' in " "output '%s'" % (err_msg, results_stderr_52lts(result).strip())) else: logging.debug("It is the expected error message") else: if int(result.exit_status) != 0: logging.debug("Migration failure is expected result") else: test.fail("Migration success is unexpected result") else: if int(result.exit_status) != 0: test.fail(results_stderr_52lts(result).strip()) check_parameters(test, params) # Params for NFS shared storage shared_storage = params.get("migrate_shared_storage", "") if shared_storage == "": default_guest_asset = defaults.get_default_guest_os_info()['asset'] default_guest_asset = "%s.qcow2" % default_guest_asset shared_storage = os.path.join(params.get("nfs_mount_dir"), default_guest_asset) logging.debug("shared_storage:%s", shared_storage) # params for migration connection params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( params.get("migrate_dest_host")) # Params to update disk using shared storage params["disk_type"] = "file" params["disk_source_protocol"] = "netfs" params["mnt_path_name"] = params.get("nfs_mount_dir") # Local variables virsh_args = {"debug": True} virsh_opt = params.get("virsh_opt", "") server_ip = params.get("server_ip") server_user = params.get("server_user", "root") server_pwd = params.get("server_pwd") extra = params.get("virsh_migrate_extra") options = params.get("virsh_migrate_options") src_uri = params.get("virsh_migrate_connect_uri") dest_uri = params.get("virsh_migrate_desturi") log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log") check_complete_job = "yes" == params.get("check_complete_job", "no") config_libvirtd = "yes" == params.get("config_libvirtd", "no") contrl_index = params.get("new_contrl_index", None) asynch_migration = "yes" == params.get("asynch_migrate", "no") grep_str_remote_log = params.get("grep_str_remote_log", "") grep_str_local_log = params.get("grep_str_local_log", "") disable_verify_peer = "yes" == params.get("disable_verify_peer", "no") status_error = "yes" == params.get("status_error", "no") stress_in_vm = "yes" == params.get("stress_in_vm", "no") low_speed = 
params.get("low_speed", None) remote_virsh_dargs = { 'remote_ip': server_ip, 'remote_user': server_user, 'remote_pwd': server_pwd, 'unprivileged_user': None, 'ssh_remote_auth': True } hpt_resize = params.get("hpt_resize", None) htm_state = params.get("htm_state", None) # For pty channel test add_channel = "yes" == params.get("add_channel", "no") channel_type_name = params.get("channel_type_name", None) target_type = params.get("target_type", None) target_name = params.get("target_name", None) cmd_run_in_remote_guest = params.get("cmd_run_in_remote_guest", None) cmd_run_in_remote_guest_1 = params.get("cmd_run_in_remote_guest_1", None) cmd_run_in_remote_host = params.get("cmd_run_in_remote_host", None) cmd_run_in_remote_host_1 = params.get("cmd_run_in_remote_host_1", None) cmd_run_in_remote_host_2 = params.get("cmd_run_in_remote_host_2", None) # For qemu command line checking qemu_check = params.get("qemu_check", None) xml_check_after_mig = params.get("guest_xml_check_after_mig", None) # params for cache matrix test cache = params.get("cache") remove_cache = "yes" == params.get("remove_cache", "no") err_msg = params.get("err_msg") arch = platform.machine() if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch: test.cancel("The case is PPC only.") # For TLS tls_recovery = params.get("tls_auto_recovery", "yes") # qemu config qemu_conf_dict = None # libvirtd config libvirtd_conf_dict = None remote_virsh_session = None vm = None vm_session = None libvirtd_conf = None qemu_conf = None mig_result = None test_exception = None is_TestError = False is_TestFail = False is_TestSkip = False # Objects to be cleaned up in the end objs_list = [] tls_obj = None # Local variables vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) vm.verify_alive() # For safety reasons, we'd better back up xmlfile. 
new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) orig_config_xml = new_xml.copy() if not orig_config_xml: test.error("Backing up xmlfile failed.") try: # Create a remote runner for later use runner_on_target = remote.RemoteRunner(host=server_ip, username=server_user, password=server_pwd) # Change the configuration files if needed before starting guest # For qemu.conf if extra.count("--tls"): # Setup TLS tls_obj = TLSConnection(params) if tls_recovery == "yes": objs_list.append(tls_obj) tls_obj.auto_recover = True tls_obj.conn_setup() if not disable_verify_peer: qemu_conf_dict = {"migrate_tls_x509_verify": "1"} # Setup qemu configure logging.debug("Configure the qemu") cleanup_libvirtd_log(log_file) qemu_conf = libvirt.customize_libvirt_config( qemu_conf_dict, config_type="qemu", remote_host=True, extra_params=params) # Setup libvirtd if config_libvirtd: logging.debug("Configure the libvirtd") cleanup_libvirtd_log(log_file) libvirtd_conf_dict = setup_libvirtd_conf_dict(params) libvirtd_conf = libvirt.customize_libvirt_config( libvirtd_conf_dict, remote_host=True, extra_params=params) # Prepare required guest xml before starting guest if contrl_index: new_xml.remove_all_device_by_type('controller') logging.debug("After removing controllers, current XML:\n%s\n", new_xml) add_ctrls(new_xml, dev_index=contrl_index) if add_channel: attach_channel_xml() if hpt_resize: set_feature(new_xml, 'hpt', hpt_resize) if htm_state: set_feature(new_xml, 'htm', htm_state) if cache: params["driver_cache"] = cache if remove_cache: params["enable_cache"] = "no" # Change the disk of the vm to shared disk and then start VM libvirt.set_vm_disk(vm, params) if not vm.is_alive(): vm.start() logging.debug("Guest xml after starting:\n%s", vm_xml.VMXML.new_from_dumpxml(vm_name)) # Check qemu command line after guest is started if qemu_check: check_content = qemu_check if hpt_resize: check_content = "%s%s" % (qemu_check, hpt_resize) if htm_state: check_content = "%s%s" % (qemu_check, htm_state) libvirt.check_qemu_cmd_line(check_content) # Check local guest network connection before migration vm_session = vm.wait_for_login() check_vm_network_accessed() # Preparation for the running guest before migration if hpt_resize and hpt_resize != 'disabled': trigger_hpt_resize(vm_session) if low_speed: control_migrate_speed(int(low_speed)) if stress_in_vm: pkg_name = 'stress' logging.debug("Check if stress tool is installed") pkg_mgr = utils_package.package_manager(vm_session, pkg_name) if not pkg_mgr.is_installed(pkg_name): logging.debug("Stress tool will be installed") if not pkg_mgr.install(): test.error("Package '%s' installation fails" % pkg_name) stress_thread = threading.Thread(target=run_stress_in_vm, args=()) stress_thread.start() if extra.count("timeout-postcopy"): func_name = check_timeout_postcopy if params.get("actions_during_migration"): func_name = do_actions_during_migrate if extra.count("comp-xbzrle-cache"): cache = get_usable_compress_cache(memory.get_page_size()) extra = "%s %s" % (extra, cache) # For --postcopy enable postcopy_options = params.get("postcopy_options") if postcopy_options: extra = "%s %s" % (extra, postcopy_options) # Execute migration process if not asynch_migration: mig_result = do_migration(vm, dest_uri, options, extra) else: migration_test = libvirt.MigrationTest() logging.debug("vm.connect_uri=%s", vm.connect_uri) vms = [vm] try: migration_test.do_migration(vms, None, dest_uri, 'orderly', options, thread_timeout=900, ignore_status=True, virsh_opt=virsh_opt, func=func_name, 
extra_opts=extra, func_params=params) mig_result = migration_test.ret except exceptions.TestFail as fail_detail: test.fail(fail_detail) except exceptions.TestSkipError as skip_detail: test.cancel(skip_detail) except exceptions.TestError as error_detail: test.error(error_detail) except Exception as details: mig_result = migration_test.ret logging.error(details) check_migration_res(mig_result) if add_channel: # Get the channel device source path of remote guest if not remote_virsh_session: remote_virsh_session = virsh.VirshPersistent( **remote_virsh_dargs) file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir()) remote_virsh_session.dumpxml(vm_name, to_file=file_path, debug=True, ignore_status=True) local_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) local_vmxml.xmltreefile = xml_utils.XMLTreeFile(file_path) for elem in local_vmxml.devices.by_device_tag('channel'): logging.debug("Found channel device {}".format(elem)) if elem.type_name == channel_type_name: host_source = elem.source.get('path') logging.debug( "Remote guest uses {} for channel device".format( host_source)) break remote_virsh_session.close_session() if not host_source: test.fail("Can not find source for %s channel on remote host" % channel_type_name) # Prepare to wait for message on remote host from the channel cmd_parms = { 'server_ip': server_ip, 'server_user': server_user, 'server_pwd': server_pwd } cmd_result = remote.run_remote_cmd( cmd_run_in_remote_host % host_source, cmd_parms, runner_on_target) # Send message from remote guest to the channel file remote_vm_obj = utils_test.RemoteVMManager(cmd_parms) vm_ip = vm.get_address() vm_pwd = params.get("password") remote_vm_obj.setup_ssh_auth(vm_ip, vm_pwd) cmd_result = remote_vm_obj.run_command(vm_ip, cmd_run_in_remote_guest_1) remote_vm_obj.run_command( vm_ip, cmd_run_in_remote_guest % results_stdout_52lts(cmd_result).strip()) logging.debug("Sending message is done") # Check message on remote host from the channel remote.run_remote_cmd(cmd_run_in_remote_host_1, cmd_parms, runner_on_target) logging.debug("Receiving message is done") remote.run_remote_cmd(cmd_run_in_remote_host_2, cmd_parms, runner_on_target) if check_complete_job: opts = " --completed" check_virsh_command_and_option("domjobinfo", opts) if extra.count("comp-xbzrle-cache"): params.update({'compare_to_value': cache // 1024}) check_domjobinfo(params, option=opts) if grep_str_local_log: cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file) cmdRes = process.run(cmd, shell=True, ignore_status=True) if cmdRes.exit_status: test.fail(results_stderr_52lts(cmdRes).strip()) if grep_str_remote_log: cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file) cmd_parms = { 'server_ip': server_ip, 'server_user': server_user, 'server_pwd': server_pwd } remote.run_remote_cmd(cmd, cmd_parms, runner_on_target) if xml_check_after_mig: if not remote_virsh_session: remote_virsh_session = virsh.VirshPersistent( **remote_virsh_dargs) target_guest_dumpxml = results_stdout_52lts( remote_virsh_session.dumpxml(vm_name, debug=True, ignore_status=True)).strip() if hpt_resize: check_str = hpt_resize elif htm_state: check_str = htm_state if hpt_resize or htm_state: xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str) if not re.search(xml_check_after_mig, target_guest_dumpxml): remote_virsh_session.close_session() test.fail("Fail to search '%s' in target guest XML:\n%s" % (xml_check_after_mig, target_guest_dumpxml)) if contrl_index: all_ctrls = re.findall(xml_check_after_mig, target_guest_dumpxml) if len(all_ctrls) != 
int(contrl_index) + 1: remote_virsh_session.close_session() test.fail( "%s pci-root controllers are expected in guest XML, " "but found %s" % (int(contrl_index) + 1, len(all_ctrls))) remote_virsh_session.close_session() if int(mig_result.exit_status) == 0: server_session = remote.wait_for_login('ssh', server_ip, '22', server_user, server_pwd, r"[\#\$]\s*$") check_vm_network_accessed(server_session) server_session.close() except exceptions.TestFail as details: is_TestFail = True test_exception = details except exceptions.TestSkipError as details: is_TestSkip = True test_exception = details except exceptions.TestError as details: is_TestError = True test_exception = details except Exception as details: test_exception = details finally: logging.debug("Recover test environment") try: # Clean VM on destination vm.connect_uri = dest_uri cleanup_dest(vm) vm.connect_uri = src_uri logging.info("Recovering VM XML configuration") orig_config_xml.sync() logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile) if remote_virsh_session: remote_virsh_session.close_session() if extra.count("--tls") and not disable_verify_peer: logging.debug("Recover the qemu configuration") libvirt.customize_libvirt_config(None, config_type="qemu", remote_host=True, extra_params=params, is_recover=True, config_object=qemu_conf) if config_libvirtd: logging.debug("Recover the libvirtd configuration") libvirt.customize_libvirt_config(None, remote_host=True, extra_params=params, is_recover=True, config_object=libvirtd_conf) logging.info("Remove local NFS image") source_file = params.get("source_file") libvirt.delete_local_disk("file", path=source_file) if objs_list: for obj in objs_list: logging.debug("Clean up local objs") del obj except Exception as exception_detail: if (not test_exception and not is_TestError and not is_TestFail and not is_TestSkip): raise exception_detail else: # if any of above exceptions has been raised, only print # error log here to avoid hiding the original issue logging.error(exception_detail) # Check result if is_TestFail: test.fail(test_exception) if is_TestSkip: test.cancel(test_exception) if is_TestError: test.error(test_exception) if not test_exception: logging.info("Case execution is done.") else: test.error(test_exception)
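# --- Hedged note (not part of the test above) ---
# get_usable_compress_cache() searches linearly for the smallest power of
# two strictly greater than the page size. The same value can be computed
# directly with int.bit_length(); shown only to document what the helper
# returns:
def _next_power_of_two(pagesize):
    """Smallest power of two strictly greater than pagesize."""
    return 1 << int(pagesize).bit_length()

assert _next_power_of_two(4096) == 8192  # e.g. a 4 KiB page maps to 8192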
def run(test, params, env): """ Test migration of multi vms. """ vm_names = params.get("migrate_vms").split() if len(vm_names) < 2: raise error.TestNAError("No multi vms provided.") # Prepare parameters method = params.get("virsh_migrate_method") simultaneous = "yes" == params.get("simultaneous_migration", "no") jobabort = "yes" == params.get("virsh_migrate_jobabort", "no") options = params.get("virsh_migrate_options", "") status_error = "yes" == params.get("status_error", "no") #remote_migration = "yes" == params.get("remote_migration", "no") remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM") local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM") host_user = params.get("host_user", "root") host_passwd = params.get("host_password", "PASSWORD") desturi = libvirt_vm.get_uri_with_transport(transport="ssh", dest_ip=remote_host) srcuri = libvirt_vm.get_uri_with_transport(transport="ssh", dest_ip=local_host) # Don't allow the defaults. if srcuri.count('///') or srcuri.count('EXAMPLE'): raise error.TestNAError("The srcuri '%s' is invalid" % srcuri) if desturi.count('///') or desturi.count('EXAMPLE'): raise error.TestNAError("The desturi '%s' is invalid" % desturi) # Config ssh autologin for remote host ssh_key.setup_ssh_key(remote_host, host_user, host_passwd, port=22) # Prepare local session and remote session localrunner = remote.RemoteRunner(host=remote_host, username=host_user, password=host_passwd) remoterunner = remote.RemoteRunner(host=remote_host, username=host_user, password=host_passwd) # Prepare MigrationHelper instance helpers = [] for vm_name in vm_names: helper = MigrationHelper(vm_name, test, params, env) helper.set_virsh_instance() helper.set_migration_cmd(options, method, desturi) helpers.append(helper) for helper in helpers: vm = helper.vm if vm.is_dead(): vm.start() vm.wait_for_login() # Used for checking downtime helper.vm_ip = vm.get_address() try: multi_migration(helpers, simultaneous=False, jobabort=False, lrunner=localrunner, rrunner=remoterunner) finally: for helper in helpers: helper.virsh_instance.close_session() helper.cleanup_vm(srcuri, desturi) localrunner.session.close() remoterunner.session.close() if not ret_migration: if not status_error: raise error.TestFail("Migration test failed.") if not ret_jobabort: if not status_error: raise error.TestFail("Abort migration failed.") if not ret_downtime_tolerable: raise error.TestFail("Downtime during migration is intolerable.")
def run(test, params, env): """ Test remote access with TCP, TLS connection """ test_dict = dict(params) vm_name = test_dict.get("main_vm") vm = env.get_vm(vm_name) start_vm = test_dict.get("start_vm", "no") # Server and client parameters server_ip = test_dict.get("server_ip") server_user = test_dict.get("server_user") server_pwd = test_dict.get("server_pwd") client_ip = test_dict.get("client_ip") client_user = test_dict.get("client_user") client_pwd = test_dict.get("client_pwd") server_cn = test_dict.get("server_cn") client_cn = test_dict.get("client_cn") target_ip = test_dict.get("target_ip", "") # generate remote IP if target_ip == "": if server_cn: target_ip = server_cn elif server_ip: target_ip = server_ip else: target_ip = target_ip remote_virsh_dargs = { 'remote_ip': server_ip, 'remote_user': server_user, 'remote_pwd': server_pwd, 'unprivileged_user': None, 'ssh_remote_auth': True } # Ceph disk parameters driver = test_dict.get("test_driver", "qemu") transport = test_dict.get("transport") plus = test_dict.get("conn_plus", "+") source_type = test_dict.get("vm_disk_source_type", "file") virsh_options = test_dict.get("virsh_options", "--verbose --live") vol_name = test_dict.get("vol_name") disk_src_protocol = params.get("disk_source_protocol") source_file = test_dict.get("disk_source_file") disk_format = test_dict.get("disk_format", "qcow2") mon_host = params.get("mon_host") ceph_key_opt = "" attach_disk = False # Disk XML file disk_xml = None # Define ceph_disk conditional variable ceph_disk = "yes" == test_dict.get("ceph_disk") # For --postcopy enable postcopy_options = test_dict.get("postcopy_options") if postcopy_options and not virsh_options.count(postcopy_options): virsh_options = "%s %s" % (virsh_options, postcopy_options) test_dict['virsh_options'] = virsh_options # For bi-directional and tls reverse test uri_port = test_dict.get("uri_port", ":22") uri_path = test_dict.get("uri_path", "/system") src_uri = test_dict.get("migration_source_uri", "qemu:///system") uri = "%s%s%s://%s%s%s" % (driver, plus, transport, target_ip, uri_port, uri_path) test_dict["desuri"] = uri # Make sure all of parameters are assigned a valid value check_parameters(test, test_dict) # Set up SSH key #ssh_key.setup_ssh_key(server_ip, server_user, server_pwd, port=22) remote_session = remote.wait_for_login('ssh', server_ip, '22', server_user, server_pwd, r"[\#\$]\s*$") remote_session.close() #ssh_key.setup_ssh_key(server_ip, server_user, server_pwd, port=22) # Set up remote ssh key and remote /etc/hosts file for bi-direction migration migrate_vm_back = "yes" == test_dict.get("migrate_vm_back", "no") if migrate_vm_back: ssh_key.setup_remote_ssh_key(server_ip, server_user, server_pwd) ssh_key.setup_remote_known_hosts_file(client_ip, server_ip, server_user, server_pwd) # Reset Vm state if needed if vm.is_alive() and start_vm == "no": vm.destroy(gracefully=False) # Back up xml file. vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) # Setup migration context migrate_setup = migration.MigrationTest() migrate_setup.migrate_pre_setup(test_dict["desuri"], params) # Install ceph-common on remote host machine. 
    remote_ssh_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r"[\#\$]\s*$")
    if not utils_package.package_install(["ceph-common"], remote_ssh_session):
        test.error("Failed to install required packages on remote host")
    remote_ssh_session.close()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Record the initial SELinux enforcing status on both hosts
        LOCAL_SELINUX_ENFORCING_STATUS = utils_selinux.get_status()
        logging.info("previous local enforce: %s",
                     LOCAL_SELINUX_ENFORCING_STATUS)
        cmd_result = remote.run_remote_cmd('getenforce', params,
                                           runner_on_target)
        REMOTE_SELINUX_ENFORCING_STATUS = cmd_result.stdout_text
        logging.info("previous remote enforce: %s",
                     REMOTE_SELINUX_ENFORCING_STATUS)

        if ceph_disk:
            logging.info("Put local SELinux in permissive mode when "
                         "testing ceph migrating")
            utils_selinux.set_status("permissive")

            logging.info("Put remote SELinux in permissive mode")
            cmd = "setenforce permissive"
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            if status:
                test.error("Failed to set SELinux in permissive mode")

            # Prepare the ceph disk
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            test_dict['key_file'] = key_file
            test_dict['first_disk'] = vm.get_first_disk_devices()
            ceph_key_opt, secret_uuid = prepare_ceph_disk(
                test_dict, remote_virsh_dargs, test, runner_on_target)
            host_ip = test_dict.get('mon_host')
            disk_image = test_dict.get('disk_img')

            # Build the auth information
            auth_attrs = {}
            auth_attrs['auth_user'] = params.get("auth_user")
            auth_attrs['secret_type'] = params.get("secret_type")
            auth_attrs['secret_uuid'] = secret_uuid
            build_disk_xml(vm_name, disk_format, host_ip, disk_src_protocol,
                           vol_name, disk_image, auth=auth_attrs)

            vm_xml_cxt = process.run("virsh dumpxml %s" % vm_name,
                                     shell=True).stdout_text
            logging.debug("The VM XML with ceph disk source: \n%s",
                          vm_xml_cxt)

            try:
                if vm.is_dead():
                    vm.start()
            except virt_vm.VMStartError as e:
                logging.info("Failed to start VM: %s", e)
                test.fail("Failed to start VM: %s" % vm_name)

        # Ensure the same VM name doesn't exist on the remote host
        # before migrating
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        # Trigger migration
        migrate_vm(test, test_dict)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre-migration setup for the local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            logging.info(output)
            if status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target)
                test.fail("Failed to run '%s' on remote: %s" % (cmd, output))
    finally:
        logging.info("Recover test environment")
        # Clean up the pre-migration setup for the local machine
        if migrate_vm_back:
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)

        # Ensure the VM can be cleaned up on the remote host even if
        # migration failed
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        logging.info("Recover VM XML configuration")
        vmxml_backup.sync()
        logging.debug("The current VM XML:\n%s", vmxml_backup.xmltreefile)

        # Clean up the ceph environment
        if disk_src_protocol == "rbd":
            # Clean up local secrets
            secret_list = get_secret_list()
            if secret_list:
                for secret_uuid in secret_list:
                    virsh.secret_undefine(secret_uuid)

            # Clean up dirty secrets on the remote host if the test
            # involves ceph auth
            client_name = test_dict.get('client_name')
            client_key = test_dict.get("client_key")
            if client_name and client_key:
                remote_virsh = None
                try:
                    remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs)
                    remote_dirty_secret_list = get_secret_list(remote_virsh)
                    for dirty_secret_uuid in remote_dirty_secret_list:
                        remote_virsh.secret_undefine(dirty_secret_uuid)
                except (process.CmdError, remote.SCPError) as detail:
                    test.error(detail)
                finally:
                    if remote_virsh:
                        remote_virsh.close_session()

            # Delete the disk if it exists
            disk_src_name = "%s/%s" % (vol_name, test_dict.get('disk_img'))
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, ceph_key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        if LOCAL_SELINUX_ENFORCING_STATUS:
            logging.info("Restore local SELinux to its original mode")
            utils_selinux.set_status(LOCAL_SELINUX_ENFORCING_STATUS)
        if REMOTE_SELINUX_ENFORCING_STATUS:
            logging.info("Restore remote SELinux to its original mode")
            cmd = "yes yes | setenforce %s" % REMOTE_SELINUX_ENFORCING_STATUS
            remote.run_remote_cmd(cmd, params, runner_on_target)

        # Remove known hosts on the local host
        cmd = "ssh-keygen -R %s" % server_ip
        process.run(cmd, ignore_status=True, shell=True)

        # Remove known hosts on the remote host
        cmd = "ssh-keygen -R %s" % client_ip
        remote.run_remote_cmd(cmd, params, runner_on_target)
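get_secret_list() is used in the cleanup above but is not defined in this excerpt. A minimal sketch of what it is assumed to do, listing libvirt secret UUIDs either locally or through an optional persistent remote virsh session; the output parsing below is illustrative:

def get_secret_list(remote_virsh=None):
    """
    Hypothetical helper: return the UUIDs of all defined libvirt secrets.
    When remote_virsh (a virsh.VirshPersistent session) is given, the
    remote host is queried instead of the local one.
    """
    virsh_instance = remote_virsh if remote_virsh else virsh
    output = virsh_instance.secret_list().stdout_text.strip()
    secret_uuids = []
    # Skip the two header lines of 'virsh secret-list'; the first column
    # of each remaining row is the secret UUID.
    for line in output.splitlines()[2:]:
        fields = line.split()
        if fields:
            secret_uuids.append(fields[0])
    return secret_uuids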