Example #1
def setup_remote_ssh_key(hostname1, user1, password1,
                         hostname2=None, user2=None, password2=None,
                         port=22, config_options=None, public_key="rsa"):
    """
    Set up passwordless (public-key) login from one remote server to another.
    If hostname2 is not supplied, set up login to the local host instead.

    :param hostname1: the server that wants to log into the other host
    :type hostname1: str
    :param user1: user to log in as on hostname1
    :type user1: str
    :param password1: password of user1 on hostname1
    :type password1: str
    :param hostname2: the server to be logged into; defaults to the local host
    :type hostname2: str
    :param user2: user to log in as on hostname2
    :type user2: str
    :param password2: password of user2 on hostname2
    :type password2: str
    :param port: ssh port number
    :type port: int
    :param config_options: list of ssh config options, e.g. ["StrictHostKeyChecking=no"]
    :type config_options: list of str
    :param public_key: type of public key to fetch, e.g. "rsa"
    :type public_key: str
    """
    logging.debug('Performing SSH key setup on %s:%d as %s.' %
                  (hostname1, port, user1))

    try:
        session1 = remote.remote_login(client='ssh', host=hostname1, port=port,
                                       username=user1, password=password1,
                                       prompt=r'[$#%]')
        public_key = get_remote_public_key(session1, public_key=public_key)

        if hostname2 is None:
            # Simply create a session to local
            session2 = aexpect.ShellSession("sh", linesep='\n', prompt='#')
            # set config in local machine
            if config_options:
                for each_option in config_options:
                    session2.cmd_output("echo '%s' >> ~/.ssh/config" %
                                        each_option)
        else:
            session2 = remote.remote_login(client='ssh', host=hostname2,
                                           port=port, username=user2,
                                           password=password2,
                                           prompt=r'[$#%]')
            # set config in remote machine
            if config_options:
                for each_option in config_options:
                    session1.cmd_output("echo '%s' >> ~/.ssh/config" %
                                        each_option)
        session2.cmd_output('mkdir -p ~/.ssh')
        session2.cmd_output('chmod 700 ~/.ssh')
        session2.cmd_output("echo '%s' >> ~/.ssh/authorized_keys; " %
                            public_key)
        session2.cmd_output('chmod 600 ~/.ssh/authorized_keys')
        logging.debug('SSH key setup on %s complete.', session2)
    except Exception as err:
        logging.debug('SSH key setup has failed: %s', err)
        try:
            session1.close()
            session2.close()
        except Exception:
            pass
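
A minimal usage sketch for the helper above. This is hedged: it assumes the function is importable as ssh_key.setup_remote_ssh_key (the way Examples #5 and #23 call it), and every host, user and password below is an illustrative placeholder.

from virttest import ssh_key  # assumption: avocado-vt style layout, as used in Example #5

# Authorize 10.0.0.1's public key on 10.0.0.2 so it can ssh there without a
# password (placeholder hosts and credentials).
ssh_key.setup_remote_ssh_key("10.0.0.1", "root", "password1",
                             hostname2="10.0.0.2", user2="root",
                             password2="password2",
                             config_options=["StrictHostKeyChecking=no"])

# Omitting hostname2 authorizes 10.0.0.1's key on the local machine instead.
ssh_key.setup_remote_ssh_key("10.0.0.1", "root", "password1")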
Example #2
 def test(self):
     log_file = os.path.join(self.srcdir, "file")
     session_int = remote.RemoteRunner("ssh", self.ip, 22, self.user_name, self.password,
                                       self.prompt, "\n", log_file, 100, 10, None)
     session_int.run("cat /etc/os-release", 600, "True")
     if "Ubuntu" in open(log_file).read():
         file_list = ['dmesg', 'dump']
         f_val = 12
         session_int.run("DEBIAN_FRONTEND=noninteractive apt-get install -y  linux-crashdump;", 600, "True")
         crashkernel_value = 'GRUB_CMDLINE_LINUX_DEFAULT=\"$GRUB_CMDLINE_LINUX_DEFAULT\
                              crashkernel=2G-4G:320M,4G-32G:512M,32G-64G:1024M,64G-128G:2048M,128G-:4096M\"'
         cmd = "echo \'%s\' > /etc/default/grub.d/kexec-tools.cfg;" % crashkernel_value
         session_int.run(cmd, 600, "True")
         session_int.run("sudo update-grub;", 600, "True")
         session_reboot = remote.remote_login("ssh", self.ip, 22, self.user_name, self.password,
                                              self.prompt, "\n", None, 100, None, None, False)
         session_reboot.sendline('reboot;')
         time.sleep(600)
         self.log.info("Connecting after reboot")
         session_status = remote.RemoteRunner("ssh", self.ip, 22, self.user_name, self.password,
                                              self.prompt, "\n", log_file, 100, 10, None)
         session_status.run("kdump-config show", 600, "True")
         if self.run_cmd_out("cat %s | grep -Eai 'Not ready to'" % log_file):
             self.fail("Kdump is not operational")
         else:
             self.log.info("Kdump status is operational")
     if "rhel" in open(log_file).read():
         file_list = ['vmcore-dmesg.txt', 'vmcore']
         f_val = 11
         session_int.run("kdumpctl status", 600, "True")
         if self.run_cmd_out("cat %s | grep -Eai 'Kdump is not operational'" % log_file):
             self.fail("Kdump is not operational")
         else:
             self.log.info("Kdump status is operational")
     session_crash = remote.remote_login("ssh", self.ip, 22, self.user_name, self.password,
                                         self.prompt, "\n", None, 100, None, None, False)
     session_crash.sendline('echo 1 > /proc/sys/kernel/sysrq;')
     session_crash.sendline('echo "c" > /proc/sysrq-trigger;')
     time.sleep(600)
     self.log.info("Connecting after reboot")
     session_check = remote.RemoteRunner("ssh", self.ip, 22, self.user_name, self.password,
                                         self.prompt, "\n", log_file, 100, 10, None)
     session_check.run("ls -lrt /var/crash", 100, "True")
     crash_dir = self.run_cmd_out("cat %s | grep drwxr | tail -1 | cut -d' ' -f%s" % (log_file, f_val))
     path_crash_dir = os.path.join("/var/crash", crash_dir)
     print(path_crash_dir)
     session_check.run("ls -lrt %s" % path_crash_dir, 100, "True")
     for files in file_list:
         if files not in open(log_file).read():
             self.fail("%s is not saved" % files)
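
The long positional remote.RemoteRunner(...) calls above (and in several later examples) are easier to read with keywords. Below is a hedged sketch of the same construction: the parameter names (client, host, port, username, password, prompt, linesep, log_filename, timeout, internal_timeout, session) are inferred from the argument order these examples use and should be checked against the installed remote module; the host, credentials and paths are placeholders.

from virttest import remote  # assumption: avocado-vt style layout

# Keyword form of RemoteRunner("ssh", ip, 22, user, password, prompt, "\n",
# log_file, 100, 10, None); parameter names are assumptions, values are placeholders.
runner = remote.RemoteRunner(client="ssh", host="192.0.2.10", port=22,
                             username="root", password="password",
                             prompt=r"\$ ", linesep="\n",
                             log_filename="/tmp/kdump.log", timeout=100,
                             internal_timeout=10, session=None)
runner.run("cat /etc/os-release", timeout=600, ignore_status=True)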
Example #3
 def test(self):
     log_file_server = os.path.join(self.workdir, "file_server")
     self.configure_kdump()
     self.configure_nfs()
     session_check = remote.RemoteRunner("ssh", self.ip_server, 22, self.user_name_server, self.password_server,
                                         self.prompt_server, "\n", log_file_server, 100, 10, None)
     session_check.run("date +%s", 100, "True")
     time_init = self.run_cmd_out("cat %s | tail -3 | head -1 | cut -d' ' -f3" % log_file_server).strip()
     session_crash = remote.remote_login("ssh", self.ip, 22, self.user_name, self.password,
                                         self.prompt, "\n", None, 100, None, None, False)
     session_crash.sendline('echo 1 > /proc/sys/kernel/sysrq;')
     session_crash.sendline('echo "c" > /proc/sysrq-trigger;')
     time.sleep(300)
     self.log.info("Connecting to nfs server")
     session_check = remote.RemoteRunner("ssh", self.ip_server, 22, self.user_name_server, self.password_server,
                                         self.prompt_server, "\n", log_file_server, 100, 10, None)
     if self.distro == "rhel":
         nfs_dir_path = os.path.join(self.nfs_path, "var", "crash")
     print(nfs_dir_path)
     session_check.run("ls -lrt %s;" % nfs_dir_path, 100, "True")
     crash_dir = self.run_cmd_out("cat %s | grep drwxr | awk '{print $NF}' | tail -1" % log_file_server)
     path_crash_dir = os.path.join(nfs_dir_path, crash_dir)
     session_check.run("stat -c%%Z %s" % path_crash_dir, 100, "True")
     time_created = self.run_cmd_out("cat %s | tail -3 | head -1 | cut -d' ' -f3" % log_file_server).strip()
     if time_created < time_init:
         self.fail("Dump is not saved in ssh server")
     session_check.run("ls -lrt %s" % path_crash_dir, 100, "True")
     for files in self.file_list:
         if files not in open(log_file_server).read():
             self.fail("%s is not saved" % files)
Example #4
def remote_test(remote_ip, local_ip, remote_pwd, remote_prompt,
                vm_name, status_error_test):
    """
    Test remote case
    """
    err = ""
    status = 1
    status_error = status_error_test
    try:
        remote_uri = libvirt_vm.complete_uri(local_ip)
        session = remote.remote_login("ssh", remote_ip, "22",
                                      "root", remote_pwd, remote_prompt)
        session.cmd_output('LANG=C')
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_uri, vm_name)
        if virsh.has_command_help_match("setvcpus", "--live") is None:
            raise error.TestNAError("The current libvirt doesn't support"
                                    " '--live' option for setvcpus")
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        if status != 0:
            err = output
    except error.CmdError:
        status = 1
        err = "remote test failed"
    return status, status_error, err
Example #5
    def list_local_domains_on_remote(options_ref, remote_ip, remote_passwd,
                                     local_ip, remote_user, local_user,
                                     local_pwd):
        """
        Create a virsh list command and execute it on remote host.
        It will list local domains on remote host.

        :param options_ref: options for the virsh list command.
        :param remote_ip: remote host's ip.
        :param remote_passwd: remote host's password.
        :param local_ip: local ip, used to create the uri for virsh list.
        :param remote_user: user to log in as on the remote host.
        :param local_user: user on the local host (for ssh key setup).
        :param local_pwd: password of the local user (for ssh key setup).
        :return: status and output of the virsh list command.
        """
        complete_uri = libvirt_vm.complete_uri(local_ip)
        command_on_remote = ("virsh -c %s list %s"
                             % (complete_uri, options_ref))
        try:
            # setup autologin for ssh from remote machine to execute commands
            # remotely
            config_opt = ["StrictHostKeyChecking=no"]
            ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                         remote_passwd, hostname2=local_ip,
                                         user2=local_user,
                                         password2=local_pwd,
                                         config_options=config_opt)
            session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                          remote_passwd, "#")
            time.sleep(5)
            status, output = session.cmd_status_output(
                command_on_remote, internal_timeout=30)
        except Exception as info:
            logging.error("Shell failed to execute command from"
                          " remote")
            return 1, info
        return status, output
Example #6
 def receiver():
     """ Receive side """
     logging.info("Starting receiver process on %s", receiver_addr)
     if vm_receiver:
         session = vm_receiver.wait_for_login(timeout=login_timeout)
     else:
         username = params.get("username", "")
         password = params.get("password", "")
         prompt = params.get("shell_prompt", r"[\#\$]")
         linesep = eval("'%s'" % params.get("shell_linesep", r"\n"))
         client = params.get("shell_client")
         port = int(params.get("shell_port"))
         log_filename = ("session-%s-%s.log" % (receiver_addr,
                         utils_misc.generate_random_string(4)))
         session = remote.remote_login(client, receiver_addr, port,
                                       username, password, prompt,
                                       linesep, log_filename, timeout)
         session.set_status_test_command("echo %errorlevel%")
     install_ntttcp(session)
     ntttcp_receiver_cmd = params.get("ntttcp_receiver_cmd")
     global _receiver_ready
     f = open(results_path + ".receiver", 'a')
     for b in buffers:
         utils_misc.wait_for(lambda: not _wait(), timeout)
         _receiver_ready = True
         rbuf = params.get("fixed_rbuf", b)
         cmd = ntttcp_receiver_cmd % (
             session_num, receiver_addr, rbuf, buf_num)
         r = session.cmd_output(cmd, timeout=timeout,
                                print_func=logging.debug)
         f.write("Send buffer size: %s\n%s\n%s" % (b, cmd, r))
     f.close()
     session.close()
Example #7
 def remote_test(params, vm_name):
     """
     Test remote case.
     """
     remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
     local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
     remote_pwd = params.get("remote_pwd", "")
     status = 0
     output = ""
     err = ""
     try:
         if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
             raise error.TestNAError("remote_ip and/or local_ip parameters "
                                     "not changed from default values.")
         uri = libvirt_vm.complete_uri(local_ip)
         session = remote.remote_login("ssh", remote_ip, "22", "root",
                                       remote_pwd, "#")
         session.cmd_output('LANG=C')
         command = "virsh -c %s dominfo %s" % (uri, vm_name)
         status, output = session.cmd_status_output(command,
                                                    internal_timeout=5)
         if status != 0:
             err = output
         session.close()
     except error.CmdError:
         status = 1
         output = ""
         err = "remote test failed"
     return status, output, err
Example #8
    def start_ip_sniffing(self, params):
        """
        Start ip sniffing.

        :param params: Params object.
        """
        self.data.setdefault("address_cache", ip_sniffing.AddrCache())
        sniffers = ip_sniffing.Sniffers

        if not self._sniffer:
            remote_pp = params.get("remote_preprocess") == "yes"
            remote_opts = None
            session = None
            if remote_pp:
                client = params.get('remote_shell_client', 'ssh')
                remote_opts = (params['remote_node_address'],
                               params.get('remote_shell_port', '22'),
                               params['remote_node_user'],
                               params['remote_node_password'],
                               params.get('remote_shell_prompt', '#'))
                session = remote.remote_login(client, *remote_opts)
            for s_cls in sniffers:
                if s_cls.is_supported(session):
                    self._sniffer = s_cls(self.data["address_cache"],
                                          "ip-sniffer.log",
                                          remote_opts)
                    break
            if session:
                session.close()

        if not self._sniffer:
            raise exceptions.TestError("Can't find any supported ip sniffer! "
                                       "%s" % [s.command for s in sniffers])

        self._sniffer.start()
Example #9
 def get_remote_host_session():
     dsthostssh = remote.remote_login("ssh", dsthost, 22, "root",
                                      passwd, "#", timeout=30)
     if dsthostssh:
         dsthostssh.set_status_test_command("echo $?")
         return dsthostssh
     else:
         return None
Example #10
def run_virsh_reboot(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment.(libvirts service)
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = params.get("status_error")
    extra = params.get("reboot_extra")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "remote_name":
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("remote_ip and/or local_ip parameters not "
                                    "changed from default values")
        complete_uri = libvirt_vm.complete_uri(local_ip)
        try:
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s reboot %s" % (complete_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        # FIXME: Catch specific exception
        except Exception as detail:
            logging.error("Exception: %s", str(detail))
            status = -1
Example #11
 def test(self):
     log_file = os.path.join(self.workdir, "file")
     session_init = remote.RemoteRunner("ssh", self.ip, 22, self.user_name, self.password,
                                        self.prompt, "\n", log_file, 100, 10, None)
     session_init.run("cat /boot/config-`uname -r` | grep PSTORE", 600, "True")
     if not self.run_cmd_out("cat %s | grep -Eai 'CONFIG_PSTORE=y'" % log_file):
         self.fail("Pstore in not configured")
     session_init.run("mount", 600, "True")
     if not self.run_cmd_out("cat %s | grep -Eai 'debugfs on /sys/kernel/debug'" % log_file):
         self.fail("debugfs is not mounted")
     session_init.run("ls -lrt /sys/fs/pstore", 100, "True")
     file_list = ['common-nvram', 'dmesg-nvram']
     for files in file_list:
         if files not in open(log_file).read():
             self.fail("%s is not saved" % files)
     process.run("echo "" > %s" % log_file, ignore_status=True, sudo=True, shell=True)
     session_init.run("date +%s", 100, "True")
     time_init = self.run_cmd_out("cat %s | tail -3 | head -1 | cut -d' ' -f3" % log_file).strip()
     session1 = remote.remote_login("ssh", self.ip, 22, self.user_name, self.password,
                                    self.prompt, "\n", None, 100, None, None, False)
     session1.sendline('echo "c" > /proc/sysrq-trigger;')
     time.sleep(600)
     self.log.info("Connecting after reboot")
     session2 = remote.RemoteRunner("ssh", self.ip, 22, self.user_name, self.password,
                                    self.prompt, "\n", log_file, 100, 10, None)
     session2.run("ls -lrt /sys/fs/pstore", 100, "True")
     for files in file_list:
         if files not in open(log_file).read():
             self.fail("%s is not saved" % files)
         file_path = os.path.join('/sys/fs/pstore', "*%s*" % files)
         session2.run("stat -c%%Z %s" % file_path, 100, "True")
         time_created = self.run_cmd_out("cat %s | tail -3 | head -1 | cut -d' ' -f3" % log_file).strip()
         if time_created < time_init:
             self.fail("New %s is not saved" % files)
     process.run("echo "" > %s" % log_file, ignore_status=True, sudo=True, shell=True)
     session2.run("cat /etc/os-release", 600, "True")
     if "rhel" in open(log_file).read():
         session2.run("yum install sos", 600, "True")
     if "Ubuntu" in open(log_file).read():
         session2.run("yum install sosreport", 600, "True")
     session2.run("sosreport --no-report --batch --build", 100, "True")
     dir_name = self.run_cmd_out("cat %s | grep located | cut -d':' -f2" % log_file).strip()
     sosreport_dir = os.path.join(dir_name, '/sys/fs/pstore/')
     session2.run("ls -lrt %s" % sosreport_dir, 100, "True")
     for files in file_list:
         if files not in open(log_file).read():
             self.fail("%s is not saved" % files)
         file_path = os.path.join(sosreport_dir, "*%s*" % files)
         session2.run("stat -c%%Z %s" % file_path, 100, "True")
         time_created = self.run_cmd_out("cat %s | tail -3 | head -1 | cut -d' ' -f3" % log_file).strip()
         if time_created < time_init:
             self.fail("sosreport contains wrong %s file" % files)
Example #12
    def __init__(self, test, params):
        self.vm_name = params.get("main_vm", "test-vm1")
        self.test = test
        self.env = params.get('env')
        self.time_out = int(params.get('time_out'))
        self.time_out_test = "yes" == params.get('time_out_test')
        self.remote_ip = params.get('remote_ip')
        self.remote_user = params.get('remote_user')
        self.local_ip = params.get('local_ip')
        if self.remote_ip.count("ENTER") or self.local_ip.count("ENTER"):
            self.test.cancel("Please set remote/local ip in base.cfg")
        self.remote_pwd = params.get('remote_pwd')
        self.local_mnt = params.get('local_mnt')
        self.remote_mnt = params.get('remote_mnt')
        self.session = remote.remote_login("ssh", self.remote_ip, "22",
                                           self.remote_user,
                                           self.remote_pwd, "#")
        self.session.cmd("setsebool virt_use_nfs on")
        local_hostname = process.run("hostname", shell=True).stdout_text.strip()
        remote_hostname = self.session.cmd_output("hostname")

        def file_add(a_str, a_file, session=None):
            """
            Add detail to a file
            """
            write_cmd = "echo '%s' >> %s" % (a_str, a_file)
            if session:
                session.cmd(write_cmd)
            else:
                process.run(write_cmd, shell=True)

        # Edit /etc/hosts file on local and remote host
        backup_hosts_cmd = "cat /etc/hosts > /etc/hosts.bak"
        process.run(backup_hosts_cmd, shell=True)
        self.session.cmd(backup_hosts_cmd)
        hosts_local_str = "%s %s" % (self.local_ip, local_hostname)
        hosts_remote_str = "%s %s" % (self.remote_ip, remote_hostname)
        file_add(hosts_local_str, "/etc/hosts")
        file_add(hosts_remote_str, "/etc/hosts")
        file_add(hosts_local_str, "/etc/hosts", self.session)
        file_add(hosts_remote_str, "/etc/hosts", self.session)

        # Edit /etc/exports file on local host
        process.run("cat /etc/exports > /etc/exports.bak", shell=True)
        exports_str = "%s *(insecure,rw,sync,no_root_squash)" % self.local_mnt
        file_add(exports_str, "/etc/exports")
        nfs_mount_cmd = "mount -t nfs %s:%s %s"\
                        % (self.local_ip, self.local_mnt, self.remote_mnt)
        self.session.cmd(nfs_mount_cmd)
        vm = self.env.get_vm(self.vm_name)
        vm.wait_for_login()
Example #13
 def setup_esx_ssh_key(hostname, user, password, port=22):
     """
     Set up remote login on an ESX server using a public key
     """
     logging.debug('Performing SSH key setup on %s:%d as %s.' %
                   (hostname, port, user))
     try:
         session = remote.remote_login(client='ssh', host=hostname,
                                       username=user, port=port,
                                       password=password, prompt=r'[ $#%]')
         public_key = ssh_key.get_public_key()
         session.cmd("echo '%s' >> /etc/ssh/keys-root/authorized_keys; " %
                     public_key)
         logging.debug('SSH key setup complete.')
         session.close()
     except Exception as err:
         logging.debug('SSH key setup has failed. %s', err)
Example #14
    def list_local_domains_on_remote(options_ref, remote_ip, remote_passwd, local_ip):
        """
        Create a virsh list command and execute it on remote host.
        It will list local domains on remote host.

        @param options_ref: options for the virsh list command.
        @param remote_ip: remote host's ip.
        @param remote_passwd: remote host's password.
        @param local_ip: local ip, used to create the uri for virsh list.
        @return: status and output of the virsh list command.
        """
        command_on_remote = "virsh -c qemu+ssh://%s/system list %s" % (local_ip, options_ref)
        session = remote.remote_login("ssh", remote_ip, "22", "root", remote_passwd, "#")
        time.sleep(5)
        status, output = session.cmd_status_output(command_on_remote, internal_timeout=5)
        time.sleep(5)
        session.close()
        return int(status), output
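
A hedged call sketch for the helper above. The addresses and password are placeholders, logging is assumed to be imported as in the other examples, and "--all" is just one possible options_ref value.

# Placeholder addresses and password; "--all" lists running and shut-off domains
# defined on the local libvirt host, as seen from the remote host.
status, output = list_local_domains_on_remote("--all", "192.0.2.20",
                                              "remote-password", "192.0.2.10")
if status != 0:
    logging.error("Listing local domains via the remote host failed: %s", output)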
Example #15
def network_restart(test, params):
    """
    Restart remote network
    """
    time_out = int(params.get('time_out'))
    remote_ip = params.get('remote_ip')
    remote_user = params.get('remote_user')
    remote_pwd = params.get('remote_pwd')
    session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                  remote_pwd, "#")
    runner = remote.RemoteRunner(session=session)
    net_service = service.Factory.create_service("network", runner.run)
    net_service.restart()
    session.close()
    try:
        remote.wait_for_login("ssh", remote_ip, "22", remote_user,
                              remote_pwd, "#", timeout=time_out)
    except remote.LoginTimeoutError as detail:
        test.error(str(detail))
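
The pattern above (open a session with remote.remote_login, wrap it in remote.RemoteRunner, and hand runner.run to service.Factory) works for other services too. A hedged sketch restarting a different service follows; the host, password and service name are placeholders, and the remote and service modules are assumed to be the same ones imported by the surrounding code.

# Reuse an existing login session to restart sshd on a remote host
# (placeholder address and password).
session = remote.remote_login("ssh", "192.0.2.30", "22", "root",
                              "password", "#")
runner = remote.RemoteRunner(session=session)
sshd_service = service.Factory.create_service("sshd", runner.run)
sshd_service.restart()
session.close()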
Example #16
def remote_test(remote_ip, local_ip, remote_pwd, remote_prompt, vm_name):
    """
    Test remote case
    """
    err = ""
    try:
        remote_uri = libvirt_vm.complete_uri(local_ip)
        session = remote.remote_login("ssh", remote_ip, "22",
                                      "root", remote_pwd, remote_prompt)
        session.cmd_output('LANG=C')
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_uri, vm_name)
        if virsh.has_command_help_match("setvcpus", "--live") is None:
            status_error = "yes"
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        if status != 0:
            err = output
    except error.CmdError:
        status = 1
        status_error = "yes"
        err = "remote test failed"
    return status, status_error, err
Example #17
def check_dest_vm_network(vm, remote_host, username, password,
                          shell_prompt):
    """
    Ping migrated vms on remote host.
    """
    session = remote.remote_login("ssh", remote_host, 22, username,
                                  password, shell_prompt)
    # Timeout to wait vm's network
    logging.debug("Getting vm's IP...")
    timeout = 60
    while timeout > 0:
        try:
            ping_cmd = "ping -c 4 %s" % vm.get_address()
            break
        except virt_vm.VMAddressError:
            time.sleep(5)
            timeout -= 5
    if timeout <= 0:
        raise error.TestFail("Can not get remote vm's IP.")
    status, output = session.cmd_status_output(ping_cmd)
    if status:
        raise error.TestFail("Check %s IP failed:%s" % (vm.name, output))
Example #18
def check_dest_vm_network(vm, ip, remote_host, username, password,
                          shell_prompt):
    """
    Ping migrated vms on remote host.
    """
    session = remote.remote_login("ssh", remote_host, 22, username,
                                  password, shell_prompt)
    # Timeout to wait vm's network
    logging.debug("verifying VM's IP...")
    timeout = 60
    ping_failed = True
    ping_cmd = "ping -c 4 %s" % ip
    while timeout > 0:
        ps, po = session.cmd_status_output(ping_cmd)
        if ps:
            time.sleep(5)
            timeout -= 5
            continue
        logging.error(po)
        ping_failed = False
        break
    if ping_failed:
        raise error.TestFail("Check %s IP failed." % vm.name)
Example #19
def setup_ssh_key(hostname, user, password, port=22):
    """
    Set up remote login to another server using a public key

    :param hostname: the server to login
    :type hostname: str
    :param user: user to login
    :type user: str
    :param password: password
    :type password: str
    :param port: port number
    :type port: int
    """
    logging.debug('Performing SSH key setup on %s:%d as %s.' %
                  (hostname, port, user))

    try:
        session = remote.remote_login(client='ssh', host=hostname,
                                      username=user, port=port,
                                      password=password, prompt=r'[$#%]')
        public_key = get_public_key()

        session.cmd('mkdir -p ~/.ssh')
        session.cmd('chmod 700 ~/.ssh')
        session.cmd("echo '%s' >> ~/.ssh/authorized_keys; " %
                    public_key)
        session.cmd('chmod 600 ~/.ssh/authorized_keys')
        logging.debug('SSH key setup complete.')

    except Exception:
        logging.debug('SSH key setup has failed.')

    finally:
        try:
            session.close()
        except Exception:
            pass
Example #20
    def configure_kdump(self):
        log_file_init = os.path.join(self.workdir, "file1")
        session_int = remote.RemoteRunner("ssh", self.ip, 22, self.user_name, self.password,
                                          self.prompt, "\n", log_file_init, 100, 10, None)
        session_int.run("cat /etc/os-release", 20, "True")

        if "Ubuntu" in open(log_file_init).read():
            self.distro = "Ubuntu"
            self.file_list = ['dmesg', 'dump']
            session_int.run("DEBIAN_FRONTEND=noninteractive apt-get install -y  linux-crashdump;", 600, "True")
            crashkernel_value = 'GRUB_CMDLINE_LINUX_DEFAULT=\"$GRUB_CMDLINE_LINUX_DEFAULT\
                                 crashkernel=2G-4G:320M,4G-32G:512M,32G-64G:1024M,64G-128G:2048M,128G-:4096M\"'
            cmd = "echo \'%s\' > /etc/default/grub.d/kexec-tools.cfg;" % crashkernel_value
            session_int.run(cmd, 60, "True")
            session_int.run("sudo update-grub;", 600, "True")
            session_reboot = remote.remote_login("ssh", self.ip, 22, self.user_name, self.password,
                                                 self.prompt, "\n", None, 100, None, None, False)
            session_reboot.sendline('reboot;')
            time.sleep(600)
            self.log.info("Connecting after reboot")
            session_status = remote.RemoteRunner("ssh", self.ip, 22, self.user_name, self.password,
                                                 self.prompt, "\n", log_file_init, 100, 10, None)
            session_status.run("kdump-config show", 60, "True")
            if self.run_cmd_out("cat %s | grep -Eai 'Not ready to'" % log_file_init):
                self.fail("Kdump is not operational")
            else:
                self.log.info("Kdump status is operational")
            session_status.session.kill()
        if "rhel" in open(log_file_init).read():
            self.distro = "rhel"
            self.file_list = ['vmcore-dmesg.txt', 'vmcore']
            session_int.run("kdumpctl status", 60, "True")
            if self.run_cmd_out("cat %s | grep -Eai 'Kdump is not operational'" % log_file_init):
                self.fail("Kdump is not operational")
            else:
                self.log.info("Kdump status is operational")
Example #21
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to a xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment.(libvirts service,VM)
    6.Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Back up xml file. Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(test.tmpdir, snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    raise error.TestNAError("This domain has snapshot(s), "
                                            "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                raise error.TestFail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            virsh.attach_disk(vm_name, volume, disk_target, "--config")

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref, option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True, debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            try:
                uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (error.CmdError, remote.LoginError, aexpect.ShellError) as de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except error.CmdError as detail:
                logging.error("Detail: %s", detail)
Example #22
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment.(libvirts service)
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        try:
            vm.prepare_guest_agent(channel=agent, start=agent)
        except virt_vm.VMError as e:
            logging.debug(e)
            # qemu-guest-agent is not available on RHEL5
            raise error.TestNAError(
                "qemu-guest-agent package is not available")

        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except (remote.LoginError, error.CmdError, aexpect.ShellError) as e:
                logging.error("Exception: %s", str(e))
                status = -1
Example #23
def run(test, params, env):
    """
    Test command: virsh shutdown.

    The command can gracefully shut down a domain.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh shutdown operation.
    4.Recover test environment.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("shutdown_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    agent = ("yes" == params.get("shutdown_agent", "no"))
    mode = params.get("shutdown_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    libvirtd = params.get("libvirtd", "on")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    timeout = eval(params.get("shutdown_timeout", "60"))
    readonly = "yes" == params.get("shutdown_readonly", "no")
    expect_msg = params.get("shutdown_err_msg")

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in"
                        " current libvirt version.")

    try:
        # Add or remove qemu-agent from guest before test
        vm.prepare_guest_agent(channel=agent, start=agent)
        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        # run test case
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = "%s %s" % (vm_name, params.get("shutdown_extra"))
        elif vm_ref == "uuid":
            vm_ref = domuuid

        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        if vm_ref != "remote":
            result = virsh.shutdown(vm_ref, mode,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, debug=True,
                                    ignore_status=True,
                                    readonly=readonly)
            status = result.exit_status
        else:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            local_pwd = params.get("local_pwd", "password")
            local_user = params.get("username", "root")
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("Remote test parameters"
                            " unchanged from default")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                # set up auto ssh login from remote machine to
                # execute commands
                config_opt = ["StrictHostKeyChecking=no"]
                ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                             remote_pwd, hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd,
                                             config_options=config_opt)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = ("virsh -c %s shutdown %s %s"
                           % (remote_uri, vm_name, mode))
                status = session.cmd_status(command, internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
            if expect_msg:
                libvirt.check_result(result, expect_msg.split(';'))
        else:
            if status:
                test.fail("Run failed with right command")
            if not vm.wait_for_shutdown(timeout):
                test.fail("Failed to shutdown in timeout %s", timeout)
    finally:
        xml_backup.sync()
Example #24
def run(test, params, env):
    """
    Convert specific xen guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.cancel('Missing command: virt-v2v')
    vm_name = params.get('main_vm')
    new_vm_name = params.get('new_vm_name')
    xen_host = params.get('xen_hostname')
    xen_host_user = params.get('xen_host_user', 'root')
    xen_host_passwd = params.get('xen_host_passwd', 'redhat')
    output_mode = params.get('output_mode')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    v2v_opts = '-v -x' if params.get('v2v_debug', 'on') == 'on' else ''
    if params.get("v2v_opts"):
        # Add a blank by force
        v2v_opts += ' ' + params.get("v2v_opts")
    status_error = 'yes' == params.get('status_error', 'no')
    skip_vm_check = params.get('skip_vm_check', 'no')
    skip_reason = params.get('skip_reason')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    bk_list = ['vnc_autoport', 'vnc_encrypt', 'vnc_encrypt_warning']
    error_list = []
    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    os_pool = os_storage = params.get('output_storage', 'default')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")
    virsh_instance = None

    def log_fail(msg):
        """
        Log error and update error list
        """
        LOG.error(msg)
        error_list.append(msg)

    def set_graphics(virsh_instance, param):
        """
        Set graphics attributes of vm xml
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        graphic = vmxml.xmltreefile.find('devices').find('graphics')
        for key in param:
            LOG.debug('Set %s=\'%s\'' % (key, param[key]))
            graphic.set(key, param[key])
        vmxml.sync(virsh_instance=virsh_instance)

    def check_grub_file(vmcheck, check):
        """
        Check grub file content
        """
        LOG.info('Checking grub file')
        grub_file = utils_misc.get_bootloader_cfg(session=vmcheck.session)
        if not grub_file:
            test.error('Grub file not found')
        content = vmcheck.session.cmd('cat %s' % grub_file)
        if check == 'console_xvc0':
            if 'console=xvc0' in content:
                log_fail('"console=xvc0" still exists')

    def check_kernel(vmcheck):
        """
        Check content of /etc/sysconfig/kernel
        """
        LOG.info('Checking /etc/sysconfig/kernel file')
        content = vmcheck.session.cmd('cat /etc/sysconfig/kernel')
        LOG.debug(content)
        if 'DEFAULTKERNEL=kernel' not in content:
            log_fail('Did not find "DEFAULTKERNEL=kernel"')
        elif 'DEFAULTKERNEL=kernel-xen' in content:
            log_fail('DEFAULTKERNEL is "kernel-xen"')

    def check_sound_card(vmcheck, check):
        """
        Check sound status of vm from xml
        """
        xml = virsh.dumpxml(
            vm_name, session_id=vmcheck.virsh_session_id).stdout
        LOG.debug(xml)
        if check == 'sound' and '<sound model' in xml:
            log_fail('Sound card should be removed')
        if check == 'pcspk' and output_mode == 'libvirt' and "<sound model='pcspk'" not in xml:
            log_fail('Sound card should be "pcspk"')

    def check_disk(vmcheck, count):
        """
        Check if number of disks meets expectation
        """
        LOG.info('Expect number of disks: %d', count)
        actual = vmcheck.session.cmd('lsblk |grep disk |wc -l').strip()
        LOG.info('Actual number of disks: %s', actual)
        if int(actual) != count:
            log_fail('Number of disks is wrong')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout_text + result.stderr_text
        if not status_error and checkpoint != 'vdsm':
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(params, address_cache,
                                                    timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            LOG.info('Checking common checkpoints for v2v')
            if params.get('skip_vm_check') != 'yes':
                ret = vmchecker.run()
                if len(ret) == 0:
                    LOG.info("All common checkpoints passed")
            else:
                LOG.info(
                    'Skip checking vm after conversion: %s' %
                    skip_reason)
            # Check specific checkpoints
            if checkpoint == 'console_xvc0':
                check_grub_file(vmchecker.checker, 'console_xvc0')
            if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
                vmchecker.check_graphics(params[checkpoint])
            if checkpoint == 'pv_with_regular_kernel':
                check_kernel(vmchecker.checker)
            if checkpoint in ['sound', 'pcspk']:
                check_sound_card(vmchecker.checker, checkpoint)
            if checkpoint == 'multidisk':
                check_disk(vmchecker.checker, params['disk_count'])
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        # Merge 2 error lists
        if params.get('vmchecker'):
            error_list.extend(params['vmchecker'].errors)
        if len(error_list):
            test.fail(
                '%d checkpoints failed: %s' %
                (len(error_list), error_list))

    try:
        v2v_sasl = None

        v2v_params = {
            'hostname': xen_host, 'hypervisor': 'xen', 'main_vm': vm_name,
            'v2v_opts': v2v_opts, 'input_mode': 'libvirt',
            'new_name': new_vm_name,
            'password': xen_host_passwd,
            'os_pool': os_pool,
            'os_storage': os_storage,
            'os_storage_name': storage_name,
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'output_method': output_method,
            'rhv_upload_opts': rhv_upload_opts,
            'params': params
        }

        bk_xml = None
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # See man virt-v2v-input-xen(1)
        process.run(
            'update-crypto-policies --set LEGACY',
            verbose=True,
            ignore_status=True,
            shell=True)

        # Setup ssh-agent access to xen hypervisor
        LOG.info('set up ssh-agent access ')
        xen_pubkey, xen_session = utils_v2v.v2v_setup_ssh_key(
            xen_host, xen_host_user, xen_host_passwd, auto_close=False)
        utils_misc.add_identities_into_ssh_agent()

        if params.get('output_format'):
            v2v_params.update({'of_format': params.get('output_format')})

        # Build rhev related options
        if output_mode == 'rhev':
            # RHV doesn't support 'qcow2' right now
            v2v_params['of_format'] = 'raw'
            # create different sasl_user name for different job
            params.update({'sasl_user': params.get("sasl_user") +
                           utils_misc.generate_random_string(3)})
            LOG.info('sasl user name is %s' % params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            LOG.debug('A SASL session %s was created', v2v_sasl)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd,
                                       ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        uri = utils_v2v.Uri('xen').get_uri(xen_host)
        virsh_dargs = {'uri': uri,
                       'remote_ip': xen_host,
                       'remote_user': '******',
                       'remote_pwd': xen_host_passwd,
                       'auto_close': True,
                       'debug': True}
        virsh_instance = utils_v2v.wait_for(virsh.VirshPersistent, **virsh_dargs)
        LOG.debug('A new virsh session %s was created', virsh_instance)
        if not utils_v2v.wait_for(virsh_instance.domain_exists, name=vm_name):
            test.error('VM %s does not exist' % vm_name)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            LOG.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.strip().startswith(('hda', 'vda', 'sda', 'xvda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params['hypervisor'] = 'kvm'
                v2v_params.update({'input_file': params['img_path']})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
        if checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
            v2v_params['os_pool'] = pooluuid
        if checkpoint.startswith('vnc'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'vnc'},
                                           virsh_instance=virsh_instance)
            if checkpoint == 'vnc_autoport':
                params[checkpoint] = {'autoport': 'yes'}
                vm_xml.VMXML.set_graphics_attr(vm_name, params[checkpoint],
                                               virsh_instance=virsh_instance)
            elif checkpoint in ['vnc_encrypt', 'vnc_encrypt_warning']:
                params[checkpoint] = {
                    'passwd': params.get(
                        'vnc_passwd', 'redhat')}
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
                vm_xml.VMXML.add_security_info(
                    vmxml, params[checkpoint]['passwd'],
                    virsh_instance=virsh_instance)
            LOG.debug(
                virsh_instance.dumpxml(
                    vm_name,
                    extra='--security-info'))
        if checkpoint.startswith('libguestfs_backend'):
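            # checkpoint has the form 'libguestfs_backend_<value>'; slicing
            # off the 19-character prefix leaves only the backend value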
            value = checkpoint[19:]
            if value == 'empty':
                value = ''
            LOG.info('Set LIBGUESTFS_BACKEND to "%s"', value)
            os.environ['LIBGUESTFS_BACKEND'] = value
        if checkpoint == 'same_name':
            LOG.info('Convert guest and rename to %s', new_vm_name)
            v2v_params.update({'new_name': new_vm_name})
        if checkpoint == 'no_passwordless_SSH':
            LOG.info('Unset $SSH_AUTH_SOCK')
            os.unsetenv('SSH_AUTH_SOCK')
        if checkpoint in ['xml_without_image', 'format_convert']:
            xml_file = os.path.join(data_dir.get_tmp_dir(), '%s.xml' % vm_name)
            virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
            v2v_params['hypervisor'] = 'kvm'
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params.update({'input_file': xml_file})
            if params.get('img_path'):
                cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                                params['img_path'], xml_file)
                process.run(cmd)
                LOG.debug(process.run('cat %s' % xml_file).stdout_text)
        if checkpoint == 'ssh_banner':
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            ssh_banner_content = r'"# no default banner path\n' \
                                 r'#Banner /path/banner file\n' \
                                 r'Banner /etc/ssh/ssh_banner"'
            LOG.info('Create ssh_banner file')
            session.cmd(
                'echo -e %s > /etc/ssh/ssh_banner' %
                ssh_banner_content)
            LOG.info('Content of ssh_banner file:')
            LOG.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
            LOG.info('Restart sshd service on xen host')
            session.cmd('service sshd restart')
        if checkpoint == 'cdrom':
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
            LOG.debug(xml.xmltreefile)
            disks = xml.get_disk_all_by_expr('device==cdrom')
            LOG.debug('Disks: %r', disks)
            for disk in list(disks.values()):
                # Check if vm has cdrom attached
                if disk.find('source') is None:
                    test.error('No CDROM image attached')
        if checkpoint == 'vdsm':
            extra_pkg = params.get('extra_pkg')
            LOG.info('Install %s', extra_pkg)
            utils_package.package_install(extra_pkg.split(','))

            # Backup conf file for recovery
            for conf in params['bk_conf'].strip().split(','):
                LOG.debug('Back up %s', conf)
                shutil.copyfile(conf, conf + '.bk')

            LOG.info('Configure libvirt for vdsm')
            process.run('vdsm-tool configure --force')

            LOG.info('Start vdsm service')
            service_manager = service.Factory.create_generic_service()
            service_manager.start('vdsmd')

            # Setup user and password
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = 'localhost'
            v2v_sasl.server_user = params.get('sasl_server_user', 'root')
            v2v_sasl.server_pwd = params.get('sasl_server_passwd')
            v2v_sasl.setup()
            LOG.debug('A SASL session %s was created', v2v_sasl)

            v2v_params['sasl_user'] = params.get("sasl_user")
            v2v_params['sasl_pwd'] = params.get("sasl_pwd")
        if checkpoint == 'multidisk':
            params['disk_count'] = 0
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            LOG.info(blklist)
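            # each line of the domblklist output that contains a path ('/')
            # corresponds to a disk with a backing image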
            for line in blklist:
                if '/' in line:
                    params['disk_count'] += 1
            LOG.info('Total disks: %d', params['disk_count'])

        # Execute virt-v2v
        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(v2v_result, status_error)
    finally:
        LOG.info("Cleaning Environment")
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
        if checkpoint == 'ssh_banner':
            LOG.info('Remove ssh_banner file')
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            session.cmd('rm -f /etc/ssh/ssh_banner')
            session.cmd('service sshd restart')
        # Restore crypto-policies to DEFAULT; the testing environment is not
        # expected to use any other value by default.
        process.run(
            'update-crypto-policies --set DEFAULT',
            verbose=True,
            ignore_status=True,
            shell=True)
        if bk_xml:
            bk_xml.sync(virsh_instance=virsh_instance)
        if virsh_instance:
            LOG.debug('virsh session %s is closing', virsh_instance)
            virsh_instance.close_session()
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'rhev' and v2v_sasl:
            v2v_sasl.cleanup()
            LOG.debug('SASL session %s is closing', v2v_sasl)
            v2v_sasl.close_session()
        if checkpoint == 'vdsm':
            LOG.info('Stop vdsmd')
            service_manager = service.Factory.create_generic_service()
            service_manager.stop('vdsmd')
            if params.get('extra_pkg'):
                utils_package.package_remove(params['extra_pkg'].split(','))
            for conf in params['bk_conf'].strip().split(','):
                if os.path.exists(conf + '.bk'):
                    LOG.debug('Recover %s', conf)
                    os.remove(conf)
                    shutil.move(conf + '.bk', conf)
            LOG.info('Restart libvirtd')
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            LOG.info('Start network "default"')
            virsh.net_start('default')
            virsh.undefine(vm_name)
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        utils_v2v.v2v_setup_ssh_key_cleanup(xen_session, xen_pubkey)
        process.run('ssh-agent -k')
Example #25
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment (libvirtd service).
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra", "")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    reboot_readonly = "yes" == params.get("reboot_readonly", "no")
    wait_time = int(params.get('wait_time', 5))
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        try:
            vm.prepare_guest_agent(channel=agent, start=agent)
        except virt_vm.VMError as e:
            logging.debug(e)
            # qemu-guest-agent is not available on RHEL5
            test.cancel("qemu-guest-agent package is not available")

        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
                if not status:
                    # operating on the guest before the reboot completes may
                    # corrupt data, so wait until it is reachable again
                    vm.wait_for_login().close()
            except (remote.LoginError, process.CmdError,
                    aexpect.ShellError) as e:
                logging.error("Exception: %s", str(e))
                status = -1
        if vm_ref != "remote_name":
            vm_ref = "%s" % vm_ref
            if extra:
                vm_ref += " %s" % extra
            cmdresult = virsh.reboot(vm_ref,
                                     mode,
                                     ignore_status=True,
                                     debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
            # avoid the check if it is negative test
            if not status_error:
                cmdoutput = ''

                def _wait_for_vm_running():
                    # rebind the outer cmdoutput so the failure message after
                    # a timeout can report the last domstate result
                    nonlocal cmdoutput
                    cmdoutput = virsh.domstate(vm_ref,
                                               '--reason',
                                               ignore_status=True,
                                               debug=True)
                    domstate_status = cmdoutput.exit_status
                    output = "running" in cmdoutput.stdout
                    return not domstate_status and output

                if not wait.wait_for(
                        _wait_for_vm_running, timeout=wait_time, step=1):
                    test.fail("Cmd error: %s Error status: %s" %
                              (cmdoutput.stderr, cmdoutput.stdout))
            elif pre_domian_status != 'shutoff':
                vm.wait_for_login().close()
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # Test the readonly mode
        if reboot_readonly:
            result = virsh.reboot(vm_ref,
                                  ignore_status=True,
                                  debug=True,
                                  readonly=True)
            libvirt.check_exit_status(result, expect_error=True)
            # This is for status_error check
            status = result.exit_status

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    test.cancel("Reboot command doesn't work on older libvirt "
                                "versions")
                test.fail("Run failed with right command")
    finally:
        xml_backup.sync()
Example #26
def start_iperf(vm, params):
    """
    Start iperf server on host and run client in vm.
    """
    def iperf_func(session, server_ip, iperf_type, prefix=None, suffix=None):
        """
        Thread to run iperf.
        """
        if iperf_type == "server":
            cmd = "iperf -s"
        elif iperf_type == "client":
            cmd = "iperf -c %s -t 300" % server_ip
        if prefix:
            cmd = "%s %s" % (prefix, cmd)
        if suffix:
            cmd = "%s %s" % (cmd, suffix)
        session.cmd(cmd)

    server_ip = params.get("local_ip")
    server_pwd = params.get("local_pwd")
    queue_count = int(params.get("queue_count", 1))
    vcpu_count = int(params.get("vcpu_count", 1))
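    # Heuristic for the number of iperf clients: reuse the queue count when it
    # already exceeds the vCPU count, otherwise run two clients per queue.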
    if queue_count > vcpu_count:
        client_count = queue_count
    else:
        client_count = queue_count * 2
    host_session = remote.remote_login("ssh", server_ip, 22, "root",
                                       server_pwd, "#")
    # We may start several sessions for client
    client_sessions = []
    # Start server
    prefix = params.get("iperf_prefix")
    host_t = threading.Thread(target=iperf_func,
                              args=(host_session, server_ip, "server"))
    client_ts = []
    for count in range(client_count):
        client_session = vm.wait_for_login()
        client_t = threading.Thread(target=iperf_func,
                                    args=(client_session, server_ip, "client",
                                          prefix))
        client_sessions.append(client_session)
        client_ts.append(client_t)
    host_t.start()
    time.sleep(5)
    for client_t in client_ts:
        client_t.start()
    time.sleep(5)
    # Wait for iperf running
    try:
        if not host_t.is_alive():
            raise error.TestFail("Start iperf on server failed.")
        for client_t in client_ts:
            if not client_t.is_alive():
                raise error.TestFail("Start iperf on client failed.")
    except error.TestFail:
        host_session.close()
        for client_session in client_sessions:
            client_session.close()
        host_t.join(2)
        for client_t in client_ts:
            client_t.join(2)
        raise
    # All iperf are working
    return host_session, client_sessions
Example #27
def run(test, params, env):
    """
    Convert specific xen guest
    """
    for v in list(params.values()):
        if "V2V_EXAMPLE" in v:
            test.cancel("Please set real value for %s" % v)
    if utils_v2v.V2V_EXEC is None:
        test.cancel('Missing command: virt-v2v')
    vm_name = params.get('main_vm')
    new_vm_name = params.get('new_vm_name')
    xen_host = params.get('xen_hostname')
    xen_host_user = params.get('xen_host_user', 'root')
    xen_host_passwd = params.get('xen_host_passwd', 'redhat')
    output_mode = params.get('output_mode')
    v2v_timeout = int(params.get('v2v_timeout', 1200))
    status_error = 'yes' == params.get('status_error', 'no')
    pool_name = params.get('pool_name', 'v2v_test')
    pool_type = params.get('pool_type', 'dir')
    pool_target = params.get('pool_target_path', 'v2v_pool')
    pvt = libvirt.PoolVolumeTest(test, params)
    address_cache = env.get('address_cache')
    checkpoint = params.get('checkpoint', '')
    bk_list = ['vnc_autoport', 'vnc_encrypt', 'vnc_encrypt_warning']
    error_list = []
    # For construct rhv-upload option in v2v cmd
    output_method = params.get("output_method")
    rhv_upload_opts = params.get("rhv_upload_opts")
    storage_name = params.get('storage_name')
    # for get ca.crt file from ovirt engine
    rhv_passwd = params.get("rhv_upload_passwd")
    rhv_passwd_file = params.get("rhv_upload_passwd_file")
    ovirt_engine_passwd = params.get("ovirt_engine_password")
    ovirt_hostname = params.get("ovirt_engine_url").split(
        '/')[2] if params.get("ovirt_engine_url") else None
    ovirt_ca_file_path = params.get("ovirt_ca_file_path")
    local_ca_file_path = params.get("local_ca_file_path")

    def log_fail(msg):
        """
        Log error and update error list
        """
        logging.error(msg)
        error_list.append(msg)

    def set_graphics(virsh_instance, param):
        """
        Set graphics attributes of vm xml
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        graphic = vmxml.xmltreefile.find('devices').find('graphics')
        for key in param:
            logging.debug('Set %s=\'%s\'' % (key, param[key]))
            graphic.set(key, param[key])
        vmxml.sync(virsh_instance=virsh_instance)

    def check_rhev_file_exist(vmcheck):
        """
        Check if rhev files exist
        """
        file_path = {
            'rhev-apt.exe': r'C:\rhev-apt.exe',
            'rhsrvany.exe': r'"C:\program files\redhat\rhev\apt\rhsrvany.exe"'
        }
        fail = False
        for key in file_path:
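            # 'dir' exits with status 0 when it finds the file, meaning the
            # RHEV file is still present after conversion to KVM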
            status = vmcheck.session.cmd_status('dir %s' % file_path[key])
            if not status:
                logging.error('%s exists' % key)
                fail = True
        if fail:
            log_fail('RHEV file exists after convert to kvm')

    def check_grub_file(vmcheck, check):
        """
        Check grub file content
        """
        logging.info('Checking grub file')
        grub_file = utils_misc.get_bootloader_cfg(session=vmcheck.session)
        if not grub_file:
            test.error('Grub file not found')
        content = vmcheck.session.cmd('cat %s' % grub_file)
        if check == 'console_xvc0':
            if 'console=xvc0' in content:
                log_fail('"console=xvc0" still exists')

    def check_kernel(vmcheck):
        """
        Check content of /etc/sysconfig/kernel
        """
        logging.info('Checking /etc/sysconfig/kernel file')
        content = vmcheck.session.cmd('cat /etc/sysconfig/kernel')
        logging.debug(content)
        if 'DEFAULTKERNEL=kernel' not in content:
            log_fail('Did not find "DEFAULTKERNEL=kernel"')
        elif 'DEFAULTKERNEL=kernel-xen' in content:
            log_fail('DEFAULTKERNEL is "kernel-xen"')

    def check_sound_card(vmcheck, check):
        """
        Check sound status of vm from xml
        """
        xml = virsh.dumpxml(vm_name,
                            session_id=vmcheck.virsh_session_id).stdout
        logging.debug(xml)
        if check == 'sound' and '<sound model' in xml:
            log_fail('Sound card should be removed')
        if check == 'pcspk' and "<sound model='pcspk'" not in xml:
            log_fail('Sound card should be "pcspk"')

    def check_rhsrvany_md5(vmcheck):
        """
        Check if MD5 and SHA1 of rhsrvany.exe are correct
        """
        logging.info('Check md5 and sha1 of rhsrvany.exe')
        val_md5, val_sha1 = params.get('val_md5'), params.get('val_sha1')
        logging.info('Expect MD5=%s, SHA1=%s', val_md5, val_sha1)
        if not val_md5 or not val_sha1:
            test.error('No MD5 or SHA1 value provided')
        cmd_sha1 = params.get('cmd_sha1')
        cmd_md5 = cmd_sha1 + ' MD5'
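        # the hash value is expected on the second line of the command output;
        # spaces are stripped before comparing with the expected values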
        sha1 = vmcheck.session.cmd_output(
            cmd_sha1, safe=True).strip().split('\n')[1].replace(' ', '')
        md5 = vmcheck.session.cmd_output(
            cmd_md5, safe=True).strip().split('\n')[1].replace(' ', '')
        logging.info('Actual MD5=%s, SHA1=%s', md5, sha1)
        if sha1 == val_sha1 and md5 == val_md5:
            logging.info('MD5 and SHA1 are correct')
        else:
            log_fail('MD5 or SHA1 of rhsrvany.exe not correct')

    def check_disk(vmcheck, count):
        """
        Check if number of disks meets expectation
        """
        logging.info('Expect number of disks: %d', count)
        actual = vmcheck.session.cmd('lsblk |grep disk |wc -l').strip()
        logging.info('Actual number of disks: %s', actual)
        if int(actual) != count:
            log_fail('Number of disks is wrong')

    def check_result(result, status_error):
        """
        Check virt-v2v command result
        """
        libvirt.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if not status_error and checkpoint != 'vdsm':
            if output_mode == 'rhev':
                if not utils_v2v.import_vm_to_ovirt(
                        params, address_cache, timeout=v2v_timeout):
                    test.fail('Import VM failed')
            elif output_mode == 'libvirt':
                try:
                    virsh.start(vm_name, debug=True, ignore_status=False)
                except Exception as e:
                    test.fail('Start vm failed: %s' % str(e))
            # Check guest following the checkpoint document after conversion
            logging.info('Checking common checkpoints for v2v')
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
            # Check specific checkpoints
            if checkpoint == 'rhev_file':
                check_rhev_file_exist(vmchecker.checker)
            if checkpoint == 'console_xvc0':
                check_grub_file(vmchecker.checker, 'console_xvc0')
            if checkpoint in ('vnc_autoport', 'vnc_encrypt'):
                vmchecker.check_graphics(params[checkpoint])
            if checkpoint == 'sdl':
                if output_mode == 'libvirt':
                    vmchecker.check_graphics({'type': 'vnc'})
                elif output_mode == 'rhev':
                    vmchecker.check_graphics({'type': 'spice'})
            if checkpoint == 'pv_with_regular_kernel':
                check_kernel(vmchecker.checker)
            if checkpoint in ['sound', 'pcspk']:
                check_sound_card(vmchecker.checker, checkpoint)
            if checkpoint == 'rhsrvany_md5':
                check_rhsrvany_md5(vmchecker.checker)
            if checkpoint == 'multidisk':
                check_disk(vmchecker.checker, params['disk_count'])
        log_check = utils_v2v.check_log(params, output)
        if log_check:
            log_fail(log_check)
        # Merge 2 error lists
        if params.get('vmchecker'):
            error_list.extend(params['vmchecker'].errors)
        # Virtio drivers will not be installed without virtio-win setup
        if checkpoint == 'virtio_win_unset':
            missing_list = params.get('missing').split(',')
            expect_errors = ['Not find driver: ' + x for x in missing_list]
            logging.debug('Expect errors: %s' % expect_errors)
            logging.debug('Actual errors: %s' % error_list)
            if set(error_list) == set(expect_errors):
                error_list[:] = []
            else:
                logging.error('Virtio drivers not meet expectation')
        if len(error_list):
            test.fail('%d checkpoints failed: %s' %
                      (len(error_list), error_list))

    try:
        v2v_params = {
            'hostname': xen_host,
            'hypervisor': 'xen',
            'main_vm': vm_name,
            'v2v_opts': '-v -x',
            'input_mode': 'libvirt',
            'new_name': new_vm_name,
            'storage': params.get('output_storage', 'default'),
            'network': params.get('network'),
            'bridge': params.get('bridge'),
            'target': params.get('target'),
            'output_method': output_method,
            'storage_name': storage_name,
            'rhv_upload_opts': rhv_upload_opts
        }

        bk_xml = None
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Setup ssh-agent access to xen hypervisor
        logging.info('set up ssh-agent access ')
        ssh_key.setup_ssh_key(xen_host,
                              user=xen_host_user,
                              port=22,
                              password=xen_host_passwd)
        utils_misc.add_identities_into_ssh_agent()

        if params.get('output_format'):
            v2v_params.update({'output_format': params.get('output_format')})

        # Build rhev related options
        if output_mode == 'rhev':
            # create a different sasl_user name for each job
            params.update({
                'sasl_user':
                params.get("sasl_user") + utils_misc.generate_random_string(3)
            })
            logging.info('sasl user name is %s', params.get("sasl_user"))

            # Create SASL user on the ovirt host
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = params.get("remote_ip")
            v2v_sasl.server_user = params.get('remote_user')
            v2v_sasl.server_pwd = params.get('remote_pwd')
            v2v_sasl.setup(remote=True)
            if output_method == 'rhv_upload':
                # Create password file for '-o rhv_upload' to connect to ovirt
                with open(rhv_passwd_file, 'w') as f:
                    f.write(rhv_passwd)
                # Copy ca file from ovirt to local
                remote.scp_from_remote(ovirt_hostname, 22, 'root',
                                       ovirt_engine_passwd, ovirt_ca_file_path,
                                       local_ca_file_path)

        # Create libvirt dir pool
        if output_mode == 'libvirt':
            pvt.pre_pool(pool_name, pool_type, pool_target, '')

        uri = utils_v2v.Uri('xen').get_uri(xen_host)

        # Check if xen guest exists
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s does not exist', vm_name)

        virsh_instance = virsh.VirshPersistent()
        virsh_instance.set_uri(uri)

        if checkpoint in bk_list:
            bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
        if checkpoint == 'guest_uuid':
            uuid = virsh.domuuid(vm_name, uri=uri).stdout.strip()
            v2v_params['main_vm'] = uuid
        if checkpoint in ['format_convert', 'xvda_disk']:
            # Get remote disk image path
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.debug('domblklist %s:\n%s', vm_name, blklist)
            for line in blklist:
                if line.startswith(('hda', 'vda', 'sda')):
                    params['remote_disk_image'] = line.split()[-1]
                    break
            # Local path of disk image
            params['img_path'] = data_dir.get_tmp_dir() + '/%s.img' % vm_name
            if checkpoint == 'xvda_disk':
                v2v_params['input_mode'] = 'disk'
                v2v_params.update({'input_file': params['img_path']})
            # Copy remote image to local with scp
            remote.scp_from_remote(xen_host, 22, xen_host_user,
                                   xen_host_passwd,
                                   params['remote_disk_image'],
                                   params['img_path'])
        if checkpoint == 'pool_uuid':
            virsh.pool_start(pool_name)
            pooluuid = virsh.pool_uuid(pool_name).stdout.strip()
            v2v_params['storage'] = pooluuid
        if checkpoint.startswith('vnc'):
            vm_xml.VMXML.set_graphics_attr(vm_name, {'type': 'vnc'},
                                           virsh_instance=virsh_instance)
            if checkpoint == 'vnc_autoport':
                params[checkpoint] = {'autoport': 'yes'}
                vm_xml.VMXML.set_graphics_attr(vm_name,
                                               params[checkpoint],
                                               virsh_instance=virsh_instance)
            elif checkpoint in ['vnc_encrypt', 'vnc_encrypt_warning']:
                params[checkpoint] = {
                    'passwd': params.get('vnc_passwd', 'redhat')
                }
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_name, virsh_instance=virsh_instance)
                vm_xml.VMXML.add_security_info(vmxml,
                                               params[checkpoint]['passwd'],
                                               virsh_instance=virsh_instance)
            logging.debug(
                virsh_instance.dumpxml(vm_name, extra='--security-info'))
        if checkpoint.startswith('libguestfs_backend'):
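            # checkpoint has the form 'libguestfs_backend_<value>'; slicing
            # off the 19-character prefix leaves only the backend value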
            value = checkpoint[19:]
            if value == 'empty':
                value = ''
            logging.info('Set LIBGUESTFS_BACKEND to "%s"', value)
            os.environ['LIBGUESTFS_BACKEND'] = value
        if checkpoint == 'same_name':
            logging.info('Convert guest and rename to %s', new_vm_name)
            v2v_params.update({'new_name': new_vm_name})
        if checkpoint == 'no_passwordless_SSH':
            logging.info('Unset $SSH_AUTH_SOCK')
            os.unsetenv('SSH_AUTH_SOCK')
        if checkpoint in ['xml_without_image', 'format_convert']:
            xml_file = os.path.join(data_dir.get_tmp_dir(), '%s.xml' % vm_name)
            virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
            v2v_params['hypervisor'] = 'kvm'
            v2v_params['input_mode'] = 'libvirtxml'
            v2v_params.update({'input_file': xml_file})
            if params.get('img_path'):
                cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                                params['img_path'], xml_file)
                process.run(cmd)
                logging.debug(process.run('cat %s' % xml_file).stdout_text)
            if checkpoint == 'format_convert':
                v2v_params['output_format'] = 'qcow2'
        if checkpoint == 'ssh_banner':
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            ssh_banner_content = r'"# no default banner path\n' \
                                 r'#Banner /path/banner file\n' \
                                 r'Banner /etc/ssh/ssh_banner"'
            logging.info('Create ssh_banner file')
            session.cmd('echo -e %s > /etc/ssh/ssh_banner' %
                        ssh_banner_content)
            logging.info('Content of ssh_banner file:')
            logging.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
            logging.info('Restart sshd service on xen host')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            src_dir = params.get('virtio_win_dir')
            dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
            iso_path = os.path.join(dest_dir, 'virtio-win.iso')
            if not os.path.exists(dest_dir):
                shutil.copytree(src_dir, dest_dir)
            virtio_win_env = params.get('virtio_win_env', 'VIRTIO_WIN')
            process.run('rpm -e virtio-win')
            if process.run('rpm -q virtio-win',
                           ignore_status=True).exit_status == 0:
                test.error('virtio-win package was not removed')
            if checkpoint.endswith('unset'):
                logging.info('Unset env %s' % virtio_win_env)
                os.unsetenv(virtio_win_env)
            if checkpoint.endswith('custom'):
                logging.info('Set env %s=%s' % (virtio_win_env, dest_dir))
                os.environ[virtio_win_env] = dest_dir
            if checkpoint.endswith('iso_mount'):
                logging.info('Mount iso to /opt')
                process.run('mount %s /opt' % iso_path)
                os.environ[virtio_win_env] = '/opt'
            if checkpoint.endswith('iso_file'):
                logging.info('Set env %s=%s' % (virtio_win_env, iso_path))
                os.environ[virtio_win_env] = iso_path
        if checkpoint == 'cdrom':
            xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm_name, virsh_instance=virsh_instance)
            logging.debug(xml.xmltreefile)
            disks = xml.get_disk_all()
            logging.debug('Disks: %r', disks)
            for disk in list(disks.values()):
                # Check if vm has cdrom attached
                if disk.get(
                        'device') == 'cdrom' and disk.find('source') is None:
                    test.error('No CDROM image attached')
        if checkpoint == 'vdsm':
            extra_pkg = params.get('extra_pkg')
            logging.info('Install %s', extra_pkg)
            utils_package.package_install(extra_pkg.split(','))

            # Backup conf file for recovery
            for conf in params['bk_conf'].strip().split(','):
                logging.debug('Back up %s', conf)
                shutil.copyfile(conf, conf + '.bk')

            logging.info('Configure libvirt for vdsm')
            process.run('vdsm-tool configure --force')

            logging.info('Start vdsm service')
            service_manager = service.Factory.create_generic_service()
            service_manager.start('vdsmd')

            # Setup user and password
            user_pwd = "[['%s', '%s']]" % (params.get("sasl_user"),
                                           params.get("sasl_pwd"))
            v2v_sasl = utils_sasl.SASL(sasl_user_pwd=user_pwd)
            v2v_sasl.server_ip = 'localhost'
            v2v_sasl.server_user = params.get('sasl_server_user', 'root')
            v2v_sasl.server_pwd = params.get('sasl_server_passwd')
            v2v_sasl.setup()

            v2v_params['sasl_user'] = params.get("sasl_user")
            v2v_params['sasl_pwd'] = params.get("sasl_pwd")
        if checkpoint == 'multidisk':
            params['disk_count'] = 0
            blklist = virsh.domblklist(vm_name, uri=uri).stdout.split('\n')
            logging.info(blklist)
            for line in blklist:
                if '/' in line:
                    params['disk_count'] += 1
            logging.info('Total disks: %d', params['disk_count'])

        # Check if xen guest exists again
        if not virsh.domain_exists(vm_name, uri=uri):
            logging.error('VM %s does not exist', vm_name)

        # Execute virt-v2v
        v2v_result = utils_v2v.v2v_cmd(v2v_params)

        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(v2v_result, status_error)
    finally:
        # Cleanup constant files
        utils_v2v.cleanup_constant_files(params)
        process.run('ssh-agent -k')
        if checkpoint == 'vdsm':
            logging.info('Stop vdsmd')
            service_manager = service.Factory.create_generic_service()
            service_manager.stop('vdsmd')
            if params.get('extra_pkg'):
                utils_package.package_remove(params['extra_pkg'].split(','))
            for conf in params['bk_conf'].strip().split(','):
                if os.path.exists(conf + '.bk'):
                    logging.debug('Recover %s', conf)
                    os.remove(conf)
                    shutil.move(conf + '.bk', conf)
            logging.info('Restart libvirtd')
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            logging.info('Start network "default"')
            virsh.net_start('default')
            virsh.undefine(vm_name)
        if params.get('vmchecker'):
            params['vmchecker'].cleanup()
        if output_mode == 'libvirt':
            pvt.cleanup_pool(pool_name, pool_type, pool_target, '')
        if bk_xml:
            bk_xml.sync(virsh_instance=virsh_instance)
            virsh_instance.close_session()
        if checkpoint == 'ssh_banner':
            logging.info('Remove ssh_banner file')
            session = remote.remote_login("ssh", xen_host, "22", "root",
                                          xen_host_passwd, "#")
            session.cmd('rm -f /etc/ssh/ssh_banner')
            session.cmd('service sshd restart')
        if checkpoint.startswith('virtio_win'):
            utils_package.package_install(['virtio-win'])
Example #28
def run(test, params, env):
    """
    WHQL DTM client installation:
    1) Log into the guest (the client machine) and into a DTM server machine
    2) Stop the DTM client service (wttsvc) on the client machine
    3) Delete the client machine from the server's data store
    4) Rename the client machine (give it a randomly generated name)
    5) Move the client machine into the server's workgroup
    6) Reboot the client machine
    7) Install the DTM client software
    8) Setup auto logon for the user created by the installation
       (normally DTMLLUAdminUser)
    9) Reboot again

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    # Collect test params
    server_address = params.get("server_address")
    server_shell_port = int(params.get("server_shell_port"))
    server_file_transfer_port = int(params.get("server_file_transfer_port"))
    server_studio_path = params.get(
        "server_studio_path",
        "%programfiles%\\Microsoft Driver Test Manager\\Studio")
    server_username = params.get("server_username")
    server_password = params.get("server_password")
    client_username = params.get("client_username")
    client_password = params.get("client_password")
    dsso_bin = params.get("dsso_delete_machine_binary",
                          "whql/whql_delete_machine_15.exe")
    dsso_delete_machine_binary = os.path.join(data_dir.get_deps_dir(),
                                              dsso_bin)
    install_timeout = float(params.get("install_timeout", 600))
    install_cmd = params.get("install_cmd")
    wtt_services = params.get("wtt_services")

    # Stop WTT service(s) on client
    for svc in wtt_services.split():
        utils_test.stop_windows_service(session, svc)

    # Copy dsso_delete_machine_binary to server
    rss_client.upload(server_address,
                      server_file_transfer_port,
                      dsso_delete_machine_binary,
                      server_studio_path,
                      timeout=60)

    # Open a shell session with server
    server_session = remote.remote_login("nc", server_address,
                                         server_shell_port, "", "",
                                         session.prompt, session.linesep)
    server_session.set_status_test_command(session.status_test_command)

    # Get server and client information
    cmd = "echo %computername%"
    server_name = server_session.cmd_output(cmd).strip()
    client_name = session.cmd_output(cmd).strip()
    cmd = "wmic computersystem get domain"
    server_workgroup = server_session.cmd_output(cmd).strip()
    server_workgroup = server_workgroup.splitlines()[-1]
    regkey = r"HKLM\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters"
    cmd = "reg query %s /v Domain" % regkey
    o = server_session.cmd_output(cmd).strip().splitlines()[-1]
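    # the matched line is expected to look like "Domain    REG_SZ    <suffix>";
    # the third whitespace-separated field, when present, is the DNS suffix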
    try:
        server_dns_suffix = o.split(None, 2)[2]
    except IndexError:
        server_dns_suffix = ""

    # Delete the client machine from the server's data store (if it's there)
    server_session.cmd("cd %s" % server_studio_path)
    cmd = "%s %s %s" % (os.path.basename(dsso_delete_machine_binary),
                        server_name, client_name)
    server_session.cmd(cmd, print_func=logging.info)
    server_session.close()

    # Rename the client machine
    client_name = "autotest_%s" % utils_misc.generate_random_string(4)
    logging.info("Renaming client machine to '%s'", client_name)
    cmd = (
        'wmic computersystem where name="%%computername%%" rename name="%s"' %
        client_name)
    session.cmd(cmd, timeout=600)

    # Join the server's workgroup
    logging.info("Joining workgroup '%s'", server_workgroup)
    cmd = ('wmic computersystem where name="%%computername%%" call '
           'joindomainorworkgroup name="%s"' % server_workgroup)
    session.cmd(cmd, timeout=600)

    # Set the client machine's DNS suffix
    logging.info("Setting DNS suffix to '%s'", server_dns_suffix)
    cmd = 'reg add %s /v Domain /d "%s" /f' % (regkey, server_dns_suffix)
    session.cmd(cmd, timeout=300)

    # Reboot
    session = vm.reboot(session)

    # Access shared resources on the server machine
    logging.info("Attempting to access remote share on server")
    cmd = r"net use \\%s /user:%s %s" % (server_name, server_username,
                                         server_password)
    end_time = time.time() + 120
    while time.time() < end_time:
        try:
            session.cmd(cmd)
            break
        except Exception:
            pass
        time.sleep(5)
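    # the while/else reaches test.error() only if the loop never hit 'break',
    # i.e. the server share stayed unreachable for the whole 120 seconds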
    else:
        test.error("Could not access server share from client machine")

    # Install
    logging.info("Installing DTM client (timeout=%ds)", install_timeout)
    install_cmd = r"cmd /c \\%s\%s" % (server_name, install_cmd.lstrip("\\"))
    session.cmd(install_cmd, timeout=install_timeout)

    # Setup auto logon
    logging.info("Setting up auto logon for user '%s'", client_username)
    cmd = ('reg add '
           '"HKLM\\Software\\Microsoft\\Windows NT\\CurrentVersion\\winlogon" '
           '/v "%s" /d "%s" /t REG_SZ /f')
    session.cmd(cmd % ("AutoAdminLogon", "1"))
    session.cmd(cmd % ("DefaultUserName", client_username))
    session.cmd(cmd % ("DefaultPassword", client_password))

    # Reboot one more time
    session = vm.reboot(session)
    session.close()
Example #29
def run_virsh_domstate(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = params.get("status_error", "no")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domstate_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("Test 'remote' parameters not setup")
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except error.CmdError:
            status = 1
    else:
        result = virsh.domstate(vm_ref, ignore_status=True)
        status = result.exit_status
        output = result.stdout

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or output == "":
            raise error.TestFail("Run failed with right command")
        if vm_ref == "remote":
            if not (re.match("running", output) or re.match("blocked", output)
                    or re.match("idle", output)):
                raise error.TestFail("Run failed with right command")
Example #30
def run_whql_submission(test, params, env):
    """
    WHQL submission test:
    1) Log into the client machines and into a DTM server machine
    2) Copy the automation program binary (dsso_test_binary) to the server machine
    3) Run the automation program
    4) Pass the program all relevant parameters (e.g. device_data)
    5) Wait for the program to terminate
    6) Parse and report job results
    (logs and HTML reports are placed in test.debugdir)

    @param test: kvm test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    # Log into all client VMs
    login_timeout = int(params.get("login_timeout", 360))
    vms = []
    sessions = []
    for vm_name in params.objects("vms"):
        vms.append(env.get_vm(vm_name))
        vms[-1].verify_alive()
        sessions.append(vms[-1].wait_for_login(timeout=login_timeout))

    # Make sure all NICs of all client VMs are up
    for vm in vms:
        nics = vm.params.objects("nics")
        for nic_index in range(len(nics)):
            s = vm.wait_for_login(nic_index, 600)
            s.close()

    # Collect parameters
    server_address = params.get("server_address")
    server_shell_port = int(params.get("server_shell_port"))
    server_file_transfer_port = int(params.get("server_file_transfer_port"))
    server_studio_path = params.get("server_studio_path", "%programfiles%\\ "
                                    "Microsoft Driver Test Manager\\Studio")
    dsso_test_binary = params.get("dsso_test_binary",
                                  "deps/whql_submission_15.exe")
    dsso_test_binary = utils_misc.get_path(test.virtdir, dsso_test_binary)
    dsso_delete_machine_binary = params.get("dsso_delete_machine_binary",
                                            "deps/whql_delete_machine_15.exe")
    dsso_delete_machine_binary = utils_misc.get_path(test.virtdir,
                                                    dsso_delete_machine_binary)
    test_timeout = float(params.get("test_timeout", 600))

    # Copy dsso binaries to the server
    for filename in dsso_test_binary, dsso_delete_machine_binary:
        rss_client.upload(server_address, server_file_transfer_port,
                          filename, server_studio_path, timeout=60)

    # Open a shell session with the server
    server_session = remote.remote_login("nc", server_address,
                                         server_shell_port, "", "",
                                         sessions[0].prompt,
                                         sessions[0].linesep)
    server_session.set_status_test_command(sessions[0].status_test_command)

    # Get the computer names of the server and clients
    cmd = "echo %computername%"
    server_name = server_session.cmd_output(cmd).strip()
    client_names = [session.cmd_output(cmd).strip() for session in sessions]

    # Delete all client machines from the server's data store
    server_session.cmd("cd %s" % server_studio_path)
    for client_name in client_names:
        cmd = "%s %s %s" % (os.path.basename(dsso_delete_machine_binary),
                            server_name, client_name)
        server_session.cmd(cmd, print_func=logging.debug)

    # Reboot the client machines
    sessions = utils_misc.parallel((vm.reboot, (session,))
                                  for vm, session in zip(vms, sessions))

    # Check the NICs again
    for vm in vms:
        nics = vm.params.objects("nics")
        for nic_index in range(len(nics)):
            s = vm.wait_for_login(nic_index, 600)
            s.close()

    # Run whql_pre_command and close the sessions
    if params.get("whql_pre_command"):
        for session in sessions:
            session.cmd(params.get("whql_pre_command"),
                        int(params.get("whql_pre_command_timeout", 600)))
            session.close()

    # Run the automation program on the server
    pool_name = "%s_pool" % client_names[0]
    submission_name = "%s_%s" % (client_names[0],
                                 params.get("submission_name"))
    cmd = "%s %s %s %s %s %s" % (os.path.basename(dsso_test_binary),
                                 server_name, pool_name, submission_name,
                                 test_timeout, " ".join(client_names))
    server_session.sendline(cmd)

    # Helper function: wait for a given prompt and raise an exception if an
    # error occurs
    def find_prompt(prompt):
        m, o = server_session.read_until_last_line_matches(
            [prompt, server_session.prompt], print_func=logging.info,
            timeout=600)
        if m != 0:
            errors = re.findall("^Error:.*$", o, re.I | re.M)
            if errors:
                raise error.TestError(errors[0])
            else:
                raise error.TestError("Error running automation program: "
                                      "could not find '%s' prompt" % prompt)

    # Tell the automation program which device to test
    find_prompt("Device to test:")
    server_session.sendline(params.get("test_device"))

    # Tell the automation program which jobs to run
    find_prompt("Jobs to run:")
    server_session.sendline(params.get("job_filter", ".*"))

    # Set submission DeviceData
    find_prompt("DeviceData name:")
    for dd in params.objects("device_data"):
        dd_params = params.object_params(dd)
        if dd_params.get("dd_name") and dd_params.get("dd_data"):
            server_session.sendline(dd_params.get("dd_name"))
            server_session.sendline(dd_params.get("dd_data"))
    server_session.sendline()

    # Set submission descriptors
    find_prompt("Descriptor path:")
    for desc in params.objects("descriptors"):
        desc_params = params.object_params(desc)
        if desc_params.get("desc_path"):
            server_session.sendline(desc_params.get("desc_path"))
    server_session.sendline()

    # Set machine dimensions for each client machine
    for vm_name in params.objects("vms"):
        vm_params = params.object_params(vm_name)
        find_prompt(r"Dimension name\b.*:")
        for dp in vm_params.objects("dimensions"):
            dp_params = vm_params.object_params(dp)
            if dp_params.get("dim_name") and dp_params.get("dim_value"):
                server_session.sendline(dp_params.get("dim_name"))
                server_session.sendline(dp_params.get("dim_value"))
        server_session.sendline()

    # Set extra parameters for tests that require them (e.g. NDISTest)
    for vm_name in params.objects("vms"):
        vm_params = params.object_params(vm_name)
        find_prompt(r"Parameter name\b.*:")
        for dp in vm_params.objects("device_params"):
            dp_params = vm_params.object_params(dp)
            if dp_params.get("dp_name") and dp_params.get("dp_regex"):
                server_session.sendline(dp_params.get("dp_name"))
                server_session.sendline(dp_params.get("dp_regex"))
                # Make sure the prompt appears again (if the device isn't found
                # the automation program will terminate)
                find_prompt(r"Parameter name\b.*:")
        server_session.sendline()

    # Wait for the automation program to terminate
    try:
        o = server_session.read_up_to_prompt(print_func=logging.info,
                                             timeout=test_timeout + 300)
        # (test_timeout + 300 is used here because the automation program is
        # supposed to terminate cleanly on its own when test_timeout expires)
        done = True
    except aexpect.ExpectError as e:
        o = e.output
        done = False
Example #31
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    libvirtd_service = utils_libvirtd.Libvirtd()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    utils.run("cp %s %s" % (QEMU_CONF, QEMU_CONF_BK))

    dump_path = os.path.join(test.tmpdir, "dump/")
    dump_file = ""
    try:
        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            # Add <panic> device to domain
            panic_dev = Panic()
            panic_dev.addr_type = "isa"
            panic_dev.addr_iobase = "0x505"
            vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            cmd = "echo auto_dump_path = \\\"%s\\\" >> %s" % (dump_path,
                                                              QEMU_CONF)
            utils.run(cmd)
            libvirtd_service.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + vm_name + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device find
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                raise error.TestNAError("No 'panic' device in the guest,"
                                        " maybe your libvirt version doesn't"
                                        " support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                virsh.start(vm_name, ignore_status=False)
            elif vm_action == "kill":
                libvirtd_service.stop()
                kill_process_by_pattern(vm_name)
                libvirtd_service.restart()
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send key ALT-SysRq-c to crash VM, and command will not
                # return as vm crashed, so fail early for 'destroy' and
                # 'preserve' action. For 'restart', 'coredump-restart'
                # and 'coredump-destroy' actions, they all need more time
                # to dump core file or restart OS, so using the default
                # session command timeout(60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger", timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
        except error.CmdError as e:
            raise error.TestError("Guest prepare action error: %s" % e)

        if libvirtd == "off":
            libvirtd_service.stop()

        if vm_ref == "remote":
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("Test 'remote' parameters not setup")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except error.CmdError:
                status = 1
        else:
            result = virsh.domstate(vm_ref, extra, ignore_status=True,
                                    debug=True)
            status = result.exit_status
            output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status or not output:
                raise error.TestFail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # Destroy now, otherwise destroying the vm later takes a long time
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        raise ActionError(vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        raise ActionError(vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        raise ActionError(vm_action)
                elif vm_action == "start":
                    if not output.count("booted"):
                        raise ActionError(vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        raise ActionError(vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             dump_file):
                        raise ActionError(vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or
                        re.search("blocked", output) or
                        re.search("idle", output)):
                    raise error.TestFail("Run failed with right command")
Example #32
0
def run(test, params, env):
    """
    Test command: virsh destroy.

    The command can destroy (stop) a domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh destroy operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    vm_ref = params.get("destroy_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM")
                               or local_ip.count("EXAMPLE.COM")):
        raise error.TestNAError(
            "Remote test parameters unchanged from default")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    + " libvirt version.")

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("destroy_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref != "remote":
        status = virsh.destroy(vm_ref, ignore_status=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri, debug=True).exit_status
        output = ""
    else:
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s destroy %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except error.CmdError:
            status = 1

    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command! "
                                 "Output:\n%s" % output)
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command! Output:\n%s"
                                 % output)
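
The remote branch above follows a pattern repeated throughout these examples: ssh in, force LANG=C, then run a single virsh command against a completed URI. A minimal sketch of that pattern as a helper, assuming the avocado-vt remote module; the helper name, addresses and credentials are placeholders:

from virttest import remote

def run_remote_virsh(remote_ip, remote_pwd, remote_uri, virsh_cmd):
    """Log in over ssh and run one virsh command, returning (status, output)."""
    session = remote.remote_login("ssh", remote_ip, "22", "root",
                                  remote_pwd, "#")
    try:
        session.cmd_output('LANG=C')            # keep virsh output in English
        command = "virsh -c %s %s" % (remote_uri, virsh_cmd)
        return session.cmd_status_output(command, internal_timeout=5)
    finally:
        session.close()

# e.g. status, output = run_remote_virsh("192.0.2.10", "password",
#                                        "qemu+ssh://192.0.2.1/system",
#                                        "destroy avocado-vt-vm1")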
Example #33
0
def run_whql_hck_client_install(test, params, env):
    """
    WHQL HCK client installation:
    1) Log in to the guest and set up the domain environment in it
    2) Install the needed packages

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    login_timeout = int(params.get("login_timeout", 360))
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    session = vm.wait_for_login(timeout=login_timeout)

    server_address = params["server_address"]
    server_shell_port = int(params["server_shell_port"])
    server_username = params["server_username"]
    server_password = params["server_password"]
    client_username = params["client_username"]
    client_password = params["client_password"]
    server_domname = params["server_domname"]

    server_session = remote.remote_login("nc", server_address,
                                         server_shell_port, "", "",
                                         session.prompt, session.linesep)
    client_name = session.cmd_output("echo %computername%").strip()
    install_timeout = float(params.get("install_timeout", 1800))

    services_installed = session.cmd_output("wmic service get")
    if "HCKcommunication" in services_installed:
        logging.info("HCK client already installed.")
        return

    # Join the server's workgroup
    if params.get("join_domain") == "yes":
        error.context("Join the workgroup", logging.info)
        cmd = ("netdom join %s /domain:%s /UserD:%s "
               "/PasswordD:%s" % (client_name, server_domname,
                                  client_username, client_password))
        session.cmd(cmd, timeout=600)

    error.context("Setting up auto logon for user '%s'" % client_username,
                  logging.info)
    cmd = ('reg add '
           '"HKLM\\Software\\Microsoft\\Windows NT\\CurrentVersion\\winlogon"'
           ' /v "%s" /d "%s" /t REG_SZ /f')
    session.cmd(cmd % ("AutoAdminLogon", "1"))
    session.cmd(cmd % ("DefaultUserName", server_username))
    session.cmd(cmd % ("DefaultPassword", server_password))

    session = vm.reboot(session)

    if params.get("pre_hck_install"):
        error.context("Install some program before install HCK client.",
                      logging.info)
        install_cmd = params.get("pre_hck_install")
        session.cmd(install_cmd, timeout=install_timeout)

    install_cmd = params["install_cmd"]
    error.context("Installing HCK client (timeout=%ds)" % install_timeout,
                  logging.info)
    session.cmd(install_cmd, timeout=install_timeout)
    reboot_timeout = login_timeout + 1500
    session = vm.reboot(session, timeout=reboot_timeout)
    session.close()
    server_session.close()
Example #34
0
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to a xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment (libvirtd service, VM).
    6.Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Back up xml file. Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(test.tmpdir, snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name,
                                               snp_item,
                                               to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    raise error.TestNAError("This domain has snapshot(s), "
                                            "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name,
                            pool_type,
                            pool_target,
                            emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                raise error.TestFail("Creation of volume %s failed." %
                                     vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            virsh.attach_disk(vm_name, volume, disk_target, "--config")

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref,
                                       option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True,
                                       debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            try:
                uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (error.CmdError, remote.LoginError, aexpect.ShellError) as de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except error.CmdError as detail:
                logging.error("Detail: %s", detail)
Example #35
0
def run_virsh_setvcpus(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment, destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get current vcpu number.
        """
        vcpus_set = ""
        virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        vcpus_2 = root.getElementsByTagName("vcpu")
        for n in vcpus_2:
            vcpus_set += n.getAttribute("current")
            vcpus_set = int(vcpus_set)
        dom.unlink()
        return vcpus_set

    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()

    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()

    if domain == "remote_name":
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
        if virsh.has_command_help_match(command, "--live") is None:
            status_error = "yes"
        session = remote.remote_login(
            "ssh", remote_ssh_addr, "22", "root", remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                status_error = "yes"
                break
        status = virsh.setvcpus(
            dom_option, count_option, options, ignore_status=True).exit_status
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        if status_error == "no":
            if status == 0:
                if pre_vm_state == "shut off":
                    if options == "--config":
                        vcpus_set = len(vm.vcpuinfo())
                    elif options == "--current":
                        vcpus_set = get_current_vcpus()
                    elif options == "--maximum --config":
                        vcpus_set = ""
                        dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                        vcpus_set = dom.getElementsByTagName(
                            "vcpu")[0].firstChild.data
                        vcpus_set = int(vcpus_set)
                        dom.unlink()
                else:
                    vcpus_set = len(vm.vcpuinfo())
                if domain == "id":
                    cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                    output1 = commands.getoutput(cmd_chk)
                    logging.info("guest-info:\n%s" % output1)

    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
Example #36
0
def setup_or_cleanup_gluster(is_setup,
                             vol_name,
                             brick_path="",
                             pool_name="",
                             file_path="/etc/glusterfs/glusterd.vol",
                             **kwargs):
    # pylint: disable=E1121
    """
    Set up or clean up a glusterfs environment on localhost or on a remote host.
    These actions can be skipped via configuration when a static gluster instance
    on the remote is used (see base.cfg); in that case only the host IP is returned.

    :param is_setup: Boolean value, true for setup, false for cleanup
    :param vol_name: name of the gluster volume to create
    :param brick_path: directory in which to create the glusterfs brick
    :param kwargs: other parameters that need to be set for gluster
    :return: ip_addr or nothing
    """
    if kwargs.get("gluster_managed_by_test", "yes") != "yes":
        if not is_setup:
            return ""
        gluster_server_ip = kwargs.get("gluster_server_ip")
        if not gluster_server_ip:
            return utils_net.get_host_ip_address()
        return gluster_server_ip

    try:
        utils_path.find_command("gluster")
    except utils_path.CmdNotFoundError:
        raise exceptions.TestSkipError("Missing command 'gluster'")
    if not brick_path:
        tmpdir = data_dir.get_tmp_dir()
        brick_path = os.path.join(tmpdir, pool_name)

    # Check whether a remote gluster server should be used
    ip_addr = kwargs.get("gluster_server_ip", "")
    session = None
    if ip_addr != "":
        remote_user = kwargs.get("gluster_server_user")
        remote_pwd = kwargs.get("gluster_server_pwd")
        remote_identity_file = kwargs.get("gluster_identity_file")
        session = remote.remote_login("ssh",
                                      ip_addr,
                                      "22",
                                      remote_user,
                                      remote_pwd,
                                      "#",
                                      identity_file=remote_identity_file)
    if is_setup:
        if ip_addr == "":
            ip_addr = utils_net.get_host_ip_address()
            add_rpc_insecure(file_path)
            glusterd_start()
            logging.debug("finish start gluster")
            logging.debug("The contents of %s: \n%s", file_path,
                          open(file_path).read())

        gluster_vol_create(vol_name, ip_addr, brick_path, True, session)
        gluster_allow_insecure(vol_name, session)
        gluster_nfs_disable(vol_name, session)
        logging.debug("finish vol create in gluster")
        if session:
            session.close()
        return ip_addr
    else:
        gluster_vol_stop(vol_name, True, session)
        gluster_vol_delete(vol_name, session)
        gluster_brick_delete(brick_path, session)
        if session:
            session.close()
        return ""
Example #37
0
def run(test, params, env):
    """
    Verify guest login with multiple spapr-vty backends:
    1) Boot the guest with multiple spapr-vty devices using different backends
    2) Modify the kernel cfg file to specify the backend
    3) For the pty and file backends:
      3.1) Open and close the chardev
    4) For the unix_socket and tcp_socket backends:
      4.1) Log into the guest
      4.2) Create and delete files inside the guest
    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    prompt = params.get("shell_prompt")
    create_delete_file = params["create_delete_file"]
    vm = env.get_vm(params["main_vm"])
    vm.wait_for_login()
    for serial_id in params.objects("serials"):
        if serial_id != "vs1":
            hvc_id = int(serial_id.replace('vs', '')) - 1
            kernel_params = "console=hvc%s,115200" % hvc_id
            utils_test.update_boot_option(vm, args_added=kernel_params)

        backend = params.object_params(serial_id)["chardev_backend"]
        serial_device = vm.devices.get(serial_id)
        chardev_qid = serial_device.get_param("chardev")
        chardev_device = vm.devices.get_by_qid(chardev_qid)[0]

        logging.info("The currently tested backend is %s.", backend)
        if backend == 'unix_socket':
            session = vm.wait_for_serial_login(timeout=60)
            session.cmd(create_delete_file)
            session.close()
        elif backend == 'tcp_socket':
            session = remote.remote_login(client='nc',
                                          host=chardev_device.params['host'],
                                          port=chardev_device.params['port'],
                                          username=params['username'],
                                          password=params['password'],
                                          prompt=prompt,
                                          timeout=240)
            session.cmd(create_delete_file)
            session.close()
        elif backend == 'pty':
            chardev_info = vm.monitor.human_monitor_cmd('info chardev')
            hostfile = re.findall(
                '%s: filename=pty:(/dev/pts/\\d)?' % serial_id, chardev_info)
            if not hostfile:
                test.fail("Can't find the corresponding pty backend: %s" %
                          chardev_info)
            fd_pty = os.open(hostfile[0], os.O_RDWR | os.O_NONBLOCK)
            os.close(fd_pty)
        elif backend == 'file':
            filename = chardev_device.params['path']
            with open(filename) as f:
                if 'Linux' not in f.read():
                    test.fail("Guest boot fail with file backend.")
        elif backend == 'null':
            session = vm.wait_for_login()
            session.cmd(create_delete_file)

        vm.verify_dmesg()
    vm.destroy()
Example #38
0
     os.unsetenv('SSH_AUTH_SOCK')
 if checkpoint in ['xml_without_image', 'format_convert']:
     xml_file = os.path.join(data_dir.get_tmp_dir(), '%s.xml' % vm_name)
     virsh.dumpxml(vm_name, to_file=xml_file, uri=uri)
     v2v_params['hypervisor'] = 'kvm'
     v2v_params['input_mode'] = 'libvirtxml'
     v2v_params.update({'input_file': xml_file})
     if params.get('img_path'):
         cmd = "sed -i 's|%s|%s|' %s" % (params['remote_disk_image'],
                                         params['img_path'], xml_file)
         process.run(cmd)
         logging.debug(process.run('cat %s' % xml_file).stdout)
     if checkpoint == 'format_convert':
         v2v_params['output_format'] = 'qcow2'
 if checkpoint == 'ssh_banner':
     session = remote.remote_login("ssh", xen_host, "22", "root",
                                   xen_host_passwd, "#")
     ssh_banner_content = r'"# no default banner path\n' \
                          r'#Banner /path/banner file\n' \
                          r'Banner /etc/ssh/ssh_banner"'
     logging.info('Create ssh_banner file')
     session.cmd('echo -e %s > /etc/ssh/ssh_banner' %
                 ssh_banner_content)
     logging.info('Content of ssh_banner file:')
     logging.info(session.cmd_output('cat /etc/ssh/ssh_banner'))
     logging.info('Restart sshd service on xen host')
     session.cmd('service sshd restart')
 if checkpoint.startswith('virtio_win'):
     src_dir = params.get('virtio_win_dir')
     dest_dir = os.path.join(data_dir.get_tmp_dir(), 'virtio-win')
     iso_path = os.path.join(dest_dir, 'virtio-win.iso')
     if not os.path.exists(dest_dir):
Example #39
0
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to a xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment (libvirtd service, VM).
    6.Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "password")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"
    nvram_o = None
    if platform.machine() == 'aarch64':
        nvram_o = " --nvram"
        option += nvram_o

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Back up xml file. Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    elems = backup_xml.xmltreefile.findall('/devices/disk/source')
    existing_images = [elem.get('file') for elem in elems]

    # Backup images since remove-all-storage could remove existing libvirt
    # managed guest images
    if existing_images and option.count("remove-all-storage"):
        for img in existing_images:
            backup_img = img + '.bak'
            logging.info('Backup %s to %s', img, backup_img)
            shutil.copyfile(img, backup_img)

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(test.tmpdir, snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    test.cancel("This domain has snapshot(s), "
                                "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                test.fail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            virsh.attach_disk(vm_name, volume, disk_target, "--config")

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref, option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True, debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref, nvram_o,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            try:
                local_user = params.get("username", "root")
                uri = libvirt_vm.complete_uri(local_ip)
                # setup ssh auto login from remote machine to test machine
                # for the command to execute remotely
                ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                             remote_pwd, hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (process.CmdError, remote.LoginError, aexpect.ShellError) as de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except process.CmdError as detail:
                logging.error("Detail: %s", detail)

        # After vm.destroy, virsh.domain_exists can still return True due to
        # a timing issue, and then the test fails.
        time.sleep(2)
        # Check if VM exists.
        vm_exist = virsh.domain_exists(vm_name)

        # Check if xml file exists.
        xml_exist = False
        if vm.is_qemu() and os.path.exists("/etc/libvirt/qemu/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_lxc() and os.path.exists("/etc/libvirt/lxc/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_xen() and os.path.exists("/etc/xen/%s" % vm_name):
            xml_exist = True

        # Check if the save file exists when using --managedsave
        save_exist = os.path.exists(save_file)

        # Check if the volume still exists when using --remove-all-storage
        volume_exist = volume and os.path.exists(volume)

        # Test define with acl control and recover domain.
        if params.get('setup_libvirt_polkit') == 'yes':
            if virsh.domain_exists(vm.name):
                virsh.undefine(vm_ref, nvram_o, ignore_status=True)
            cmd = "chmod 666 %s" % backup_xml.xml
            process.run(cmd, ignore_status=False, shell=True)
            s_define = virsh.define(backup_xml.xml,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, ignore_status=True, debug=True)
            status3 = s_define.exit_status

    finally:
        # Recover main VM.
        try:
            backup_xml.sync()
        except LibvirtXMLError:
            # sync() tries to undefine and re-define the xml, but this
            # undefine test may have already undefined the domain,
            # which can lead to an error here; fall back to define().
            backup_xml.define()

        # Recover existing guest images
        if existing_images and option.count("remove-all-storage"):
            for img in existing_images:
                backup_img = img + '.bak'
                logging.info('Recover image %s to %s', backup_img, img)
                shutil.move(backup_img, img)

        # Clean up pool
        if pvtest:
            pvtest.cleanup_pool(pool_name, pool_type,
                                pool_target, emulated_img)
        # Recover VM snapshots.
        if option.count("snapshot") and (not vm.is_lxc()):
            logging.debug("Recover snapshots for domain!")
            for file_item in snp_file_list:
                virsh.snapshot_create(vm_name, file_item)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh undefine return unexpected result.")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3 == 0:
                test.fail("virsh define with false acl permission" +
                          " should failed.")
    else:
        if status:
            test.fail("virsh undefine failed.")
        if undefine_twice:
            if not status2:
                test.fail("Undefine the same VM twice succeeded.")
        if vm_exist:
            test.fail("VM still exists after undefine.")
        if xml_exist:
            test.fail("Xml file still exists after undefine.")
        if option.count("managedsave") and save_exist:
            test.fail("Save file still exists after undefine.")
        if option.count("remove-all-storage") and volume_exist:
            test.fail("Volume file '%s' still exists after"
                      " undefine." % volume)
        if wipe_data and option.count("remove-all-storage"):
            if not output.count("Wiping volume '%s'" % disk_target):
                test.fail("Command didn't wipe volume storage!")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3:
                test.fail("virsh define with right acl permission" +
                          " should succeeded")
Example #40
0
def run_virsh_dommemstat(test, params, env):
    """
    Test command: virsh dommemstat.

    The command gets memory statistics for a domain
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh dommemstat operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    status_error = params.get("status_error", "no")
    vm_ref = params.get("dommemstat_vm_ref", "name")
    libvirtd = params.get("libvirtd", "on")
    extra = params.get("dommemstat_extra", "")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.count("_invalid_"):
        # vm_ref names parameter to fetch
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s" % vm_name

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref != "remote":
        status = virsh.dommemstat(vm_ref,
                                  extra,
                                  ignore_status=True,
                                  debug=True).exit_status
    else:
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("local/remote ip parameters not set.")
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s dommemstat %s %s" % (remote_uri, vm_name,
                                                        extra)
            status = session.cmd_status(command, internal_timeout=5)
            session.close()
        except error.CmdError:
            status = 1

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
Example #41
0
    def check_domjobinfo_output(option="", is_mig_compelete=False):
        """
        Check all items in domjobinfo of the guest on both remote and local

        :param option: options for domjobinfo
        :param is_mig_compelete: False for domjobinfo checking during migration,
                            True for domjobinfo checking after migration
        :raise: test.fail if the value of given item is unexpected
        """
        expected_list_during_mig = ["Job type", "Operation", "Time elapsed",
                                    "Data processed", "Data remaining",
                                    "Data total", "Memory processed",
                                    "Memory remaining", "Memory total",
                                    "Memory bandwidth", "Dirty rate", "Page size",
                                    "Iteration", "Constant pages", "Normal pages",
                                    "Normal data", "Expected downtime", "Setup time"]
        if libvirt_version.version_compare(4, 10, 0):
            expected_list_during_mig.insert(13, "Postcopy requests")

        expected_list_after_mig_src = copy.deepcopy(expected_list_during_mig)
        expected_list_after_mig_src[-2] = 'Total downtime'
        expected_list_after_mig_dest = copy.deepcopy(expected_list_after_mig_src)

        # Check version in remote
        if not expected_list_after_mig_dest.count("Postcopy requests"):
            remote_session = remote.remote_login("ssh", server_ip, "22", server_user,
                                                 server_pwd, "#")
            if libvirt_version.version_compare(4, 10, 0, session=remote_session):
                expected_list_after_mig_dest.insert(14, "Postcopy requests")
            remote_session.close()

        expect_dict = {"src_notdone": {"Job type": "Unbounded",
                                       "Operation": "Outgoing migration",
                                       "all_items": expected_list_during_mig},
                       "dest_notdone": {"error": "Operation not supported: mig"
                                                 "ration statistics are availab"
                                                 "le only on the source host"},
                       "src_done": {"Job type": "Completed",
                                    "Operation": "Outgoing migration",
                                    "all_items": expected_list_after_mig_src},
                       "dest_done": {"Job type": "Completed",
                                     "Operation": "Incoming migration",
                                     "all_items": expected_list_after_mig_dest}}
        pc_opt = False
        if postcopy_options:
            pc_opt = True
            if is_mig_compelete:
                expect_dict["dest_done"].clear()
                expect_dict["dest_done"]["Job type"] = "None"
            else:
                set_migratepostcopy()

        vm_ref = '{}{}'.format(vm_name, option)
        src_jobinfo = virsh.domjobinfo(vm_ref, **virsh_args)
        cmd = "virsh domjobinfo {} {}".format(vm_name, option)
        dest_jobinfo = remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

        if not is_mig_compelete:
            search_jobinfo_output(src_jobinfo.stdout, expect_dict["src_notdone"],
                                  postcopy_req=pc_opt)
            search_jobinfo_output(dest_jobinfo.stderr, expect_dict["dest_notdone"])
        else:
            search_jobinfo_output(src_jobinfo.stdout, expect_dict["src_done"])
            search_jobinfo_output(dest_jobinfo.stdout, expect_dict["dest_done"])
Example #42
0
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment (libvirtd service).
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        try:
            vm.prepare_guest_agent(channel=agent, start=agent)
        except virt_vm.VMError as e:
            logging.debug(e)
            # qemu-guest-agent is not available on RHEL5
            raise error.TestNAError("qemu-guest-agent package is not available")

        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except (remote.LoginError, process.CmdError, aexpect.ShellError) as e:
                logging.error("Exception: %s", str(e))
                status = -1
Example #43
0
def run(test, params, env):
    """
    Test command: virsh destroy.

    The command can destroy (stop) a domain.
    1.Prepare test environment.
    2.When the ibvirtd == "off", stop the libvirtd service.
    3.Perform virsh destroy operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    vm_ref = params.get("destroy_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "LOCAL.EXAMPLE.COM")
    paused_after_start_vm = "yes" == params.get("paused_after_start_vm", "no")
    destroy_readonly = "yes" == params.get("destroy_readonly", "no")
    start_destroy_times = params.get("start_destroy_times", "")
    limit_nofile = params.get("limit_nofile", "")
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM")
                               or local_ip.count("EXAMPLE.COM")):
        test.cancel("Remote test parameters unchanged from default")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    def modify_virtual_daemon(service_path, modify_info):
        """
        Modify libvirtd or virtqemud service

        :param service_path: service path
        :param modify_info: service modify info
        """
        ori_value = process.getoutput("cat %s | grep LimitNOFILE" %
                                      service_path,
                                      shell=True)
        with open(service_path, 'r+') as f:
            content = f.read()
            content = re.sub(ori_value, modify_info, content)
            f.seek(0)
            f.write(content)
            f.truncate()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("destroy_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref != "remote":
        if paused_after_start_vm:
            virsh.suspend(vm_ref)
            if not vm.is_paused():
                test.fail("VM suspend failed")
        status = virsh.destroy(vm_ref,
                               ignore_status=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               debug=True).exit_status
        output = ""

        if start_destroy_times:
            status = 0
            try:
                modify_service = "{}.service".format(
                    utils_libvirtd.Libvirtd().service_name)
                LOG.debug("Modify service {}".format(modify_service))
                service_path = "/usr/lib/systemd/system/{}"\
                               .format(modify_service)
                LOG.debug("Service path is: {}".format(service_path))

                # Backup original libvirtd.service
                backup_file = os.path.join(data_dir.get_tmp_dir(),
                                           "{}-bak".format(modify_service))
                shutil.copy(service_path, backup_file)

                # Decrease domain number to speed up test
                modify_virtual_daemon(service_path, limit_nofile)
                process.run("systemctl daemon-reload")
                utils_libvirtd.Libvirtd(modify_service).restart()

                # Keep start/destroy guest to see whether domain unix socket
                # was cleaned up.
                for i in range(int(start_destroy_times)):
                    LOG.debug("Start guest {} times".format(i))
                    ret = virsh.start(vm_name)
                    if ret.exit_status:
                        test.fail("Failed to start guest: {}".format(
                            ret.stderr_text))
                    virsh.destroy(vm_name)
            finally:
                # Recover libvirtd.service
                shutil.copy(backup_file, service_path)
                process.run("systemctl daemon-reload")
                utils_libvirtd.Libvirtd(modify_service).restart()

    else:
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')

            # Set up remote-to-remote login to the local host
            ssh_key.setup_remote_ssh_key(remote_ip, "root", remote_pwd,
                                         local_ip, "root", local_pwd)

            command = "virsh -c %s destroy %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except process.CmdError:
            status = 1

    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Test the read_only mode
    if destroy_readonly:
        result = virsh.destroy(vm_ref,
                               ignore_status=True,
                               debug=True,
                               readonly=True)
        libvirt.check_exit_status(result, expect_error=True)
        # This is for status_error check
        status = result.exit_status

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command! "
                      "Output:\n%s" % output)
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command! Output:\n%s" % output)
예제 #44
0
def run_virsh_domstate(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = params.get("status_error", "no")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domstate_extra"))
    elif  vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    if vm_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("Test 'remote' parameters not setup")
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except error.CmdError:
            status = 1
    else:
        result = virsh.domstate(vm_ref, ignore_status=True)
        status = result.exit_status
        output = result.stdout

    # Recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or output == "":
            raise error.TestFail("Run failed with right command")
        if vm_ref == "remote":
            if not (re.match("running", output) or re.match("blocked", output)
                    or re.match("idle", output)):
                raise error.TestFail("Run failed with right command")
예제 #45
0
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")
    reset_action = "yes" == params.get("reset_action", "no")
    dump_option = params.get("dump_option", "")
    start_action = params.get("start_action", "normal")
    kill_action = params.get("kill_action", "normal")
    check_libvirtd_log = params.get("check_libvirtd_log", "no")
    err_msg = params.get("err_msg", "")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Config libvirtd log
    if check_libvirtd_log == "yes":
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_log_file = os.path.join(test.tmpdir, "libvirtd.log")
        libvirtd_conf["log_level"] = '1'
        libvirtd_conf["log_filters"] = ('"1:json 1:libvirt 1:qemu 1:monitor '
                                        '3:remote 4:event"')
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_file
        logging.debug("the libvirtd config file content is:\n %s" %
                      libvirtd_conf)
        libvirtd.restart()

    # Get image file
    image_source = vm.get_first_disk_devices()['source']
    logging.debug("image source: %s" % image_source)
    new_image_source = image_source + '.rename'

    dump_path = os.path.join(test.tmpdir, "dump/")
    logging.debug("dump_path: %s", dump_path)
    try:
        os.mkdir(dump_path)
    except OSError:
        # If the path already exists then pass
        pass
    dump_file = ""
    try:
        # Keep the guest memory small so that dumping the core finishes
        # within the test case timeout
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            memory_value = int(params.get("memory_value", "2097152"))
            memory_unit = params.get("memory_unit", "KiB")
            vmxml.set_memory(memory_value)
            vmxml.set_memory_unit(memory_unit)
            logging.debug(vmxml)
            vmxml.sync()

        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                if "ppc" not in platform.machine():
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + "*" + vm_name[:20] + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Skip this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                test.cancel("No 'panic' device in the guest. Maybe your "
                            "libvirt version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                if start_action == "rename":
                    # rename the guest image file to make guest fail to start
                    os.rename(image_source, new_image_source)
                    virsh.start(vm_name, ignore_status=True)
                else:
                    virsh.start(vm_name, ignore_status=False)
            elif vm_action == "kill":
                if kill_action == "stop_libvirtd":
                    libvirtd.stop()
                    utils_misc.kill_process_by_pattern(vm_name)
                    libvirtd.restart()
                elif kill_action == "reboot_vm":
                    virsh.reboot(vm_name, ignore_status=False)
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
                else:
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send ALT-SysRq-c to crash the VM. The command will not
                # return because the vm has crashed, so use a short timeout
                # for the 'destroy' and 'preserve' actions. The 'restart',
                # 'coredump-restart' and 'coredump-destroy' actions all need
                # more time to dump a core file or restart the OS, so they
                # keep the default session command timeout (60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
            elif vm_action == "dump":
                dump_file = dump_path + "*" + vm_name + "-*"
                virsh.dump(vm_name,
                           dump_file,
                           dump_option,
                           ignore_status=False)
        except process.CmdError as detail:
            test.error("Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd.stop()

        if vm_ref == "remote":
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("Test 'remote' parameters not setup")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1
        else:
            result = virsh.domstate(vm_ref,
                                    extra,
                                    ignore_status=True,
                                    debug=True)
            status = result.exit_status
            output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or not output:
                test.fail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # Destroy now, otherwise destroying the vm later
                    # will take a long time
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "start":
                    if start_action == "rename":
                        if not output.count("shut off (failed)"):
                            test.fail(err_msg % vm_action)
                    else:
                        if not output.count("booted"):
                            test.fail(err_msg % vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        test.fail(err_msg % vm_action)
                    # The VM is left in the preserved (crashed) state;
                    # perform virsh reset and check that the VM reboots and
                    # that domstate moves from the crashed state to running,
                    # since a bug was observed here
                    if vm_oncrash_action == "preserve" and reset_action:
                        virsh_dargs = {'debug': True, 'ignore_status': True}
                        ret = virsh.reset(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        ret = virsh.domstate(vm_name, extra,
                                             **virsh_dargs).stdout.strip()
                        if "paused (crashed)" not in ret:
                            test.fail("vm fails to change state from crashed"
                                      " to paused after virsh reset")
                        # it will be in paused (crashed) state after reset
                        # and resume is required for the vm to reboot
                        ret = virsh.resume(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        vm.wait_for_login()
                        cmd_output = virsh.domstate(vm_name,
                                                    '--reason').stdout.strip()
                        if "running" not in cmd_output:
                            test.fail("guest state failed to get updated")
                    if vm_oncrash_action in [
                            'coredump-destroy', 'coredump-restart'
                    ]:
                        if not find_dump_file:
                            test.fail("Core dump file is not created in dump "
                                      "path: %s" % dump_path)
                    # To cover bug 1178652
                    if (vm_oncrash_action == "rename-restart"
                            and check_libvirtd_log == "yes"):
                        libvirtd.restart()
                        if not os.path.exists(libvirtd_log_file):
                            test.fail("Expected VM log file %s does not exist"
                                      % libvirtd_log_file)
                        cmd = ("grep -nr '%s' %s" %
                               (err_msg, libvirtd_log_file))
                        if not process.run(cmd, ignore_status=True,
                                           shell=True).exit_status:
                            test.fail(
                                "Found error message %s in log file: %s." %
                                (err_msg, libvirtd_log_file))
                elif vm_action == "dump":
                    if dump_option == "--live":
                        if not output.count("running (unpaused)"):
                            test.fail(err_msg % vm_action)
                    elif dump_option == "--crash":
                        if not output.count("shut off (crashed)"):
                            test.fail(err_msg % vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or re.search(
                        "blocked", output) or re.search("idle", output)):
                    test.fail("Run failed with right command")
    finally:
        qemu_conf.restore()
        if check_libvirtd_log == "yes":
            libvirtd_conf.restore()
            if os.path.exists(libvirtd_log_file):
                os.remove(libvirtd_log_file)
        libvirtd.restart()
        if vm_action == "start" and start_action == "rename":
            os.rename(new_image_source, image_source)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
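check_crash_state() and find_dump_file used above are defined elsewhere in the original test module and are not part of this excerpt. A rough sketch of what the helper is assumed to verify, with illustrative (not authoritative) expected-state strings:

import glob

def check_crash_state(cmd_output, crash_action, vm_name, dump_file=""):
    """Hypothetical re-creation: does the domstate output match the
    on_crash action and, for coredump actions, did a dump file appear?"""
    # Illustrative expected states; the real strings depend on the
    # libvirt version and on the original helper's logic.
    expected = {"destroy": "shut off (crashed)",
                "preserve": "crashed (panicked)",
                "restart": "running (crashed)",
                "coredump-destroy": "shut off (crashed)",
                "coredump-restart": "running"}
    state_ok = expected.get(crash_action, "crashed") in cmd_output
    if crash_action.startswith("coredump") and dump_file:
        # dump_file contains wildcards, so expand it before checking
        state_ok = state_ok and bool(glob.glob(dump_file))
    return state_ok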
예제 #46
0
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment (libvirtd service).
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        if agent:
            vm_xml.VMXML.set_agent_channel(vm_name)
        else:
            vm_xml.VMXML.remove_agent_channel(vm_name)

        virsh.start(vm_name)
        guest_session = vm.wait_for_login()
        if agent:
            if guest_session.cmd_status("which qemu-ga"):
                raise error.TestNAError("Cannot execute this test for domain"
                                        " doesn't have qemu-ga command!")
            # check if the qemu-guest-agent is active or not firstly
            stat_ps = guest_session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                s, o = guest_session.cmd_status_output("qemu-ga -d")
                if s != 0:
                    raise error.TestError("'qemu-ga -d' failed.\noutput:%s" % o)
            stat_ps = guest_session.cmd_status("ps aux |grep [q]emu-ga")
            guest_session.close()
            if stat_ps:
                raise error.TestError("Fail to start qemu-guest-agent!")
        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            # FIXME: Catch specific exception
            except Exception as detail:
                logging.error("Exception: %s", str(detail))
                status = -1
        if vm_ref != "remote_name":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.reboot(vm_ref, mode,
                                     ignore_status=True, debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    raise error.TestNAError(
                        "Reboot command doesn't work on older libvirt versions")
                raise error.TestFail("Run failed with right command")
예제 #47
0
def run(test, params, env):
    """
    Test storage migration
    1) Do storage migration(copy-storage-all/copy-storage-inc) with
    TLS encryption - NBD transport
    2) Cancel storage migration with TLS encryption
    3) Copy only the top image for storage migration with backing chain
    4) Migrate vm with copy storage - Native TLS(--tls) - inconsistent CN and
        server hostname
    5) Migrate vm with copy storage over TCP transport - Specified IP
    6) Migrate vm with copy storage over TCP transport - Specified IP+Port
    7) Migrate vm with copy storage over TCP transport - Specified disks_uri

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def prepare_nfs_backingfile(vm, params):
        """
        Create an image using nfs type backing_file

        :param vm: The guest
        :param params: the parameters used
        """
        mnt_path_name = params.get("nfs_mount_dir", "nfs-mount")
        exp_opt = params.get("export_options", "rw,no_root_squash,fsid=0")
        exp_dir = params.get("export_dir", "nfs-export")
        backingfile_img = params.get("source_dist_img", "nfs-img")
        disk_format = params.get("disk_format", "qcow2")
        img_name = params.get("img_name", "test.img")
        precreation = "yes" == params.get("precreation", "yes")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        disk_xml = vmxml.devices.by_device_tag('disk')[0]
        src_disk_format = disk_xml.xmltreefile.find('driver').get('type')
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        disk_img = os.path.join(os.path.dirname(blk_source), img_name)

        res = libvirt.setup_or_cleanup_nfs(True,
                                           mnt_path_name,
                                           is_mount=True,
                                           export_options=exp_opt,
                                           export_dir=exp_dir)
        mnt_path = res["mount_dir"]
        params["selinux_status_bak"] = res["selinux_status_bak"]

        if vm.is_alive():
            vm.destroy(gracefully=False)

        disk_cmd = ("qemu-img convert -f %s -O %s %s %s/%s" %
                    (src_disk_format, disk_format, blk_source, mnt_path,
                     backingfile_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append("%s/%s" % (mnt_path, backingfile_img))
        logging.debug("Create a local image backing on NFS.")
        disk_cmd = (
            "qemu-img create -f %s -F %s -b %s/%s %s" %
            (disk_format, disk_format, mnt_path, backingfile_img, disk_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append(disk_img)
        if precreation:
            logging.debug("Create an image backing on NFS on remote host.")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append("%s/%s" % (mnt_path, backingfile_img))
            remote_image_list.append(disk_img)
            remote_session.close()

        params.update({
            'disk_source_name': disk_img,
            'disk_type': 'file',
            'disk_source_protocol': 'file'
        })
        libvirt.set_vm_disk(vm, params)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Local variables
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    virsh_options = params.get("virsh_options", "")
    copy_storage_option = params.get("copy_storage_option")
    extra = params.get("virsh_migrate_extra", "")
    options = params.get("virsh_migrate_options", "--live --verbose")
    backingfile_type = params.get("backingfile_type")
    check_str_local_log = params.get("check_str_local_log", "")
    disk_format = params.get("disk_format", "qcow2")
    log_file = params.get("log_outputs", "/var/log/libvirt/libvirtd.log")
    daemon_conf_dict = eval(params.get("daemon_conf_dict", '{}'))
    cancel_migration = "yes" == params.get("cancel_migration", "no")
    check_disks_port = "yes" == params.get("check_disks_port", "no")
    migrate_again = "yes" == params.get("migrate_again", "no")
    precreation = "yes" == params.get("precreation", "yes")
    tls_recovery = "yes" == params.get("tls_auto_recovery", "yes")
    status_error = "yes" == params.get("status_error", "no")

    local_image_list = []
    remote_image_list = []
    tls_obj = None

    action_during_mig = None
    daemon_conf = None
    mig_result = None
    remote_session = None
    vm_session = None
    remove_dict = {}
    src_libvirt_file = None

    libvirt_version.is_libvirt_feature_supported(params)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra = "{} {}".format(extra, copy_storage_option)
    extra_args = migration_test.update_virsh_migrate_extra_args(params)
    if cancel_migration:
        action_during_mig = migration_test.do_cancel
    elif check_disks_port:
        action_during_mig = libvirt_network.check_established

    # For safety reasons, we'd better back up the xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        if backingfile_type:
            if backingfile_type == "nfs":
                prepare_nfs_backingfile(vm, params)
        if extra.count("copy-storage-all") and precreation:
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (disk_format, blk_source, vsize))
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append(blk_source)
            remote_session.close()

        # Update libvirtd configuration
        if daemon_conf_dict:
            if os.path.exists(log_file):
                os.remove(log_file)
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

        if extra.count("--tls"):
            tls_obj = TLSConnection(params)
            if tls_recovery:
                tls_obj.auto_recover = True
                tls_obj.conn_setup()

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))
        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        migration_test.ping_vm(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra,
                                    func=action_during_mig,
                                    **extra_args)

        mig_result = migration_test.ret

        if migrate_again and status_error:
            logging.debug(
                "Sleeping 10 seconds before rerunning the migration.")
            time.sleep(10)
            if cancel_migration:
                action_during_mig = None
            extra_args["status_error"] = "no"
            migration_test.do_migration(vms,
                                        None,
                                        dest_uri,
                                        'orderly',
                                        options,
                                        thread_timeout=1200,
                                        ignore_status=True,
                                        virsh_opt=virsh_options,
                                        extra_opts=extra,
                                        func=action_during_mig,
                                        **extra_args)

            mig_result = migration_test.ret
        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)
        orig_config_xml.sync()

        if daemon_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None,
                                             is_recover=True,
                                             config_object=daemon_conf)
        if src_libvirt_file:
            src_libvirt_file.restore()

        if tls_obj:
            logging.debug("Clean up local objs")
            del tls_obj
        for source_file in local_image_list:
            libvirt.delete_local_disk("file", path=source_file)
        for img in remote_image_list:
            remote.run_remote_cmd("rm -rf %s" % img, params)

        if remote_session:
            remote_session.close()
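For orientation, the do_migration() calls above correspond roughly to a virsh command line like the sketch below; the option values and destination host are placeholders, not values taken from this test:

# Illustrative CLI equivalent of the migration driven above
options = "--live --verbose"
extra = "--copy-storage-all --tls"            # example values only
dest_uri = "qemu+ssh://DEST.EXAMPLE.COM/system"
migrate_cmd = "virsh migrate %s %s %s %s" % (options, extra,
                                             "VM_NAME", dest_uri)
# -> virsh migrate --live --verbose --copy-storage-all --tls \
#        VM_NAME qemu+ssh://DEST.EXAMPLE.COM/system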
예제 #48
0
def setup_remote_ssh_key(hostname1,
                         user1,
                         password1,
                         hostname2=None,
                         user2=None,
                         password2=None,
                         port=22,
                         config_options=None,
                         public_key="dsa"):
    """
    Set up remote-to-remote login to another server by using a public key.
    If hostname2 is not supplied, set up login to the local host instead.

    :param hostname1: the server that will log into the other host
    :param hostname2: the server to be logged into
    :type hostname1: str
    :param user1: user to log in as (user2 likewise for hostname2)
    :type user1: str
    :param password1: password (password2 likewise for hostname2)
    :type password1: str
    :param port: port number
    :type port: int
    :param config_options: list of options eg: ["StrictHostKeyChecking=no"]
    :type config_options: list of str
    """
    logging.debug('Performing SSH key setup on %s:%d as %s.' %
                  (hostname1, port, user1))

    try:
        session1 = remote.remote_login(client='ssh',
                                       host=hostname1,
                                       port=port,
                                       username=user1,
                                       password=password1,
                                       prompt=r'[$#%]')
        public_key = get_remote_public_key(session1, public_key=public_key)

        if hostname2 is None:
            # Simply create a session to local
            session2 = aexpect.ShellSession("sh", linesep='\n', prompt='#')
            # set config in local machine
            if config_options:
                for each_option in config_options:
                    session2.cmd_output("echo '%s' >> ~/.ssh/config" %
                                        each_option)
        else:
            session2 = remote.remote_login(client='ssh',
                                           host=hostname2,
                                           port=port,
                                           username=user2,
                                           password=password2,
                                           prompt=r'[$#%]')
            # set config in remote machine
            if config_options:
                for each_option in config_options:
                    session1.cmd_output("echo '%s' >> ~/.ssh/config" %
                                        each_option)
        session2.cmd_output('mkdir -p ~/.ssh')
        session2.cmd_output('chmod 700 ~/.ssh')
        session2.cmd_output("echo '%s' >> ~/.ssh/authorized_keys; " %
                            public_key)
        session2.cmd_output('chmod 600 ~/.ssh/authorized_keys')
        logging.debug('SSH key setup on %s complete.', session2)
    except Exception as err:
        logging.debug('SSH key setup has failed: %s', err)
        try:
            session1.close()
            session2.close()
        except Exception:
            pass
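A typical call, mirroring how the virsh shutdown example later in this listing uses the helper (all host names and passwords below are placeholders):

# Allow root on REMOTE.EXAMPLE.COM to ssh into LOCAL.EXAMPLE.COM without
# a password, relaxing host-key checking for the first connection.
setup_remote_ssh_key("REMOTE.EXAMPLE.COM", "root", "remote-password",
                     hostname2="LOCAL.EXAMPLE.COM", user2="root",
                     password2="local-password",
                     config_options=["StrictHostKeyChecking=no"])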
예제 #49
0
def run_virsh_undefine(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to a xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment (libvirtd service, VM).
    6.Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = params.get("status_error")
    undefine_twice = params.get("undefine_twice", 'no')
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    xml_file = os.path.join(test.tmpdir, 'tmp.xml')
    remote_user = params.get("remote_user", "user")
    remote_password = params.get("remote_password", "password")
    remote_prompt = params.get("remote_prompt", "#")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # Back up xml file. A Xen host has no guest xml file to define a guest.
    virsh.dumpxml(vm_name, extra="", to_file=xml_file)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    # Turn libvirtd into certain state.
    if libvirtd_state == "off":
        utils_libvirtd.libvirtd_stop()

    # Test virsh undefine command.
    status = 0
    try:
        uri = libvirt_vm.complete_uri(local_ip)
    except error.CmdError:
        status = 1
        uri = None
    if vm_ref != "remote":
        vm_ref = "%s %s" % (vm_ref, extra)
        cmdresult = virsh.undefine(vm_ref, uri=uri,
                                   ignore_status=True, debug=True)
        status = cmdresult.exit_status
        if status:
            logging.debug("Error status, command output: %s", cmdresult.stdout)
        if undefine_twice == "yes":
            status2 = virsh.undefine(vm_ref, uri=uri,
                                     ignore_status=True).exit_status
    else:
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("remote_ip and/or local_ip parameters not"
                                    " changed from default values")
        session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                      remote_password, remote_prompt)
        cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
        status, output = session.cmd_status_output(cmd_undefine)
        logging.info("Undefine output: %s", output)

    # Recover libvirtd state.
    if libvirtd_state == "off":
        utils_libvirtd.libvirtd_start()

    # Shutdown VM.
    if virsh.domain_exists(vm.name, uri=uri):
        try:
            if vm.is_alive():
                vm.destroy()
        except error.CmdError as detail:
            logging.error("Detail: %s", detail)
예제 #51
0
def run(test, params, env):
    """
    Test command: virsh shutdown.

    The command can gracefully shut down a domain.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh shutdown operation.
    4.Recover test environment.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("shutdown_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    agent = ("yes" == params.get("shutdown_agent", "no"))
    mode = params.get("shutdown_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    libvirtd = params.get("libvirtd", "on")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    timeout = eval(params.get("shutdown_timeout", "60"))
    readonly = "yes" == params.get("shutdown_readonly", "no")
    expect_msg = params.get("shutdown_err_msg")

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in"
                        " current libvirt version.")

    try:
        # Add or remove qemu-agent from guest before test
        vm.prepare_guest_agent(channel=agent, start=agent)
        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        # run test case
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = "%s %s" % (vm_name, params.get("shutdown_extra"))
        elif vm_ref == "uuid":
            vm_ref = domuuid

        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        if vm_ref != "remote":
            result = virsh.shutdown(vm_ref, mode,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, debug=True,
                                    ignore_status=True,
                                    readonly=readonly)
            status = result.exit_status
        else:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            local_pwd = params.get("local_pwd", "password")
            local_user = params.get("username", "root")
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("Remote test parameters"
                            " unchanged from default")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                # set up auto ssh login from remote machine to
                # execute commands
                config_opt = ["StrictHostKeyChecking=no"]
                ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                             remote_pwd, hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd,
                                             config_options=config_opt)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = ("virsh -c %s shutdown %s %s"
                           % (remote_uri, vm_name, mode))
                status = session.cmd_status(command, internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
            if expect_msg:
                libvirt.check_result(result, expect_msg.split(';'))
        else:
            if status:
                test.fail("Run failed with right command")
            if not vm.wait_for_shutdown(timeout):
                test.fail("Failed to shutdown in timeout %s" % timeout)
    finally:
        if utils_misc.wait_for(utils_libvirtd.libvirtd_is_running, 60):
            xml_backup.sync()
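If vm.wait_for_shutdown() were not available, the final check could be approximated by polling virsh domstate, as other examples in this listing do; a sketch using only calls already shown above:

# Fallback sketch: poll domstate until the guest reports "shut off".
def shutdown_completed():
    return virsh.domstate(vm_name).stdout.strip() == "shut off"

if not utils_misc.wait_for(shutdown_completed, timeout):
    test.fail("Failed to shutdown in timeout %s" % timeout)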
예제 #52
0
def run_virsh_shutdown(test, params, env):
    """
    Test command: virsh shutdown.

    The command can gracefully shut down a domain.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh shutdown operation.
    4.Recover test environment.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    vm_ref = params.get("shutdown_vm_ref")
    libvirtd = params.get("libvirtd", "on")

    # Run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("shutdown_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    if vm_ref != "remote":
        status = virsh.shutdown(vm_ref, ignore_status=True).exit_status
    else:
        remote_ip = params.get("remote_ip", None)
        remote_pwd = params.get("remote_pwd", None)
        local_ip = params.get("local_ip", None)
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s shutdown %s" % (remote_uri, vm_name)
            status = session.cmd_status(command, internal_timeout=5)
            session.close()
        except error.CmdError:
            status = 1

    # Recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # Check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
예제 #53
0
def run_whql_submission(test, params, env):
    """
    WHQL submission test:
    1) Log into the client machines and into a DTM server machine
    2) Copy the automation program binary (dsso_test_binary) to the server machine
    3) Run the automation program
    4) Pass the program all relevant parameters (e.g. device_data)
    5) Wait for the program to terminate
    6) Parse and report job results
    (logs and HTML reports are placed in test.debugdir)

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    # Log into all client VMs
    login_timeout = int(params.get("login_timeout", 360))
    vms = []
    sessions = []
    for vm_name in params.objects("vms"):
        vms.append(env.get_vm(vm_name))
        vms[-1].verify_alive()
        sessions.append(vms[-1].wait_for_login(timeout=login_timeout))

    # Make sure all NICs of all client VMs are up
    for vm in vms:
        nics = vm.params.objects("nics")
        for nic_index in range(len(nics)):
            s = vm.wait_for_login(nic_index, 600)
            s.close()

    # Collect parameters
    server_address = params.get("server_address")
    server_shell_port = int(params.get("server_shell_port"))
    server_file_transfer_port = int(params.get("server_file_transfer_port"))
    server_studio_path = params.get("server_studio_path", "%programfiles%\\ "
                                    "Microsoft Driver Test Manager\\Studio")
    dsso_test_binary = params.get("dsso_test_binary",
                                  "deps/whql_submission_15.exe")
    dsso_test_binary = utils_misc.get_path(test.virtdir, dsso_test_binary)
    dsso_delete_machine_binary = params.get("dsso_delete_machine_binary",
                                            "deps/whql_delete_machine_15.exe")
    dsso_delete_machine_binary = utils_misc.get_path(test.virtdir,
                                                     dsso_delete_machine_binary)
    test_timeout = float(params.get("test_timeout", 600))

    # Copy dsso binaries to the server
    for filename in dsso_test_binary, dsso_delete_machine_binary:
        rss_client.upload(server_address, server_file_transfer_port,
                          filename, server_studio_path, timeout=60)

    # Open a shell session with the server
    server_session = remote.remote_login("nc", server_address,
                                         server_shell_port, "", "",
                                         sessions[0].prompt,
                                         sessions[0].linesep)
    server_session.set_status_test_command(sessions[0].status_test_command)

    # Get the computer names of the server and clients
    cmd = "echo %computername%"
    server_name = server_session.cmd_output(cmd).strip()
    client_names = [session.cmd_output(cmd).strip() for session in sessions]

    # Delete all client machines from the server's data store
    server_session.cmd("cd %s" % server_studio_path)
    for client_name in client_names:
        cmd = "%s %s %s" % (os.path.basename(dsso_delete_machine_binary),
                            server_name, client_name)
        server_session.cmd(cmd, print_func=logging.debug)

    # Reboot the client machines
    sessions = utils_misc.parallel((vm.reboot, (session,))
                                   for vm, session in zip(vms, sessions))

    # Check the NICs again
    for vm in vms:
        nics = vm.params.objects("nics")
        for nic_index in range(len(nics)):
            s = vm.wait_for_login(nic_index, 600)
            s.close()

    # Run whql_pre_command and close the sessions
    if params.get("whql_pre_command"):
        for session in sessions:
            session.cmd(params.get("whql_pre_command"),
                        int(params.get("whql_pre_command_timeout", 600)))
            session.close()

    # Run the automation program on the server
    pool_name = "%s_pool" % client_names[0]
    submission_name = "%s_%s" % (client_names[0],
                                 params.get("submission_name"))
    cmd = "%s %s %s %s %s %s" % (os.path.basename(dsso_test_binary),
                                 server_name, pool_name, submission_name,
                                 test_timeout, " ".join(client_names))
    server_session.sendline(cmd)

    # Helper function: wait for a given prompt and raise an exception if an
    # error occurs
    def find_prompt(prompt):
        m, o = server_session.read_until_last_line_matches(
            [prompt, server_session.prompt], print_func=logging.info,
            timeout=600)
        if m != 0:
            errors = re.findall("^Error:.*$", o, re.I | re.M)
            if errors:
                raise error.TestError(errors[0])
            else:
                raise error.TestError("Error running automation program: "
                                      "could not find '%s' prompt" % prompt)

    # Tell the automation program which device to test
    find_prompt("Device to test:")
    server_session.sendline(params.get("test_device"))

    # Tell the automation program which jobs to run
    find_prompt("Jobs to run:")
    server_session.sendline(params.get("job_filter", ".*"))

    # Set submission DeviceData
    find_prompt("DeviceData name:")
    for dd in params.objects("device_data"):
        dd_params = params.object_params(dd)
        if dd_params.get("dd_name") and dd_params.get("dd_data"):
            server_session.sendline(dd_params.get("dd_name"))
            server_session.sendline(dd_params.get("dd_data"))
    server_session.sendline()

    # Set submission descriptors
    find_prompt("Descriptor path:")
    for desc in params.objects("descriptors"):
        desc_params = params.object_params(desc)
        if desc_params.get("desc_path"):
            server_session.sendline(desc_params.get("desc_path"))
    server_session.sendline()

    # Set machine dimensions for each client machine
    for vm_name in params.objects("vms"):
        vm_params = params.object_params(vm_name)
        find_prompt(r"Dimension name\b.*:")
        for dp in vm_params.objects("dimensions"):
            dp_params = vm_params.object_params(dp)
            if dp_params.get("dim_name") and dp_params.get("dim_value"):
                server_session.sendline(dp_params.get("dim_name"))
                server_session.sendline(dp_params.get("dim_value"))
        server_session.sendline()

    # Set extra parameters for tests that require them (e.g. NDISTest)
    for vm_name in params.objects("vms"):
        vm_params = params.object_params(vm_name)
        find_prompt(r"Parameter name\b.*:")
        for dp in vm_params.objects("device_params"):
            dp_params = vm_params.object_params(dp)
            if dp_params.get("dp_name") and dp_params.get("dp_regex"):
                server_session.sendline(dp_params.get("dp_name"))
                server_session.sendline(dp_params.get("dp_regex"))
                # Make sure the prompt appears again (if the device isn't found
                # the automation program will terminate)
                find_prompt(r"Parameter name\b.*:")
        server_session.sendline()

    # Wait for the automation program to terminate
    try:
        o = server_session.read_up_to_prompt(print_func=logging.info,
                                             timeout=test_timeout + 300)
        # (test_timeout + 300 is used here because the automation program is
        # supposed to terminate cleanly on its own when test_timeout expires)
        done = True
    except aexpect.ExpectError as e:
        o = e.output
        done = False
예제 #54
0
def run(test, params, env):
    """
    Test migration with special network settings
    1) migrate guest with bridge type interface connected to ovs bridge
    2) migrate guest with direct type interface when a macvtap device name
        exists on dest host

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def check_vm_network_accessed(ping_dest, session=None):
        """
        The operations on the VM that need to be done before or after
        the migration happens

        :param ping_dest: The destination to be pinged
        :param session: The session object to the host
        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        status, output = utils_net.ping(ping_dest,
                                        count=10,
                                        timeout=20,
                                        output_func=logging.debug,
                                        session=session)
        if status != 0:
            test.fail("Ping failed, status: %s, output: %s" % (status, output))

    def vm_sync(vmxml, vm_name=None, virsh_instance=virsh):
        """
        A wrapper to sync vm xml on localhost and remote host

        :param vmxml: domain VMXML instance
        :param vm_name: The name of VM
        :param virsh_instance: virsh instance object
        """
        if vm_name and virsh_instance != virsh:
            remote.scp_to_remote(server_ip, '22', server_user,
                                 server_pwd,
                                 vmxml.xml, vmxml.xml)
            if virsh_instance.domain_exists(vm_name):
                if virsh_instance.is_alive(vm_name):
                    virsh_instance.destroy(vm_name, ignore_status=True)
                virsh_instance.undefine(vm_name, ignore_status=True)
            virsh_instance.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

    def update_iface_xml(vm_name, iface_dict, virsh_instance=virsh):
        """
        Update interfaces for guest

        :param vm_name: The name of VM
        :param iface_dict: The interface configurations params
        :param virsh_instance: virsh instance object
        """
        logging.debug("update iface xml")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        vmxml.remove_all_device_by_type('interface')
        vm_sync(vmxml, vm_name, virsh_instance=virsh_instance)
        iface = interface.Interface('network')
        iface.xml = libvirt.modify_vm_iface(vm_name, "get_xml", iface_dict,
                                            virsh_instance=virsh_instance)
        vmxml.add_device(iface)
        vmxml.xmltreefile.write()
        vm_sync(vmxml, vm_name, virsh_instance=virsh_instance)
        logging.debug("VM XML after updating interface: %s" % vmxml)

    def update_net_dict(net_dict, runner=utils_net.local_runner):
        """
        Update network dict

        :param net_dict: The network dict to be updated
        :param runner: Command runner
        :return: Updated network dict
        """
        if net_dict.get("net_name", "") == "direct-macvtap":
            logging.info("Updating network iface name")
            iface_name = utils_net.get_net_if(runner=runner, state="UP")[0]
            net_dict.update({"forward_iface": iface_name})
        else:
            # TODO: support other types
            logging.info("No need to update net_dict. We only support to "
                         "update direct-macvtap type for now.")
        logging.debug("net_dict is %s" % net_dict)
        return net_dict

    def get_remote_direct_mode_vm_mac(vm_name, uri):
        """
        Get mac of remote direct mode VM

        :param vm_name: The name of VM
        :param uri: The uri on destination
        :return: mac
        :raise: test.fail when the result of virsh domiflist is incorrect
        """
        vm_mac = None
        res = virsh.domiflist(
            vm_name, uri=uri, ignore_status=False).stdout_text.strip().split("\n")
        if len(res) < 2:
            test.fail("Unable to get remote VM's mac: %s" % res)
        else:
            vm_mac = res[-1].split()[-1]
        return vm_mac

    def create_fake_tap(remote_session):
        """
        Create a fake macvtap on destination host.

        :param remote_session: The session to the destination host.
        :return: The new tap device
        """
        tap_cmd = "ls /dev/tap* |awk -F 'tap' '{print $NF}'"
        tap_idx = remote_session.cmd_output(tap_cmd).strip()
        if not tap_idx:
            test.fail("Unable to get tap index using %s."
                      % tap_cmd)
        fake_tap_dest = 'tap'+str(int(tap_idx)+1)
        logging.debug("creating a fake tap %s...", fake_tap_dest)
        cmd = "touch /dev/%s" % fake_tap_dest
        remote_session.cmd(cmd)
        return fake_tap_dest

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    libvirt_version.is_libvirt_feature_supported(params)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    restart_dhclient = params.get("restart_dhclient", "dhclient -r; dhclient")
    ping_dest = params.get("ping_dest", "www.baidu.com")
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")

    target_vm_name = params.get("target_vm_name")
    direct_mode = "yes" == params.get("direct_mode", "no")
    check_macvtap_exists = "yes" == params.get("check_macvtap_exists", "no")
    create_fake_tap_dest = "yes" == params.get("create_fake_tap_dest", "no")
    macvtap_cmd = params.get("macvtap_cmd")
    modify_target_vm = "yes" == params.get("modify_target_vm", "no")
    ovs_bridge_name = params.get("ovs_bridge_name")
    network_dict = eval(params.get("network_dict", '{}'))
    iface_dict = eval(params.get("iface_dict", '{}'))
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}
    cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                 'server_pwd': server_pwd}

    virsh_session_remote = None
    libvirtd_conf = None
    mig_result = None
    target_org_xml = None
    target_vm_session = None
    target_vm = None
    exp_macvtap = []
    fake_tap_dest = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    postcopy_options = params.get("postcopy_options")
    action_during_mig = None
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
        action_during_mig = virsh.migrate_postcopy

    # For safety reasons, back up the guest XML before making changes.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs)

        if target_vm_name:
            target_vm = libvirt_vm.VM(target_vm_name, params, vm.root_dir,
                                      vm.address_cache)
            target_vm.connect_uri = dest_uri
            if not virsh_session_remote.domain_exists(target_vm_name):
                test.error("VM %s should be installed on %s."
                           % (target_vm_name, server_ip))
            # Backup guest's xml on remote
            target_org_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                target_vm_name, virsh_instance=virsh_session_remote)
            # Scp original xml to remote for restoration
            remote.scp_to_remote(server_ip, '22', server_user,
                                 server_pwd,
                                 target_org_xml.xml, target_org_xml.xml)
            logging.debug("target xml is %s" % target_org_xml)

        if ovs_bridge_name:
            status, stdout = utils_net.create_ovs_bridge(ovs_bridge_name)
            if status:
                test.fail("Failed to create ovs bridge on local. Status: %s"
                          "Stdout: %s" % (status, stdout))
            status, stdout = utils_net.create_ovs_bridge(
                ovs_bridge_name, session=remote_session)
            if status:
                test.fail("Failed to create ovs bridge on remote. Status: %s"
                          "Stdout: %s" % (status, stdout))
        if network_dict:
            update_net_dict(network_dict, runner=remote_session.cmd)
            libvirt_network.create_or_del_network(
                network_dict, remote_args=remote_virsh_dargs)
            logging.info("dest: network created")
            update_net_dict(network_dict)
            libvirt_network.create_or_del_network(network_dict)
            logging.info("localhost: network created")

        if target_vm_name:
            if modify_target_vm and iface_dict:
                logging.info("Updating remote VM's interface")
                update_iface_xml(target_vm_name, iface_dict,
                                 virsh_instance=virsh_session_remote)
            target_vm.start()
            target_vm_session = target_vm.wait_for_serial_login(timeout=240)
            check_vm_network_accessed(ping_dest, session=target_vm_session)
            if check_macvtap_exists and macvtap_cmd:
                # Get macvtap device's index on remote after target_vm started
                idx = remote_session.cmd_output(macvtap_cmd).strip()
                if not idx:
                    test.fail("Unable to get macvtap index using %s."
                              % macvtap_cmd)
                # Generate the expected macvtap devices' index list
                exp_macvtap = ['macvtap'+idx, 'macvtap'+str(int(idx)+1)]
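                # Rationale (presumably): the running target VM already owns
                # macvtap<idx>, and the incoming guest should be given the
                # next index, so exactly these two devices are expected.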
                if create_fake_tap_dest:
                    fake_tap_dest = create_fake_tap(remote_session)

        remote_session.close()
        # Change domain network xml
        if iface_dict:
            if "mac" not in iface_dict:
                mac = utils_net.generate_mac_address_simple()
                iface_dict.update({'mac': mac})
            else:
                mac = iface_dict["mac"]

            update_iface_xml(vm_name, iface_dict)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            try:
                vm.start()
            except virt_vm.VMStartError as err:
                test.fail("Failed to start VM: %s" % err)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)
        if not utils_package.package_install('dhcp-client', session=vm_session):
            test.error("Failed to install dhcp-client on guest.")
        utils_net.restart_guest_network(vm_session)
        vm_ip = utils_net.get_guest_ip_addr(vm_session, mac)
        logging.debug("VM IP Addr: %s", vm_ip)

        if direct_mode:
            check_vm_network_accessed(ping_dest, session=vm_session)
        else:
            check_vm_network_accessed(vm_ip)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True, virsh_opt=virsh_options,
                                    func=action_during_mig,
                                    extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret

        # Check network accessibility after migration
        if int(mig_result.exit_status) == 0:
            vm.connect_uri = dest_uri
            if vm.serial_console is not None:
                vm.cleanup_serial_console()
            vm.create_serial_console()
            vm_session_after_mig = vm.wait_for_serial_login(timeout=240)
            vm_session_after_mig.cmd(restart_dhclient)
            check_vm_network_accessed(ping_dest, session=vm_session_after_mig)

            if check_macvtap_exists and macvtap_cmd:
                remote_session = remote.remote_login("ssh", server_ip, "22",
                                                     server_user, server_pwd,
                                                     r'[$#%]')
                # Check macvtap devices' index after migration
                idx = remote_session.cmd_output(macvtap_cmd)
                act_macvtap = ['macvtap'+i for i in idx.strip().split("\n")]
                if act_macvtap != exp_macvtap:
                    test.fail("macvtap devices after migration are incorrect!"
                              " Actual: %s, Expected: %s. "
                              % (act_macvtap, exp_macvtap))
        else:
            if fake_tap_dest:
                res = remote.run_remote_cmd("ls /dev/%s" % fake_tap_dest,
                                            params, runner_on_target)
                libvirt.check_exit_status(res)

        if target_vm_session:
            check_vm_network_accessed(ping_dest, session=target_vm_session)
        # Execute migration from remote
        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)

            cmd = "virsh migrate %s %s %s" % (vm_name, options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s" % (cmd, cmd_result))
            logging.debug("VM is migrated back.")

            vm.connect_uri = bk_uri
            if vm.serial_console is not None:
                vm.cleanup_serial_console()
            vm.create_serial_console()
            vm_session_after_mig_bak = vm.wait_for_serial_login(timeout=240)
            vm_session_after_mig_bak.cmd(restart_dhclient)
            check_vm_network_accessed(ping_dest, vm_session_after_mig_bak)
    finally:
        logging.debug("Recover test environment")
        vm.connect_uri = bk_uri
        migration_test.cleanup_vm(vm, dest_uri)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        if target_vm and target_vm.is_alive():
            target_vm.destroy(gracefully=False)

        if target_org_xml and target_vm_name:
            logging.info("Recovery XML configration for %s.", target_vm_name)
            virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs)
            vm_sync(target_org_xml, vm_name=target_vm_name,
                    virsh_instance=virsh_session_remote)
            virsh_session_remote.close_session()

        if fake_tap_dest:
            remote_session.cmd_output_safe("rm -rf /dev/%s" % fake_tap_dest)

        if network_dict:
            libvirt_network.create_or_del_network(
                network_dict, is_del=True, remote_args=remote_virsh_dargs)
            libvirt_network.create_or_del_network(network_dict, is_del=True)
        if ovs_bridge_name:
            utils_net.delete_ovs_bridge(ovs_bridge_name)
            utils_net.delete_ovs_bridge(ovs_bridge_name, session=remote_session)

        remote_session.close()
        if target_vm_session:
            target_vm_session.close()

        if virsh_session_remote:
            virsh_session_remote.close_session()

        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params,
                                             cleanup=True)
        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        if source_file:
            libvirt.delete_local_disk("file", path=source_file)
Example #55
def run(test, params, env):
    """
    Test command: virsh dommemstat.

    The command gets memory statistics for a domain
    1.Prepare test environment.
    2.When the ibvirtd == "off", stop the libvirtd service.
    3.Perform virsh dommemstat operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    status_error = params.get("status_error", "no")
    vm_ref = params.get("dommemstat_vm_ref", "name")
    libvirtd = params.get("libvirtd", "on")
    extra = params.get("dommemstat_extra", "")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.count("_invalid_"):
        # vm_ref names parameter to fetch
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s" % vm_name

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref != "remote":
        status = virsh.dommemstat(vm_ref, extra, ignore_status=True,
                                  debug=True).exit_status
    else:
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            test.cancel("local/remote ip parameters not set.")
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s dommemstat %s %s" % (remote_uri, vm_name,
                                                        extra)
            status = session.cmd_status(command, internal_timeout=5)
            session.close()
        except process.CmdError:
            status = 1

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
Example #56
    def update_disk(vm, params):
        """
        Update disk for testing.

        :param vm: vm object.
        :param params: the parameters used.
        :return: updated images.
        """
        local_image_list = []
        remote_image_list = []
        vm_did_list = []
        # Change the disk of the vm
        if storage_type == "nfs":
            libvirt.set_vm_disk(vm, params)
        else:
            disk_format = params.get("disk_format", "qcow2")
            disk_num = eval(params.get("disk_num", "1"))
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            # Create disk on remote host
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            libvirt_disk.create_disk("file",
                                     disk_format=disk_format,
                                     path=blk_source,
                                     size=vsize,
                                     session=remote_session)
            remote_image_list.append(blk_source)

            for idx in range(2, disk_num + 1):
                disk_path = os.path.join(os.path.dirname(blk_source),
                                         "test%s.img" % str(idx))
                # Create disk on local
                libvirt_disk.create_disk("file",
                                         disk_format=disk_format,
                                         path=disk_path)
                local_image_list.append(disk_path)

                target_dev = 'vd' + chr(idx + ord('a') - 1)
                new_disk_dict = {"driver_type": disk_format}
                vm_was_running = vm.is_alive()
                libvirt_pcicontr.reset_pci_num(vm_name)
                if vm_was_running and not vm.is_alive():
                    vm.start()
                    vm.wait_for_login().close()
                result = libvirt.attach_additional_device(
                    vm_name, target_dev, disk_path, new_disk_dict, False)
                libvirt.check_exit_status(result)

                libvirt_disk.create_disk("file",
                                         disk_format=disk_format,
                                         path=disk_path,
                                         session=remote_session)

                remote_image_list.append(disk_path)
                vm_did_list.append(target_dev)

            remote_session.close()
        return local_image_list, remote_image_list, vm_did_list
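    # A hedged sketch of how the enclosing run() (not shown here) might use
    # update_disk(); the cleanup mirrors libvirt.delete_local_disk as used
    # elsewhere in this collection.
    #
    #   local_images, remote_images, attached_devs = update_disk(vm, params)
    #   ...
    #   for img in local_images:
    #       libvirt.delete_local_disk("file", path=img)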
Example #57
def run_virsh_setvcpus(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment, destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get current vcpu number.
        """
        vcpus_set = ""
        virsh.dumpxml(vm_name, tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        vcpus_2 = root.getElementsByTagName("vcpu")
        for n in vcpus_2:
            vcpus_set += n.getAttribute("current")
            vcpus_set = int(vcpus_set)
        dom.unlink()
        return vcpus_set

    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()

    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()

    if domain == "remote_name":
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
        if virsh.has_command_help_match(command, "--live") == None:
            status_error = "yes"
        session = remote.remote_login("ssh", remote_ssh_addr, "22", "root",
                                      remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") != None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") != None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") != None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                status_error = "yes"
                break
        status = virsh.setvcpus(dom_option,
                                count_option,
                                options,
                                ignore_status=True).exit_status
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        if status_error == "no":
            if status == 0:
                if pre_vm_state == "shut off":
                    if options == "--config":
                        vcpus_set = len(vm.vcpuinfo())
                    elif options == "--current":
                        vcpus_set = get_current_vcpus()
                    elif options == "--maximum --config":
                        vcpus_set = ""
                        dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                        vcpus_set = dom.getElementsByTagName(
                            "vcpu")[0].firstChild.data
                        vcpus_set = int(vcpus_set)
                        dom.unlink()
                else:
                    vcpus_set = len(vm.vcpuinfo())
                if domain == "id":
                    cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                    output1 = commands.getoutput(cmd_chk)
                    logging.info("guest-info:\n%s" % output1)

    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)

    #check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
Example #58
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection
    """
    test_dict = dict(params)
    vm_name = test_dict.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = test_dict.get("start_vm", "no")

    # Server and client parameters
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    client_ip = test_dict.get("client_ip")
    client_user = test_dict.get("client_user")
    client_pwd = test_dict.get("client_pwd")
    server_cn = test_dict.get("server_cn")
    client_cn = test_dict.get("client_cn")
    target_ip = test_dict.get("target_ip", "")
    # generate remote IP
    if target_ip == "":
        if server_cn:
            target_ip = server_cn
        elif server_ip:
            target_ip = server_ip
        else:
            target_ip = target_ip
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    # Ceph disk parameters
    driver = test_dict.get("test_driver", "qemu")
    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    source_type = test_dict.get("vm_disk_source_type", "file")
    virsh_options = test_dict.get("virsh_options", "--verbose --live")
    vol_name = test_dict.get("vol_name")
    disk_src_protocol = params.get("disk_source_protocol")
    source_file = test_dict.get("disk_source_file")
    disk_format = test_dict.get("disk_format", "qcow2")
    mon_host = params.get("mon_host")
    ceph_key_opt = ""
    attach_disk = False
    # Disk XML file
    disk_xml = None
    # Define ceph_disk conditional variable
    ceph_disk = "yes" == test_dict.get("ceph_disk")

    # For --postcopy enable
    postcopy_options = test_dict.get("postcopy_options")
    if postcopy_options and not virsh_options.count(postcopy_options):
        virsh_options = "%s %s" % (virsh_options, postcopy_options)
        test_dict['virsh_options'] = virsh_options

    # For bi-directional and tls reverse test
    uri_port = test_dict.get("uri_port", ":22")
    uri_path = test_dict.get("uri_path", "/system")
    src_uri = test_dict.get("migration_source_uri", "qemu:///system")
    uri = "%s%s%s://%s%s%s" % (driver, plus, transport, target_ip, uri_port,
                               uri_path)
    test_dict["desuri"] = uri

    # Make sure all parameters are assigned a valid value
    check_parameters(test, test_dict)
    # Check that password-based SSH login to the remote host works
    # ssh_key.setup_ssh_key(server_ip, server_user, server_pwd, port=22)
    remote_session = remote.wait_for_login('ssh', server_ip, '22', server_user,
                                           server_pwd, r"[\#\$]\s*$")
    remote_session.close()

    # Set up remote ssh key and remote /etc/hosts file for bi-directional migration
    migr_vm_back = "yes" == test_dict.get("migrate_vm_back", "no")
    if migr_vm_back:
        ssh_key.setup_remote_ssh_key(server_ip, server_user, server_pwd)
        ssh_key.setup_remote_known_hosts_file(client_ip, server_ip,
                                              server_user, server_pwd)
    # Reset Vm state if needed
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Setup migration context
    migrate_setup = migration.MigrationTest()
    migrate_setup.migrate_pre_setup(test_dict["desuri"], params)

    # Install ceph-common on remote host machine.
    remote_ssh_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r"[\#\$]\s*$")
    if not utils_package.package_install(["ceph-common"], remote_ssh_session):
        test.error("Failed ot install required packages on remote host")
    remote_ssh_session.close()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # Record the initial SELinux mode on the local and remote hosts
        LOCAL_SELINUX_ENFORCING_STATUS = utils_selinux.get_status()
        logging.info("previous local SELinux mode: %s",
                     LOCAL_SELINUX_ENFORCING_STATUS)
        cmd_result = remote.run_remote_cmd('getenforce', params,
                                           runner_on_target)
        REMOTE_SELINUX_ENFORCING_STATUS = cmd_result.stdout_text
        logging.info("previous remote enforce :%s",
                     REMOTE_SELINUX_ENFORCING_STATUS)

        if ceph_disk:
            logging.info("Put local SELinux in permissive mode when testing "
                         "ceph migration")
            utils_selinux.set_status("permissive")

            logging.info("Put remote SELinux in permissive mode")
            cmd = "setenforce permissive"
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            if status:
                test.error("Failed to set remote SELinux to permissive mode")

            # Prepare ceph disk.
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            test_dict['key_file'] = key_file
            test_dict['first_disk'] = vm.get_first_disk_devices()
            ceph_key_opt, secret_uuid = prepare_ceph_disk(
                test_dict, remote_virsh_dargs, test, runner_on_target)
            host_ip = test_dict.get('mon_host')
            disk_image = test_dict.get('disk_img')

            # Build auth information.
            auth_attrs = {}
            auth_attrs['auth_user'] = params.get("auth_user")
            auth_attrs['secret_type'] = params.get("secret_type")
            auth_attrs['secret_uuid'] = secret_uuid
            build_disk_xml(vm_name,
                           disk_format,
                           host_ip,
                           disk_src_protocol,
                           vol_name,
                           disk_image,
                           auth=auth_attrs)

            vm_xml_cxt = process.run("virsh dumpxml %s" % vm_name,
                                     shell=True).stdout_text
            logging.debug("The VM XML with ceph disk source: \n%s", vm_xml_cxt)
            try:
                if vm.is_dead():
                    vm.start()
            except virt_vm.VMStartError as e:
                logging.info("Failed to start VM")
                test.fail("Failed to start VM: %s" % vm_name)

        # Ensure the same VM name doesn't exist on remote host before migrating.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        # Trigger migration
        migrate_vm(test, test_dict)

        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()
            # Pre migration setup for local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = cmd_result.exit_status, cmd_result.stdout_text.strip(
            )
            logging.info(output)
            if status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target)
                test.fail("Failed to run '%s' on remote: %s" % (cmd, output))
    finally:
        logging.info("Recovery test environment")
        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)
        # Ensure VM can be cleaned up on remote host even migrating fail.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        logging.info("Recovery VM XML configration")
        vmxml_backup.sync()
        logging.debug("The current VM XML:\n%s", vmxml_backup.xmltreefile)

        # Clean up ceph environment.
        if disk_src_protocol == "rbd":
            # Clean up secret
            secret_list = get_secret_list()
            if secret_list:
                for secret_uuid in secret_list:
                    virsh.secret_undefine(secret_uuid)
            # Clean up dirty secrets on the remote host if the test involves ceph auth.
            client_name = test_dict.get('client_name')
            client_key = test_dict.get("client_key")
            if client_name and client_key:
                try:
                    remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs)
                    remote_dirty_secret_list = get_secret_list(remote_virsh)
                    for dirty_secret_uuid in remote_dirty_secret_list:
                        remote_virsh.secret_undefine(dirty_secret_uuid)
                except (process.CmdError, remote.SCPError) as detail:
                    test.error(detail)
                finally:
                    remote_virsh.close_session()
            # Delete the disk if it exists.
            disk_src_name = "%s/%s" % (vol_name, test_dict.get('disk_img'))
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, ceph_key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        if LOCAL_SELINUX_ENFORCING_STATUS:
            logging.info("Restore SELinux in original mode")
            utils_selinux.set_status(LOCAL_SELINUX_ENFORCING_STATUS)
        if REMOTE_SELINUX_ENFORCING_STATUS:
            logging.info("Put remote SELinux in original mode")
            cmd = "yes yes | setenforce %s" % REMOTE_SELINUX_ENFORCING_STATUS
            remote.run_remote_cmd(cmd, params, runner_on_target)

        # Remove known hosts on local host
        cmd = "ssh-keygen -R  %s" % server_ip
        process.run(cmd, ignore_status=True, shell=True)

        # Remove known hosts on remote host
        cmd = "ssh-keygen -R  %s" % client_ip
        remote.run_remote_cmd(cmd, params, runner_on_target)
Example #59
def run(test, params, env):
    """
    Test command: virsh destroy.

    The command can destroy (stop) a domain.
    1.Prepare test environment.
    2.When the ibvirtd == "off", stop the libvirtd service.
    3.Perform virsh destroy operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    vm_ref = params.get("destroy_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "LOCAL.EXAMPLE.COM")
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        raise error.TestNAError(
            "Remote test parameters unchanged from default")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("destroy_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref != "remote":
        status = virsh.destroy(vm_ref, ignore_status=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri, debug=True).exit_status
        output = ""
    else:
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22",
                                          "root", remote_pwd, "#")
            session.cmd_output('LANG=C')

            # Set up remote-to-remote login to the local host
            ssh_key.setup_remote_ssh_key(remote_ip, "root", remote_pwd,
                                         local_ip, "root", local_pwd)

            command = "virsh -c %s destroy %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except error.CmdError:
            status = 1

    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command! "
                                 "Output:\n%s" % output)
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command! Output:\n%s"
                                 % output)
Example #60
def run(test, params, env):
    """
    Test command: virsh shutdown.

    The command can gracefully shut down a domain.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh shutdown operation.
    4.Recover test environment.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("shutdown_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    agent = ("yes" == params.get("shutdown_agent", "no"))
    mode = params.get("shutdown_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    libvirtd = params.get("libvirtd", "on")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    try:
        # Add or remove qemu-agent from guest before test
        vm.prepare_guest_agent(channel=agent, start=agent)
        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        # run test case
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = "%s %s" % (vm_name, params.get("shutdown_extra"))
        elif vm_ref == "uuid":
            vm_ref = domuuid

        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        if vm_ref != "remote":
            status = virsh.shutdown(vm_ref,
                                    mode,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri,
                                    debug=True,
                                    ignore_status=True).exit_status
        else:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError(
                    "Remote test parameters unchanged from default")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = ("virsh -c %s shutdown %s %s" %
                           (remote_uri, vm_name, mode))
                status = session.cmd_status(command, internal_timeout=5)
                session.close()
            except error.CmdError:
                status = 1

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status:
                raise error.TestFail("Run failed with right command")
    finally:
        xml_backup.sync()