Example #1
 def do_operation():
     """
     Do an operation in the guest OS with a VF and check the OS behavior after the operation.
     """
     if operation == "resume_suspend":
         try:
             virsh.suspend(vm.name, debug=True, ignore_status=False)
             virsh.resume(vm.name, debug=True, ignore_status=False)
             get_ip_by_mac(mac_addr, timeout=120)
         except process.CmdError as detail:
             err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name, detail)
             test.fail(err_msg)
     if operation == "reboot":
         try:
             if vm.serial_console is not None:
                 vm.cleanup_serial_console()
                 vm.create_serial_console()
             virsh.reboot(vm.name, ignore_status=False)
             get_ip_by_mac(mac_addr, timeout=120)
         except process.CmdError as detail:
             err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
             test.fail(err_msg)
     if operation == "save":
         result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
         utils_test.libvirt.check_exit_status(result, expect_error=True)
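A note on the last branch: managedsave of a guest that still has a passthrough VF (hostdev) assigned is treated as an expected failure, which is why the result is checked with expect_error=True. A minimal sketch of that expected-failure pattern, assuming the same avocado-vt helpers used above and a vm_name supplied by the caller:

from virttest import virsh
from virttest.utils_test import libvirt


def check_managedsave_rejected(vm_name):
    # managedsave should be rejected while the VF is assigned to the guest,
    # so a zero exit status means the command unexpectedly succeeded.
    result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
    libvirt.check_exit_status(result, expect_error=True)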
Example #2
 def vm_managedsave_loop(vm_name, loop_range, libvirtd):
     """
     Run a loop of managedsave command and check its result.
     """
     if vm.is_dead():
         virsh.start(vm_name)
     for i in range(int(loop_range)):
         logging.debug("Test loop: %s" % i)
         virsh.managedsave(vm_name)
         virsh.start(vm_name)
     # Check libvirtd status.
     if not libvirtd.is_running():
         raise error.TestFail("libvirtd is stopped after cmd")
     # Check vm status.
     if vm.state() != "running":
         raise error.TestFail("Guest isn't in running state")
Example #3
def manipulate_domain(vm_name, action, recover=False):
    """
    Save/managedsave/S3/S4 domain or recover it.
    """
    tmp_dir = data_dir.get_tmp_dir()
    save_file = os.path.join(tmp_dir, vm_name + ".save")
    if not recover:
        if action == "save":
            save_option = ""
            result = virsh.save(vm_name, save_file, save_option,
                                ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif action == "managedsave":
            managedsave_option = ""
            result = virsh.managedsave(vm_name, managedsave_option,
                                       ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif action == "s3":
            suspend_target = "mem"
            result = virsh.dompmsuspend(vm_name, suspend_target,
                                        ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif action == "s4":
            suspend_target = "disk"
            result = virsh.dompmsuspend(vm_name, suspend_target,
                                        ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
            # Wait domain state change: 'in shutdown' -> 'shut off'
            utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
        else:
            logging.debug("No operation for the domain")

    else:
        if action == "save":
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                os.remove(save_file)
            else:
                raise error.TestError("No save file for domain restore")
        elif action in ["managedsave", "s4"]:
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif action == "s3":
            suspend_target = "mem"
            result = virsh.dompmwakeup(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        else:
            logging.debug("No need recover the domain")
Example #4
    def managedsave_hook():
        """
        Do managedsave operation and check the results.
        """
        hook_para = "%s %s" % (hook_file, vm_name)
        save_file = os.path.join(test.tmpdir,
                                 "%s.save" % vm_name)
        disk_src = vm.get_first_disk_devices()['source']
        if domainxml_test:
            disk_dist = "/tmp/%s.move" % vm_name
            shutil.copy(disk_src, disk_dist)
            script = (hook_script %
                      (vm_name, disk_src, disk_dist))
            prepare_hook_file(script)
        elif basic_test:
            prepare_hook_file(hook_script %
                              (vm_name, hook_log))
        ret = virsh.managedsave(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if domainxml_test:
            disk_src_save = vm.get_first_disk_devices()['source']
            if disk_src != disk_src_save:
                raise error.TestFail("Failed to check hooks for"
                                     " managedsave operation")
        vm.start()
        if os.path.exists(save_file):
            os.remove(save_file)
        if domainxml_test:
            disk_src_restore = vm.get_first_disk_devices()['source']
            if disk_dist != disk_src_restore:
                raise error.TestFail("Failed to check hooks for"
                                     " managedsave operation")
            vm.destroy()
            if os.path.exists(disk_dist):
                os.remove(disk_dist)
            vmxml_backup.sync()

        if basic_test:
            hook_str = hook_para + " restore begin -"
            if not check_hooks(hook_str):
                raise error.TestFail("Failed to check "
                                     "managedsave hooks.")
Example #5
def run(test, params, env):
    """
    Test command: virsh domjobinfo.

    The command returns information about jobs running on a domain.
    1. Prepare the test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform the virsh domjobinfo operation.
    4. Recover the test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    pre_vm_state = params.get("domjobinfo_pre_vm_state", "null")
    vm_ref = params.get("domjobinfo_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    tmp_file = os.path.join(test.tmpdir, '%s.tmp' % vm_name)

    # prepare the state of vm
    if pre_vm_state == "dump":
        virsh.dump(vm_name, tmp_file)
    elif pre_vm_state == "save":
        virsh.save(vm_name, tmp_file)
    elif pre_vm_state == "restore":
        virsh.save(vm_name, tmp_file)
        virsh.restore(tmp_file)
    elif pre_vm_state == "managedsave":
        virsh.managedsave(vm_name)

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domjobinfo_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    status = virsh.domjobinfo(vm_ref, ignore_status=True).exit_status

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
Example #6
def run(test, params, env):
    """
    Test command: virsh list.

    1) Filter parameters according to libvirtd's version.
    2) Prepare the domain's existence state: transient, managed-save.
    3) Prepare libvirtd's status.
    4) Execute the list command.
    5) Check the result.
    """
    def list_local_domains_on_remote(options_ref, remote_ip, remote_passwd,
                                     local_ip, remote_user, local_user,
                                     local_pwd):
        """
        Create a virsh list command and execute it on remote host.
        It will list local domains on remote host.

        :param options_ref: options for the virsh list command.
        :param remote_ip: remote host's ip.
        :param remote_passwd: remote host's password.
        :param local_ip: local ip, used to create the uri in virsh list.
        :return: status and output of the virsh list command.
        """
        complete_uri = libvirt_vm.complete_uri(local_ip)
        command_on_remote = ("virsh -c %s list %s"
                             % (complete_uri, options_ref))
        try:
            # setup autologin for ssh from remote machine to execute commands
            # remotely
            config_opt = ["StrictHostKeyChecking=no"]
            ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                         remote_passwd, hostname2=local_ip,
                                         user2=local_user,
                                         password2=local_pwd,
                                         config_options=config_opt)
            session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                          remote_passwd, "#")
            time.sleep(5)
            status, output = session.cmd_status_output(
                command_on_remote, internal_timeout=30)
        except Exception as info:
            logging.error("Shell failed to execute command from"
                          " remote")
            return 1, info
        time.sleep(5)
        session.close()
        return int(status), output

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    options_ref = params.get("list_options_ref", "")
    list_ref = params.get("list_type_ref", "")
    vm_ref = params.get("vm_ref", "")
    status_error = params.get("status_error", "no")
    addition_status_error = params.get("addition_status_error", "no")
    desc = params.get("list_desc", "")
    libvirtd = params.get("libvirtd", "on")
    remote_ref = params.get("remote_ref", "")
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip")
    remote_user = params.get("remote_user", "root")
    local_user = params.get("username", "root")
    local_pwd = params.get("local_pwd", None)

    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    domid = vm.get_id()

    # Some parameters are not supported on old libvirt, skip them.
    help_info = virsh.help("list").stdout.strip()
    if vm_ref and not re.search(vm_ref, help_info):
        raise exceptions.TestSkipError("This version do not support vm type:%s"
                                       % vm_ref)
    if list_ref and not re.search(list_ref, help_info):
        raise exceptions.TestSkipError("This version do not support list"
                                       " type:%s" % list_ref)

    # If a transient domain is destroyed, it will disappear.
    if vm_ref == "transient" and options_ref == "inactive":
        logging.info("Set addition_status_error to yes")
        logging.info(
            "because transient domain will disappear after destroyed.")
        addition_status_error = "yes"

    if vm_ref == "transient":
        vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vm.undefine()
    elif vm_ref == "managed-save":
        virsh.managedsave(vm_name, ignore_status=True, print_info=True)

    try:
        # run test case
        if list_ref == "--uuid":
            result_expected = domuuid
            logging.info("%s's uuid is: %s", vm_name, domuuid)
        elif list_ref == "--title":
            vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if options_ref == "inactive":
                virsh.desc(vm_name, "--config --title", desc)
            else:
                virsh.desc(vm_name, "--live --title", desc)
            result_expected = desc
            logging.info("%s's title is: %s", vm_name, desc)
        else:
            result_expected = vm_name
            logging.info("domain's name is: %s", vm_name)

        if options_ref == "vm_id":
            logging.info("%s's running-id is: %s", vm_name, domid)
            options_ref = "%s %s" % (domid, list_ref)
        elif options_ref == "vm_uuid":
            logging.info("%s's uuid is: %s", vm_name, domuuid)
            options_ref = "%s %s" % (domuuid, list_ref)
        elif options_ref == "inactive":
            vm.destroy()
            options_ref = "--inactive %s" % list_ref
        elif options_ref == "vm_name":
            options_ref = "%s %s" % (vm_name, list_ref)
        elif options_ref == "all":
            options_ref = "--all %s" % list_ref
        elif options_ref == "":
            options_ref = "%s" % list_ref

        # Prepare libvirtd status
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        if remote_ref == "remote":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise exceptions.TestSkipError(
                    "Remote test parameters unchanged from default")
            logging.info("Execute virsh command on remote host %s.", remote_ip)
            status, output = list_local_domains_on_remote(options_ref,
                                                          remote_ip,
                                                          remote_pwd,
                                                          local_ip,
                                                          remote_user,
                                                          local_user,
                                                          local_pwd)
            logging.info("Status:%s", status)
            logging.info("Output:\n%s", output)
        else:
            if vm_ref:
                options_ref = "%s --%s" % (options_ref, vm_ref)
            result = virsh.dom_list(
                options_ref, ignore_status=True, print_info=True)
            status = result.exit_status
            output = result.stdout.strip()

    except Exception as output:
        status = True
        logging.error("Exception: %s" % output)

    finally:
        # Recover libvirtd service status
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # Recover of domain
        if vm_ref == "transient" or list_ref == "--title":
            vm_backup.sync()
        elif vm_ref == "managed-save":
            # Recover saved guest.
            virsh.managedsave_remove(vm_name, ignore_status=True,
                                     print_info=True)

        # Check result
        status_error = (status_error == "no") and \
                       (addition_status_error == "no")
        if vm_ref == "managed-save":
            saved_output = re.search(vm_name + r"\s+saved", output)
            if saved_output:
                output = saved_output.group(0)
            else:
                output = ""

        if not status_error:
            if not status and re.search(result_expected, output):
                raise exceptions.TestFail("Run successful with wrong command!")
        else:
            if status:
                raise exceptions.TestFail("Run failed with right command.")
            if not re.search(result_expected, output):
                raise exceptions.TestFail("Run successful but result is not"
                                          " expected.")
Example #7
def run(test, params, env):
    """
    Test virtiofs filesystem device:

    1.Start guest with 1/2 virtiofs filesystem devices.
    2.Start 2 guest with the same virtiofs filesystem device.
    3.Coldplug/Coldunplug virtiofs filesystem device
    4.Share data between guests and host.
    5.Lifecycle for guest with virtiofs filesystem device.
    """
    def generate_expected_process_option(expected_results):
        """
        Generate expected virtiofsd process option
        """
        if cache_mode != "auto":
            expected_results = "cache=%s" % cache_mode
        if xattr == "on":
            expected_results += ",xattr"
        elif xattr == "off":
            expected_results += ",no_xattr"
        if flock == "on":
            expected_results += ",flock"
        else:
            expected_results += ",no_flock"
        if lock_posix == "on":
            expected_results += ",posix_lock"
        else:
            expected_results += ",no_posix_lock"
        logging.debug(expected_results)
        return expected_results

    def shared_data(vm_names, fs_devs):
        """
        Share data between guests and host:
        1. Mount the dir in the guest;
        2. Write a file in the guest;
        3. Check that the md5sum values are the same in the guests and on the host.
        """
        md5s = []
        for vm in vms:
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                logging.debug(fs_dev)
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=False)
                session.cmd('mkdir -p %s' % mount_dir)
                logging.debug("mount virtiofs dir in guest")
                cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'],
                                                   mount_dir)
                status, output = session.cmd_status_output(cmd, timeout=300)
                if status != 0:
                    session.close()
                    test.fail("mount virtiofs dir failed: %s" % output)
                if vm == vms[0]:
                    filename_guest = mount_dir + '/' + vm.name
                    cmd = "dd if=/dev/urandom of=%s bs=1M count=512 oflag=direct" % filename_guest
                    status, output = session.cmd_status_output(cmd,
                                                               timeout=300)
                    if status != 0:
                        session.close()
                        test.fail("Write data failed: %s" % output)
                md5_value = session.cmd_status_output(
                    "md5sum %s" % filename_guest)[1].strip().split()[0]
                md5s.append(md5_value)
                logging.debug(md5_value)
                md5_value = process.run(
                    "md5sum %s" %
                    filename_guest).stdout_text.strip().split()[0]
                logging.debug(md5_value)
                md5s.append(md5_value)
            session.close()
        if len(set(md5s)) != len(fs_devs):
            test.fail("The md5sum value are not the same in guests and host")

    def launch_externally_virtiofs(source_dir, source_socket):
        """
        Launch externally virtiofs

        :param source_dir:  the dir shared on host
        :param source_socket: the socket file listened on
        """
        process.run('chcon -t virtd_exec_t %s' % path,
                    ignore_status=False,
                    shell=True)
        cmd = "systemd-run %s --socket-path=%s -o source=%s" % (
            path, source_socket, source_dir)
        try:
            process.run(cmd, ignore_status=False, shell=True)
            # Make sure the socket is created
            utils_misc.wait_for(lambda: os.path.exists(source_socket),
                                timeout=3)
            process.run("chown qemu:qemu %s" % source_socket,
                        ignore_status=False)
            process.run('chcon -t svirt_image_t %s' % source_socket,
                        ignore_status=False,
                        shell=True)
        except Exception as err:
            cmd = "pkill virtiofsd"
            process.run(cmd, shell=True)
            test.fail("{}".format(err))

    def prepare_stress_script(script_path, script_content):
        """
        Refer to xfstest generic/531. Create stress test script to create a lot of unlinked files.

        :param script_path: The path of the script
        :param script_content: The content of the stress script
        """
        logging.debug("stress script path: %s content: %s" %
                      (script_path, script_content))
        script_lines = script_content.split(';')
        try:
            with open(script_path, 'w') as fd:
                fd.write('\n'.join(script_lines))
            os.chmod(script_path, 0o777)
        except Exception as e:
            test.error("Prepare the guest stress script failed %s" % e)

    def run_stress_script(session, script_path):
        """
        Run stress script in the guest

        :param session: guest session
        :param script_path: The path of script in the guest
        """
        # Set ULIMIT_NOFILE to increase the number of unlinked files
        session.cmd("ulimit -n 500000 && /usr/bin/python3 %s" % script_path,
                    timeout=120)

    def umount_fs(vm):
        """
        Unmount the filesystem in guest

        :param vm: filesystem in this vm that should be unmounted
        """
        if vm.is_alive():
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('umount -f %s' % mount_dir, ignore_all_errors=True)
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
            session.close()

    def check_detached_xml(vm):
        """
        Check whether there is xml about the filesystem device
        in the vm xml

        :param vm: the vm to be checked
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        filesystems = vmxml.devices.by_device_tag('filesystem')
        if filesystems:
            test.fail("There should be no filesystem devices in guest "
                      "xml after hotunplug")

    def check_filesystem_in_guest(vm, fs_dev):
        """
        Check whether there is virtiofs in vm

        :param vm: the vm to be checked
        :param fs_dev: the virtiofs device to be checked
        """
        session = vm.wait_for_login()
        mount_dir = '/var/tmp/' + fs_dev.target['dir']
        cmd = "mkdir %s; mount -t virtiofs %s %s" % (
            mount_dir, fs_dev.target['dir'], mount_dir)
        status, output = session.cmd_status_output(cmd, timeout=300)
        session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
        if not status:
            test.fail(
                "Mount virtiofs should failed after hotunplug device. %s" %
                output)
        session.close()

    start_vm = params.get("start_vm", "no")
    vm_names = params.get("vms", "avocado-vt-vm1").split()
    cache_mode = params.get("cache_mode", "none")
    xattr = params.get("xattr", "on")
    lock_posix = params.get("lock_posix", "on")
    flock = params.get("flock", "on")
    path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd")
    queue_size = int(params.get("queue_size", "512"))
    driver_type = params.get("driver_type", "virtiofs")
    guest_num = int(params.get("guest_num", "1"))
    fs_num = int(params.get("fs_num", "1"))
    vcpus_per_cell = int(params.get("vcpus_per_cell", 2))
    dir_prefix = params.get("dir_prefix", "mount_tag")
    error_msg_start = params.get("error_msg_start", "")
    error_msg_save = params.get("error_msg_save", "")
    status_error = params.get("status_error", "no") == "yes"
    socket_file_checking = params.get("socket_file_checking", "no") == "yes"
    suspend_resume = params.get("suspend_resume", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    coldplug = params.get("coldplug", "no") == "yes"
    hotplug_unplug = params.get("hotplug_unplug", "no") == "yes"
    detach_device_alias = params.get("detach_device_alias", "no") == "yes"
    extra_hugepages = params.get_numeric("extra_hugepages")
    edit_start = params.get("edit_start", "no") == "yes"
    with_hugepages = params.get("with_hugepages", "yes") == "yes"
    with_numa = params.get("with_numa", "yes") == "yes"
    with_memfd = params.get("with_memfd", "no") == "yes"
    source_socket = params.get("source_socket", "/var/tmp/vm001.socket")
    launched_mode = params.get("launched_mode", "auto")
    destroy_start = params.get("destroy_start", "no") == "yes"
    bug_url = params.get("bug_url", "")
    script_content = params.get("stress_script", "")
    stdio_handler_file = "file" == params.get("stdio_handler")

    fs_devs = []
    vms = []
    vmxml_backups = []
    expected_fails_msg = []
    expected_results = ""
    host_hp_size = utils_memory.get_huge_page_size()
    backup_huge_pages_num = utils_memory.get_num_huge_pages()
    huge_pages_num = 0

    if len(vm_names) != guest_num:
        test.cancel("This test needs exactly %d vms." % guest_num)

    if not libvirt_version.version_compare(7, 0, 0) and not with_numa:
        test.cancel("Not supported without NUMA before 7.0.0")

    if not libvirt_version.version_compare(7, 6, 0) and destroy_start:
        test.cancel("Bug %s is not fixed on current build" % bug_url)

    try:
        # Define filesystem device xml
        for index in range(fs_num):
            driver = {'type': driver_type, 'queue': queue_size}
            source_dir = os.path.join('/var/tmp/',
                                      str(dir_prefix) + str(index))
            logging.debug(source_dir)
            if not os.path.isdir(source_dir):
                os.mkdir(source_dir)
            target_dir = dir_prefix + str(index)
            source = {'socket': source_socket}
            target = {'dir': target_dir}
            if launched_mode == "auto":
                binary_keys = [
                    'path', 'cache_mode', 'xattr', 'lock_posix', 'flock'
                ]
                binary_values = [path, cache_mode, xattr, lock_posix, flock]
                binary_dict = dict(zip(binary_keys, binary_values))
                source = {'dir': source_dir}
                accessmode = "passthrough"
                fsdev_keys = [
                    'accessmode', 'driver', 'source', 'target', 'binary'
                ]
                fsdev_values = [
                    accessmode, driver, source, target, binary_dict
                ]
            else:
                fsdev_keys = ['driver', 'source', 'target']
                fsdev_values = [driver, source, target]
            fsdev_dict = dict(zip(fsdev_keys, fsdev_values))
            logging.debug(fsdev_dict)
            fs_dev = libvirt_device_utils.create_fs_xml(
                fsdev_dict, launched_mode)
            logging.debug(fs_dev)
            fs_devs.append(fs_dev)

        #Start guest with virtiofs filesystem device
        for index in range(guest_num):
            logging.debug("prepare vm %s", vm_names[index])
            vm = env.get_vm(vm_names[index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            if vmxml.max_mem < 1024000:
                vmxml.max_mem = 1024000
            if with_hugepages:
                huge_pages_num += vmxml.max_mem // host_hp_size + extra_hugepages
                utils_memory.set_num_huge_pages(huge_pages_num)
            vmxml.remove_all_device_by_type('filesystem')
            vmxml.sync()
            numa_no = None
            if with_numa:
                numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1
            vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name,
                                      vmxml.vcpu,
                                      numa_number=numa_no)
            vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,
                                               access_mode="shared",
                                               hpgs=with_hugepages,
                                               memfd=with_memfd)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            logging.debug(vmxml)
            if launched_mode == "externally":
                launch_externally_virtiofs(source_dir, source_socket)
            if coldplug:
                ret = virsh.attach_device(vm_names[index],
                                          fs_devs[0].xml,
                                          flagstr='--config',
                                          debug=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
            else:
                if not hotplug_unplug:
                    for fs in fs_devs:
                        vmxml.add_device(fs)
                        vmxml.sync()
            logging.debug(vmxml)
            libvirt_pcicontr.reset_pci_num(vm_names[index])
            result = virsh.start(vm_names[index], debug=True)
            if hotplug_unplug:
                if stdio_handler_file:
                    qemu_config = LibvirtQemuConfig()
                    qemu_config.stdio_handler = "file"
                    utils_libvirtd.Libvirtd().restart()
                for fs_dev in fs_devs:
                    ret = virsh.attach_device(vm_names[index],
                                              fs_dev.xml,
                                              ignore_status=True,
                                              debug=True)
                    libvirt.check_exit_status(ret, status_error)

                if status_error:
                    return
            if status_error and not managedsave:
                expected_error = error_msg_start
                utils_test.libvirt.check_exit_status(result, expected_error)
                return
            else:
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            expected_results = generate_expected_process_option(
                expected_results)
            if launched_mode == "auto":
                cmd = 'ps aux | grep virtiofsd | head -n 1'
                utils_test.libvirt.check_cmd_output(cmd,
                                                    content=expected_results)

        if managedsave:
            expected_error = error_msg_save
            result = virsh.managedsave(vm_names[0],
                                       ignore_status=True,
                                       debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        else:
            shared_data(vm_names, fs_devs)
            if suspend_resume:
                virsh.suspend(vm_names[0], debug=True, ignore_status=False)
                time.sleep(30)
                virsh.resume(vm_names[0], debug=True, ignore_status=False)
            elif destroy_start:
                session = vm.wait_for_login(timeout=120)
                # Prepare the guest test script
                script_path = os.path.join(fs_devs[0].source["dir"], "test.py")
                script_content %= (fs_devs[0].source["dir"],
                                   fs_devs[0].source["dir"])
                prepare_stress_script(script_path, script_content)
                # Run guest stress script
                stress_script_thread = threading.Thread(
                    target=run_stress_script, args=(session, script_path))
                stress_script_thread.daemon = True
                stress_script_thread.start()
                # Create a lot of unlink files
                time.sleep(60)
                virsh.destroy(vm_names[0], debug=True, ignore_status=False)
                ret = virsh.start(vm_names[0], debug=True)
                libvirt.check_exit_status(ret)
            elif edit_start:
                vmxml_virtio_backup = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_names[0])
                if vm.is_alive():
                    virsh.destroy(vm_names[0])
                    cmd = "virt-xml %s --edit --qemu-commandline '\-foo'" % vm_names[
                        0]
                    cmd_result = process.run(cmd,
                                             ignore_status=True,
                                             shell=True)
                    logging.debug(virsh.dumpxml(vm_names[0]))
                    if cmd_result.exit_status:
                        test.error("virt-xml edit guest failed: %s" %
                                   cmd_result)
                    result = virsh.start(vm_names[0],
                                         ignore_status=True,
                                         debug=True)
                    if error_msg_start:
                        expected_fails_msg.append(error_msg_start)
                    utils_test.libvirt.check_result(
                        result, expected_fails=expected_fails_msg)
                    if not libvirt_version.version_compare(6, 10, 0):
                        # Because of bug #1897105, it was fixed in libvirt-6.10.0,
                        # before this version, need to recover the env manually.
                        cmd = "pkill virtiofsd"
                        process.run(cmd, shell=True)
                    if not vm.is_alive():
                        # Restoring vm and check if vm can start successfully
                        vmxml_virtio_backup.sync()
                        virsh.start(vm_names[0],
                                    ignore_status=False,
                                    shell=True)
            elif socket_file_checking:
                result = virsh.domid(vm_names[0])
                domid = result.stdout.strip()
                domain_dir = "var/lib/libvirt/qemu/domain-" + domid + '-' + vm_names[
                    0]
                if result.exit_status:
                    test.fail("Get domid failed.")
                for fs_dev in fs_devs:
                    alias = fs_dev.alias['name']
                    expected_pid = domain_dir + alias + '-fs.pid'
                    expected_sock = alias + '-fs.sock'
                    status1 = process.run('ls -l %s' % expected_pid,
                                          shell=True).exit_status
                    status2 = process.run('ls -l %s' % expected_sock,
                                          shell=True).exit_status
                    if not (status1 and status2):
                        test.fail(
                            "The socket and pid file is not as expected")
            elif hotplug_unplug:
                for vm in vms:
                    umount_fs(vm)
                    for fs_dev in fs_devs:
                        if detach_device_alias:
                            alias = fs_dev.alias['name']
                            cmd = 'lsof /var/log/libvirt/qemu/%s-%s-virtiofsd.log' % (
                                vm.name, alias)
                            output = process.run(cmd).stdout_text.splitlines()
                            for item in output[1:]:
                                if stdio_handler_file:
                                    if item.split()[0] != "virtiofsd":
                                        test.fail(
                                            "When setting stdio_handler as file, the command"
                                            "to write log should be virtiofsd!"
                                        )
                                else:
                                    if item.split()[0] != "virtlogd":
                                        test.fail(
                                            "When setting stdio_handler as logd, the command"
                                            "to write log should be virtlogd!")
                            ret = virsh.detach_device_alias(
                                vm.name,
                                alias,
                                ignore_status=True,
                                debug=True,
                                wait_for_event=True,
                                event_timeout=10)
                        else:
                            ret = virsh.detach_device(vm.name,
                                                      fs_dev.xml,
                                                      ignore_status=True,
                                                      debug=True,
                                                      wait_for_event=True)
                        libvirt.check_exit_status(ret, status_error)
                        check_filesystem_in_guest(vm, fs_dev)
                    check_detached_xml(vm)
    finally:
        for vm in vms:
            if vm.is_alive():
                umount_fs(vm)
                vm.destroy(gracefully=False)
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
        for index in range(fs_num):
            process.run('rm -rf %s' % '/var/tmp/' + str(dir_prefix) +
                        str(index),
                        ignore_status=False)
            process.run('rm -rf %s' % source_socket,
                        ignore_status=False,
                        shell=True)
        if launched_mode == "externally":
            process.run('restorecon %s' % path,
                        ignore_status=False,
                        shell=True)
        utils_memory.set_num_huge_pages(backup_huge_pages_num)
        if stdio_handler_file:
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
Example #8
                bash_cmd % (managed_save_file, managed_save_file, "0", flags),
                flags)
            # Wait for VM in running state
            wait_for_state("running")
        elif test_libvirt_guests:
            logging.debug("libvirt-guests status: %s", libvirt_guests.status())
            if multi_guests:
                check_multi_guests(multi_guests, start_delay, libvirt_guests)

            if check_flags:
                check_guest_flags(bash_cmd, flags)

        else:
            # Ensure VM is running
            utils_misc.wait_for(lambda: vm.state() == "running", 10)
            ret = virsh.managedsave(vm_ref, options=option, ignore_status=True)
            status = ret.exit_status
            # The progress information is output in the error message
            error_msg = ret.stderr.strip()
            if move_saved_file:
                cmd = "echo > %s" % managed_save_file
                utils.run(cmd)

            # recover libvirtd service start
            if libvirtd_state == "off":
                libvirtd.start()

            if status_error:
                if not status:
                    raise error.TestFail(
                        "Run successfully with wrong command!")
Example #9
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1. Prepare the test environment.
    2. Back up the VM's information to an xml file.
    3. When libvirtd == "off", stop the libvirtd service.
    4. Perform the virsh undefine operation.
    5. Recover the test environment (libvirtd service, VM).
    6. Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Back up the xml file. A Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(test.tmpdir, snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    raise error.TestNAError("This domain has snapshot(s), "
                                            "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                raise error.TestFail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            virsh.attach_disk(vm_name, volume, disk_target, "--config")

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref, option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True, debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            try:
                uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (error.CmdError, remote.LoginError, aexpect.ShellError) as de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except error.CmdError as detail:
                logging.error("Detail: %s", detail)
Example #10
def run(test, params, env):
    """
    Test memory device (dimm) attach and detach.

    1. Prepare the test environment, destroy or suspend a VM.
    2. Prepare the memory device xml.
    3. Edit the domain xml and start the domain.
    4. Perform the test operation.
    5. Recover the test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write to a file on it,
        # it is the memory operation
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def mount_hugepages(page_size):
        """
        To mount hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size

        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)

    def setup_hugepages(page_size=2048, shp_num=2000):
        """
        To setup hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        :param shp_num: number of hugepage, string type
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        To recover hugepages
        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def check_qemu_cmd(max_mem_rt, tg_size):
        """
        Check qemu command line options.
        :param max_mem_rt: size of max memory
        :param tg_size: Target hotplug memory size
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'"
                    % (max_mem_slots, max_mem_rt))
        if tg_size:
            size = int(tg_size) * 1024
            cmd_str = r'memdimm.\|memory-backend-ram,id=ram-node.'
            cmd += (" | grep 'memory-backend-ram,id=%s' | grep 'size=%s"
                    % (cmd_str, size))
            if pg_size:
                cmd += ",host-nodes=%s" % node_mask
                if numa_memnode:
                    for node in numa_memnode:
                        if ('nodeset' in node and
                                node['nodeset'] in node_mask):
                            cmd += ",policy=%s" % node['mode']
                cmd += ".*pc-dimm,node=%s" % tg_node
            if mem_addr:
                cmd += (".*slot=%s,addr=%s" %
                        (mem_addr['slot'], int(mem_addr['base'], 16)))
            cmd += "'"
        # Run the command
        result = process.run(cmd, shell=True, verbose=True, ignore_status=True)
        if result.exit_status:
            test.fail('Qemu command check fail.')

    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s"
               % (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: vm.get_totalmem_sys(online) != int(old_mem), 30, first=20.0)
        new_mem = vm.get_totalmem_sys(online)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "attach memory device")

        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "detach memory device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            if at_mem:
                if at_times:
                    assert int(max_mem) + (int(tg_size) *
                                           at_times) == xml_max_mem
                else:
                    assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                if at_times:
                    assert int(cur_mem) + (int(tg_size) *
                                           at_times) == xml_cur_mem
                else:
                    assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                memory_devices = 1
                if at_times:
                    memory_devices = at_times
                if len(mem_dev) != memory_devices:
                    test.fail("Found wrong number of memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                if at_times:
                    assert int(new_max_mem) - (int(tg_size) *
                                               at_times) == xml_max_mem
                    assert int(new_cur_mem) - (int(tg_size) *
                                               at_times) == xml_cur_mem
                else:
                    assert int(new_max_mem) - int(tg_size) == xml_max_mem
                    # Bug 1220702, skip the check for current memory
                    assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Found unmatched memory setting from domain xml")

    def check_mem_align():
        """
        Check if set memory align to 256
        """
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        dom_mem = {}
        dom_mem['maxMemory'] = int(dom_xml.max_mem_rt)
        dom_mem['memory'] = int(dom_xml.memory)
        dom_mem['currentMemory'] = int(dom_xml.current_mem)

        cpuxml = dom_xml.cpu
        numa_cell = cpuxml.numa_cell
        dom_mem['numacellMemory'] = int(numa_cell[0]['memory'])
        sum_numa_mem = sum([int(cell['memory']) for cell in numa_cell])

        attached_mem = dom_xml.get_devices(device_type='memory')[0]
        dom_mem['attached_mem'] = attached_mem.target.size

        all_align = True
        for key in dom_mem:
            logging.info('%-20s:%15d', key, dom_mem[key])
            if dom_mem[key] % 256:
                logging.error('%s not align to 256', key)
                all_align = False

        if not all_align:
            test.fail('Memory not align to 256')

        if dom_mem['memory'] == sum_numa_mem + dom_mem['attached_mem']:
            logging.info('Check Pass: Memory is equal to (all numa memory + memory device)')
        else:
            test.fail('Memory is not equal to (all numa memory + memory device)')

        return dom_mem

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(),
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def add_device(dev_xml, attach, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach:
            ret = virsh.attach_device(vm_name, dev_xml.xml,
                                      flagstr=attach_option,
                                      debug=True)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if memory_val:
            vmxml.memory = int(memory_val)
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # Not exists
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            # Rounding the numa memory values
            if align_mem_values:
                for cell in range(cells.__len__()):
                    memory_value = str(utils_numeric.align_value(
                        cells[cell]["memory"],
                        align_to_value))
                    cells[cell]["memory"] = memory_value
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cells
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages:
            membacking = vm_xml.VMMemBackingXML()
            hugepages = vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(huge_pages)):
                pagexml = hugepages.PageXML()
                pagexml.update(huge_pages[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    memory_val = params.get('memory_val', '')
    mem_align = 'yes' == params.get('mem_align', 'no')
    hot_plug = 'yes' == params.get('hot_plug', 'no')
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")
    align_mem_values = "yes" == params.get("align_mem_values", "no")
    align_to_value = int(params.get("align_to_value", "65536"))
    hot_reboot = "yes" == params.get("hot_reboot", "no")
    rand_reboot = "yes" == params.get("rand_reboot", "no")
    guest_known_unplug_errors = []
    guest_known_unplug_errors.append(params.get("guest_known_unplug_errors"))
    host_known_unplug_errors = []
    host_known_unplug_errors.append(params.get("host_known_unplug_errors"))

    # params for attached device
    mem_model = params.get("mem_model", "dimm")
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [ast.literal_eval(x)
                  for x in params.get("huge_pages", "").split()]
    numa_memnode = [ast.literal_eval(x)
                    for x in params.get("numa_memnode", "").split()]
    at_times = int(params.get("attach_times", 1))
    online = params.get("mem_online", "no")

    config = utils_config.LibvirtQemuConfig()
    setup_hugepages_flag = params.get("setup_hugepages")
    if (setup_hugepages_flag == "yes"):
        setup_hugepages(int(pg_size))

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if not libvirt_version.version_compare(1, 2, 14):
        test.cancel("Memory hotplug not supported in current libvirt version.")

    if 'align_256m' in params.get('name', ''):
        arch = platform.machine()
        if arch.lower() != 'ppc64le':
            test.cancel('This case is for ppc64le only.')

    if align_mem_values:
        # Rounding the following values to 'align'
        max_mem = utils_numeric.align_value(max_mem, align_to_value)
        max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value)
        cur_mem = utils_numeric.align_value(cur_mem, align_to_value)
        tg_size = utils_numeric.align_value(tg_size, align_to_value)

    try:
        # Drop caches first so that the host has enough memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()

        # Start the domain anyway if attaching a memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = vm.get_totalmem_sys(online)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        dev_xml = None

        # To attach the memory device.
        if add_mem_device and not hot_plug:
            at_times = int(params.get("attach_times", 1))
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit, tg_node,
                                                   node_mask, mem_model)
            randvar = 0
            rand_value = random.randint(15, 25)
            logging.debug("reboots at %s", rand_value)
            for x in xrange(at_times):
                # If any error is expected, check the command's error status
                # only on the last attach attempt
                randvar = randvar + 1
                logging.debug("attaching device count = %s", x)
                if x == at_times - 1:
                    add_device(dev_xml, attach_device, attach_error)
                else:
                    add_device(dev_xml, attach_device)
                if hot_reboot:
                    vm.reboot()
                    vm.wait_for_login()
                if rand_reboot and randvar == rand_value:
                    randvar = 0
                    rand_value = random.randint(15, 25)
                    logging.debug("reboots at %s", rand_value)
                    vm.reboot()
                    vm.wait_for_login()

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml,
                            **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Cann't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError as detail:
                if start_error:
                    pass
                else:
                    except_msg = "memory hotplug isn't supported by this QEMU binary"
                    if except_msg in detail.reason:
                        test.cancel(detail)
                    test.fail(detail)

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem,
                                  flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Hotplug memory device
        if add_mem_device and hot_plug:
            process.run('ps -ef|grep qemu', shell=True, verbose=True)
            session = vm.wait_for_login()
            original_mem = vm.get_totalmem_sys()
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size,
                                                   mem_addr, tg_sizeunit,
                                                   pg_unit, tg_node,
                                                   node_mask, mem_model)
            add_device(dev_xml, True)
            mem_after = vm.get_totalmem_sys()
            params['delta'] = mem_after - original_mem

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        if mem_align:
            dom_mem = check_mem_align()
            check_qemu_cmd(dom_mem['maxMemory'], dom_mem['attached_mem'])
            if hot_plug and params['delta'] != dom_mem['attached_mem']:
                test.fail('Memory after attach not equal to original mem + attached mem')

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd(max_mem_rt, tg_size)

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config") and
                not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total, check_option="attach")

        # Consuming memory on guest,
        # to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            consume_vm_mem()
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                test.fail("Numa memory can't be consumed on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            vm.start()
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        unplug_failed_with_known_error = False
        if detach_device:
            if not dev_xml:
                dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size,
                                                       mem_addr, tg_sizeunit,
                                                       pg_unit, tg_node,
                                                       node_mask, mem_model)
            for x in xrange(at_times):
                ret = virsh.detach_device(vm_name, dev_xml.xml,
                                          flagstr=attach_option)
                if ret.stderr and host_known_unplug_errors:
                    for known_error in host_known_unplug_errors:
                        if (known_error[0] == known_error[-1]) and \
                           known_error.startswith(("'")):
                            known_error = known_error[1:-1]
                        if known_error in ret.stderr:
                            unplug_failed_with_known_error = True
                            logging.debug("Known error occured in Host, while"
                                          " hot unplug: %s", known_error)
                if unplug_failed_with_known_error:
                    break
                try:
                    libvirt.check_exit_status(ret, detach_error)
                except Exception as detail:
                    dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                    try:
                        session = vm.wait_for_login()
                        utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                                ignore_result=True,
                                                session=session, level_check=5)
                    except Exception:
                        session.close()
                        test.fail("After memory unplug Unable to connect to VM"
                                  " or unable to collect dmesg")
                    session.close()
                    if os.path.exists(dmesg_file):
                        with open(dmesg_file, 'r') as f:
                            flag = re.findall(
                                r'memory memory\d+?: Offline failed', f.read())
                        if not flag:
                            # The attached memory is in use by the vm and could
                            # not be unplugged; this result is expected
                            os.remove(dmesg_file)
                            test.fail(detail)
                        unplug_failed_with_known_error = True
                        os.remove(dmesg_file)
            # Check whether a known error occurred or not
            dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            try:
                session = vm.wait_for_login()
                utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                        ignore_result=True, session=session,
                                        level_check=4)
            except Exception:
                session.close()
                test.fail("After memory unplug Unable to connect to VM"
                          " or unable to collect dmesg")
            session.close()
            if guest_known_unplug_errors and os.path.exists(dmesg_file):
                for known_error in guest_known_unplug_errors:
                    if (known_error[0] == known_error[-1]) and \
                       known_error.startswith(("'")):
                        known_error = known_error[1:-1]
                    with open(dmesg_file, 'r') as f:
                        if known_error in f.read():
                            unplug_failed_with_known_error = True
                            logging.debug("Known error occured, while hot unplug"
                                          ": %s", known_error)
            if test_dom_xml and not unplug_failed_with_known_error:
                check_dom_xml(dt_mem=detach_device)
                # Remove dmesg temp file
                if os.path.exists(dmesg_file):
                    os.remove(dmesg_file)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        if (setup_hugepages_flag == "yes"):
            restore_hugepages()
        vmxml_backup.sync()
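
# A minimal sketch of the managedsave round-trip performed in the test above,
# assuming the same avocado-vt helpers (virsh, libvirt, and the vm object)
# already imported by this example.
def managedsave_roundtrip(vm, vm_name, virsh_dargs):
    # Save the running domain to its managed-save image.
    ret = virsh.managedsave(vm_name, **virsh_dargs)
    libvirt.check_exit_status(ret)
    # Starting the domain restores it from the managed-save image.
    vm.start()
    # A successful login confirms the guest resumed with its saved state.
    vm.wait_for_login().close()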
Example #11
0
def run(test, params, env):
    """
    Test MTU feature of the virtual network
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    mtu_type = params.get('mtu_type')
    mtu_size = params.get('mtu_size', '')
    net = params.get('net', DEFAULT_NET)
    net_type = params.get('net_type', '')
    with_iface = 'yes' == params.get('with_iface', 'no')
    with_net = 'yes' == params.get('with_net', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    check = params.get('check', '')
    error_msg = params.get('error_msg', '')
    bridge_name = 'br_mtu' + utils_misc.generate_random_string(3)
    add_pkg = params.get('add_pkg', '')
    model = params.get('model', 'virtio')

    def set_network(size, net='default'):
        """
        Set mtu size to a certain network
        """
        logging.info('Set mtu size of network "%s" to %s', net, size)
        default_xml = NetworkXML.new_from_net_dumpxml(net)
        default_xml.mtu = size
        default_xml.sync()
        logging.debug(virsh.net_dumpxml(net))

    def set_interface(mtu_size='', source_network='default',
                      iface_type='network', iface_model='virtio'):
        """
        Set mtu size to a certain interface
        """
        interface_type = 'bridge' if iface_type in ('bridge', 'openvswitch') else iface_type
        iface_dict = {
            'type': interface_type,
            'source': "{'%s': '%s'}" % (interface_type, source_network),
            'model': iface_model
        }

        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})

        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})

        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)

    def get_default_if():
        """
        Get the default interface being used by the vm
        """
        ifaces = utils_net.get_sorted_net_if()
        logging.debug('Interfaces on host: %s', ifaces)
        for iface in ifaces[0]:
            if 'Link detected: yes' in process.run('ethtool %s' % iface).stdout_text:
                logging.debug('Found host interface "%s"', iface)
                return iface

    def create_bridge():
        """
        Create a bridge on host for test
        """
        cmd_create_br = 'nmcli con add type bridge con-name %s ifname %s'
        con_name = 'con_' + utils_misc.generate_random_string(3)
        bridge_name = 'br_' + utils_misc.generate_random_string(3)
        process.run(cmd_create_br % (con_name, bridge_name), verbose=True)
        return con_name, bridge_name

    def create_network_xml(name, network_type, base_if='', **kwargs):
        """
        Create a network xml to be defined
        """
        m_net = NetworkXML(name)
        m_net.forward = {'mode': 'bridge'}
        if network_type in ('bridge', 'openvswitch'):
            m_net.bridge = {'name': kwargs['bridge_name']}
        elif network_type == 'macvtap':
            if base_if:
                m_net.forward_interface = [{'dev': base_if}]
        if network_type == 'openvswitch':
            m_net.virtualport_type = 'openvswitch'
        if 'mtu' in kwargs:
            m_net.mtu = kwargs['mtu']
        logging.debug(m_net)
        return m_net.xml

    def create_iface(iface_type, **kwargs):
        """
        Create an interface to be attached to the vm
        """
        m_iface = Interface(iface_type)
        m_iface.mac_address = utils_net.generate_mac_address_simple()
        if 'base_if' in kwargs:
            m_iface.source = {'dev': kwargs['base_if'],
                              'mode': 'vepa'}
        if 'source_net' in kwargs:
            m_iface.source = {'network': kwargs['source_net']}
        if 'mtu' in kwargs:
            m_iface.mtu = {'size': kwargs['mtu']}
        if 'model_net' in kwargs:
            m_iface.model = kwargs['model_net']
        logging.debug(m_iface.get_xml())
        logging.debug(m_iface)
        return m_iface

    def check_mtu(mtu_size, qemu=False):
        """
        Check if mtu meets expectation on host
        """
        error = ''
        live_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = live_vmxml.get_devices('interface')[0]
        logging.debug(iface_xml.target)
        dev = iface_xml.target['dev']
        ifconfig_info = process.run('ifconfig|grep mtu|grep %s' % dev,
                                    shell=True, verbose=True).stdout_text
        if 'mtu %s' % mtu_size in ifconfig_info:
            logging.info('PASS on ifconfig check for vnet.')
        else:
            error += 'Fail on ifconfig check for vnet.'
        if qemu:
            qemu_mtu_info = process.run('ps aux|grep qemu-kvm',
                                        shell=True, verbose=True).stdout_text
            if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
                logging.info('PASS on qemu cmd line check.')
            else:
                error += 'Fail on qemu cmd line check.'
        if error:
            test.fail(error)

    def check_mtu_in_vm(fn_login, mtu_size):
        """
        Check if mtu meets expectations in vm
        """
        session = fn_login()
        check_cmd = 'ifconfig'
        output = session.cmd(check_cmd)
        session.close()
        logging.debug(output)
        if 'mtu %s' % mtu_size not in output:
            test.fail('MTU check inside vm failed.')
        else:
            logging.debug("MTU check inside vm passed.")

    try:
        bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)
        if add_pkg:
            add_pkg = add_pkg.split()
            utils_package.package_install(add_pkg)
        if 'openvswitch' in add_pkg:
            br = 'ovsbr0' + utils_misc.generate_random_string(3)
            process.run('systemctl start openvswitch.service', shell=True, verbose=True)
            process.run('ovs-vsctl add-br %s' % br, shell=True, verbose=True)
            process.run('ovs-vsctl show', shell=True, verbose=True)

        if not check or check in ['save', 'managedsave', 'hotplug_save']:
            # Create bridge or network and set mtu
            iface_type = 'network'
            if net_type in ('bridge', 'openvswitch'):
                if net_type == 'bridge':
                    params['con_name'], br = create_bridge()
                if mtu_type == 'network':
                    test_net = create_network_xml(
                        bridge_name, net_type,
                        bridge_name=br
                    )
                    virsh.net_create(test_net, debug=True)
                    virsh.net_dumpxml(bridge_name, debug=True)
                if mtu_type == 'interface':
                    iface_type = net_type
                    bridge_name = br
            elif net_type == 'network':
                if mtu_type == 'network':
                    set_network(mtu_size)

            iface_mtu = 0
            if mtu_type == 'interface':
                iface_mtu = mtu_size
            if mtu_type == 'network' and with_iface:
                mtu_size = str(int(mtu_size)//2)
                iface_mtu = mtu_size

            source_net = bridge_name if net_type in ('bridge', 'openvswitch') else 'default'

            # set mtu in vm interface
            set_interface(iface_mtu, source_network=source_net, iface_type=iface_type, iface_model=model)
            vm.start()
            vm_login = vm.wait_for_serial_login if net_type in ('bridge', 'openvswitch') else vm.wait_for_login
            vm_login().close()
            check_qemu = mtu_type == 'interface'

            # Test mtu after save vm
            if check in ('save', 'hotplug_save'):
                if check == 'hotplug_save':
                    iface = create_iface('network', source_net='default',
                                         mtu=mtu_size, model_net=model)
                    params['mac'] = iface.mac_address
                    virsh.attach_device(vm_name, iface.xml, debug=True)
                    virsh.dumpxml(vm_name, debug=True)
                    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    if params['mac'] not in str(dom_xml):
                        test.fail('Failed to attach interface with mtu')
                save_path = os.path.join(data_dir.get_tmp_dir(), vm_name + '.save')
                virsh.save(vm_name, save_path, debug=True)
                virsh.restore(save_path, debug=True)
            if check == 'managedsave':
                virsh.managedsave(vm_name, debug=True)
                virsh.start(vm_name, debug=True)

            # Check in both host and vm
            check_mtu(mtu_size, check_qemu)
            check_mtu_in_vm(vm_login, mtu_size)
            vm_login(timeout=60).close()

            if check == 'hotplug_save':
                virsh.detach_interface(vm_name, 'network %s' % params['mac'], debug=True)
                time.sleep(5)
                dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if params['mac'] in str(dom_xml):
                    test.fail('Failed to detach interface with mtu after save-restore')

        else:
            hotplug = 'yes' == params.get('hotplug', 'False')
            if check == 'net_update':
                result = virsh.net_update(
                    DEFAULT_NET, 'modify', 'mtu',
                    '''"<mtu size='%s'/>"''' % mtu_size,
                    debug=True
                )
            if check in ('macvtap', 'bridge_net', 'ovswitch_net'):
                base_if = get_default_if()
                macv_name = 'direct-macvtap' + utils_misc.generate_random_string(3)

                # Test mtu in different types of network
                if mtu_type == 'network':
                    if check == 'macvtap':
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if, mtu=mtu_size)
                    if check == 'bridge_net':
                        params['con_name'], br = create_bridge()
                        test_net = create_network_xml(
                            bridge_name, 'bridge', mtu=mtu_size,
                            bridge_name=br
                        )
                    if check == 'ovswitch_net':
                        test_net = create_network_xml(
                            bridge_name, 'openvswitch', mtu=mtu_size,
                            bridge_name=br
                        )
                    if 'net_create' in params['id']:
                        result = virsh.net_create(test_net, debug=True)
                    if 'net_define' in params['id']:
                        result = virsh.net_define(test_net, debug=True)

                # Test mtu with or without a binding network
                elif mtu_type == 'interface':
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    if with_net:
                        test_net = create_network_xml(macv_name, 'macvtap', base_if)
                        virsh.net_create(test_net, debug=True)
                        iface = create_iface('network', source_net=macv_name, mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml, debug=True)
                        else:
                            vmxml.add_device(iface)
                            vmxml.sync()
                            result = virsh.start(vm_name)
                    else:
                        iface = create_iface('direct', base_if=base_if, mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml, debug=True)
                        else:
                            vmxml.add_device(iface)
                            result = virsh.define(vmxml.xml, debug=True)
            if check == 'invalid_val':
                iface = create_iface('network', source_net='default', mtu=mtu_size)
                result = virsh.attach_device(vm_name, iface.xml, debug=True)

            # Check result
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])

    finally:
        bk_xml.sync()
        bk_netxml.sync()
        if 'test_net' in locals():
            virsh.net_destroy(bridge_name, debug=True)
        if params.get('con_name'):
            process.run('nmcli con del %s' % params['con_name'], verbose=True)
        if add_pkg:
            process.run("ovs-vsctl del-br %s" % br, verbose=True)
            utils_package.package_remove(add_pkg)
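
# A hedged alternative to the ifconfig-based host check above: read the vnet
# MTU with the iproute2 `ip` command (assumed to be installed on the host),
# using the same process.run helper as the test.
def get_host_iface_mtu(dev):
    out = process.run('ip -o link show %s' % dev,
                      shell=True, verbose=True).stdout_text
    # Typical output: "3: vnet0: <BROADCAST,...> mtu 9000 qdisc ...";
    # the MTU value is the token following 'mtu'.
    tokens = out.split()
    return int(tokens[tokens.index('mtu') + 1]) if 'mtu' in tokens else None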
Example #12
0
    if list_ref and not re.search(list_ref, help_info):
        raise exceptions.TestSkipError("This version do not support list"
                                       " type:%s" % list_ref)

    # If a transient domain is destroyed, it will disappear.
    if vm_ref == "transient" and options_ref == "inactive":
        logging.info("Set addition_status_error to yes")
        logging.info(
            "because transient domain will disappear after destroyed.")
        addition_status_error = "yes"

    if vm_ref == "transient":
        vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vm.undefine()
    elif vm_ref == "managed-save":
        virsh.managedsave(vm_name, ignore_status=True, print_info=True)

    try:
        # run test case
        if list_ref == "--uuid":
            result_expected = domuuid
            logging.info("%s's uuid is: %s", vm_name, domuuid)
        elif list_ref == "--title":
            vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if options_ref == "inactive":
                virsh.desc(vm_name, "--config --title", desc)
            else:
                virsh.desc(vm_name, "--live --title", desc)
            result_expected = desc
            logging.info("%s's title is: %s", vm_name, desc)
        else:
Example #13
0
def run(test, params, env):
    """
    Test memory device hotplug/unplug and related operations.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare the memory device xml to be attached.
    3.Edit the domain xml and start the domain.
    4.Perform the test operation (attach/detach, save/restore, managedsave).
    5.Recover the test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write a file on it,
        # which is what actually consumes the memory
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def mount_hugepages(page_size):
        """
        To mount hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size

        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)

    def setup_hugepages(page_size=2048, shp_num=2000):
        """
        To setup hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        :param shp_num: number of hugepages
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        To recover hugepages
        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def check_qemu_cmd(max_mem_rt, tg_size):
        """
        Check qemu command line options.
        :param max_mem_rt: size of max memory
        :param tg_size: Target hotplug memory size
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if discard:
            cmd += " | grep 'discard-data=yes'"
        elif max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'" %
                    (max_mem_slots, max_mem_rt))
            if tg_size:
                size = int(tg_size) * 1024
                if huge_pages or discard or cold_plug_discard:
                    cmd_str = r'memdimm.\|memory-backend-file,id=ram-node.'
                    cmd += (
                        " | grep 'memory-backend-file,id=%s' | grep 'size=%s" %
                        (cmd_str, size))
                else:
                    cmd_str = r'mem.\|memory-backend-ram,id=ram-node.'
                    cmd += (
                        " | grep 'memory-backend-ram,id=%s' | grep 'size=%s" %
                        (cmd_str, size))

                if pg_size:
                    cmd += ",host-nodes=%s" % node_mask
                    if numa_memnode:
                        for node in numa_memnode:
                            if ('nodeset' in node
                                    and node['nodeset'] in node_mask):
                                cmd += ",policy=%s" % node['mode']
                    cmd += ".*pc-dimm,node=%s" % tg_node
                if mem_addr:
                    cmd += (".*slot=%s" % (mem_addr['slot']))
                cmd += "'"
            if cold_plug_discard:
                cmd += " | grep 'discard-data=yes'"

        # Run the command
        result = process.run(cmd, shell=True, verbose=True, ignore_status=True)
        if result.exit_status:
            test.fail('Qemu command check fail.')

    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s" %
               (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: vm.get_totalmem_sys(online) != int(old_mem),
            30,
            first=20.0)
        new_mem = vm.get_totalmem_sys(online)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "attach memory device")

        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "detach memory device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            logging.info("at_mem=%s,dt_mem=%s", at_mem, dt_mem)
            logging.info("detach_device is %s", detach_device)
            if at_mem:
                if at_times:
                    assert int(max_mem) + (int(tg_size) *
                                           at_times) == xml_max_mem
                else:
                    assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                if at_times:
                    assert int(cur_mem) + (int(tg_size) *
                                           at_times) == xml_cur_mem
                else:
                    assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                memory_devices = 1
                if at_times:
                    memory_devices = at_times
                if len(mem_dev) != memory_devices:
                    test.fail("Found wrong number of memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                if at_times:
                    assert int(new_max_mem) - (int(tg_size) *
                                               at_times) == xml_max_mem
                    assert int(new_cur_mem) - (int(tg_size) *
                                               at_times) == xml_cur_mem
                else:
                    assert int(new_max_mem) - int(tg_size) == xml_max_mem
                    # Bug 1220702, skip the check for current memory
                    assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Found unmatched memory setting from domain xml")

    def check_mem_align():
        """
        Check if memory values are aligned to 256 MiB
        """
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        dom_mem = {}
        dom_mem['maxMemory'] = int(dom_xml.max_mem_rt)
        dom_mem['memory'] = int(dom_xml.memory)
        dom_mem['currentMemory'] = int(dom_xml.current_mem)

        cpuxml = dom_xml.cpu
        numa_cell = cpuxml.numa_cell
        dom_mem['numacellMemory'] = int(numa_cell[0]['memory'])
        sum_numa_mem = sum([int(cell['memory']) for cell in numa_cell])

        attached_mem = dom_xml.get_devices(device_type='memory')[0]
        dom_mem['attached_mem'] = attached_mem.target.size

        all_align = True
        for key in dom_mem:
            logging.info('%-20s:%15d', key, dom_mem[key])
            if dom_mem[key] % 262144:
                logging.error('%s is not aligned to 256 MiB', key)
                if key == 'currentMemory':
                    continue
                all_align = False

        if not all_align:
            test.fail('Memory not aligned to 256 MiB')

        if dom_mem['memory'] == sum_numa_mem + dom_mem['attached_mem']:
            logging.info(
                'Check Pass: Memory is equal to (all numa memory + memory device)'
            )
        else:
            test.fail(
                'Memory is not equal to (all numa memory + memory device)')

        return dom_mem

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        def _wait_for_restore():
            try:
                virsh.restore(save_file, debug=True, ignore_status=False)
                return True
            except Exception as e:
                logging.error(e)

        utils_misc.wait_for(_wait_for_restore, 30, step=5)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def add_device(dev_xml, attach, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach:
            ret = virsh.attach_device(vm_name,
                                      dev_xml.xml,
                                      flagstr=attach_option,
                                      debug=True)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if memory_val:
            vmxml.memory = int(memory_val)
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # Not exists
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            # Rounding the numa memory values
            if align_mem_values:
                for cell in range(len(cells)):
                    memory_value = str(
                        utils_numeric.align_value(cells[cell]["memory"],
                                                  align_to_value))
                    cells[cell]["memory"] = memory_value
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cells
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages or discard or cold_plug_discard:
            membacking = vm_xml.VMMemBackingXML()
            membacking.discard = True
            membacking.source = ''
            membacking.source_type = 'file'
            if huge_pages:
                hugepages = vm_xml.VMHugepagesXML()
                pagexml_list = []
                for i in range(len(huge_pages)):
                    pagexml = hugepages.PageXML()
                    pagexml.update(huge_pages[i])
                    pagexml_list.append(pagexml)
                hugepages.pages = pagexml_list
                membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    detach_alias = "yes" == params.get("detach_alias", "no")
    detach_alias_options = params.get("detach_alias_options")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    wait_before_save_secs = int(params.get("wait_before_save_secs", 0))
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    memory_val = params.get('memory_val', '')
    mem_align = 'yes' == params.get('mem_align', 'no')
    hot_plug = 'yes' == params.get('hot_plug', 'no')
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")
    align_mem_values = "yes" == params.get("align_mem_values", "no")
    align_to_value = int(params.get("align_to_value", "65536"))
    hot_reboot = "yes" == params.get("hot_reboot", "no")
    rand_reboot = "yes" == params.get("rand_reboot", "no")
    guest_known_unplug_errors = []
    guest_known_unplug_errors.append(params.get("guest_known_unplug_errors"))
    host_known_unplug_errors = []
    host_known_unplug_errors.append(params.get("host_known_unplug_errors"))
    discard = "yes" == params.get("discard", "no")
    cold_plug_discard = "yes" == params.get("cold_plug_discard", "no")
    if cold_plug_discard or discard:
        mem_discard = 'yes'
    else:
        mem_discard = 'no'

    # params for attached device
    mem_model = params.get("mem_model", "dimm")
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    huge_page_num = int(params.get('huge_page_num', 2000))
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [
        ast.literal_eval(x) for x in params.get("huge_pages", "").split()
    ]
    numa_memnode = [
        ast.literal_eval(x) for x in params.get("numa_memnode", "").split()
    ]
    at_times = int(params.get("attach_times", 1))
    online = params.get("mem_online", "no")

    config = utils_config.LibvirtQemuConfig()
    setup_hugepages_flag = params.get("setup_hugepages")
    if (setup_hugepages_flag == "yes"):
        cpu_arch = cpu_util.get_cpu_arch()
        if cpu_arch == 'power8':
            pg_size = '16384'
            huge_page_num = 200
        elif cpu_arch == 'power9':
            pg_size = '2048'
            huge_page_num = 2000
        for x in huge_pages:
            x.update({'size': pg_size})
        setup_hugepages(int(pg_size), shp_num=huge_page_num)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if not libvirt_version.version_compare(1, 2, 14):
        test.cancel("Memory hotplug not supported in current libvirt version.")

    if 'align_256m' in params.get('name', ''):
        arch = platform.machine()
        if arch.lower() != 'ppc64le':
            test.cancel('This case is for ppc64le only.')

    if align_mem_values:
        # Rounding the following values to 'align'
        max_mem = utils_numeric.align_value(max_mem, align_to_value)
        max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value)
        cur_mem = utils_numeric.align_value(cur_mem, align_to_value)
        tg_size = utils_numeric.align_value(tg_size, align_to_value)

    try:
        # Drop caches first so that the host has enough memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()
        numa_info = utils_misc.NumaInfo()
        logging.debug(numa_info.get_all_node_meminfo())

        # Start the domain anyway if attaching a memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = vm.get_totalmem_sys(online)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        elif discard:
            vm.start()
            session = vm.wait_for_login()
            check_qemu_cmd(max_mem_rt, tg_size)
        dev_xml = None

        # To attach the memory device.
        if (add_mem_device and not hot_plug) or cold_plug_discard:
            at_times = int(params.get("attach_times", 1))
            randvar = 0
            if rand_reboot:
                rand_value = random.randint(15, 25)
                logging.debug("reboots at %s", rand_value)
            for x in xrange(at_times):
                # If any error is expected, check the command's error status
                # only on the last attach attempt
                device_alias = "ua-" + str(uuid.uuid4())
                dev_xml = utils_hotplug.create_mem_xml(
                    tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit, tg_node,
                    node_mask, mem_model, mem_discard, device_alias)
                randvar = randvar + 1
                logging.debug("attaching device count = %s", x)
                if x == at_times - 1:
                    add_device(dev_xml, attach_device, attach_error)
                else:
                    add_device(dev_xml, attach_device)
                if hot_reboot:
                    vm.reboot()
                    vm.wait_for_login()
                if rand_reboot and randvar == rand_value:
                    vm.reboot()
                    vm.wait_for_login()
                    randvar = 0
                    rand_value = random.randint(15, 25)
                    logging.debug("reboots at %s", rand_value)

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Cann't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError as detail:
                if start_error:
                    pass
                else:
                    except_msg = "memory hotplug isn't supported by this QEMU binary"
                    if except_msg in detail.reason:
                        test.cancel(detail)
                    test.fail(detail)

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Hotplug memory device
        if add_mem_device and hot_plug:
            process.run('ps -ef|grep qemu', shell=True, verbose=True)
            session = vm.wait_for_login()
            original_mem = vm.get_totalmem_sys()
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit,
                                                   tg_node, node_mask,
                                                   mem_model)
            add_device(dev_xml, True)
            mem_after = vm.get_totalmem_sys()
            params['delta'] = mem_after - original_mem

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        if mem_align:
            dom_mem = check_mem_align()
            check_qemu_cmd(dom_mem['maxMemory'], dom_mem['attached_mem'])
            if hot_plug and params['delta'] != dom_mem['attached_mem']:
                test.fail(
                    'Memory after attach not equal to original mem + attached mem'
                )

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd(max_mem_rt, tg_size)

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config")
                and not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total, check_option="attach")

        # Consuming memory on guest,
        # to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            # Increase the memory consumed to 1500M
            consume_vm_mem(1500)
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                test.fail("Numa memory can't be consumed on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            # Wait for the vm to be ready before managedsave
            time.sleep(wait_before_save_secs)
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)

            def _wait_for_vm_start():
                try:
                    vm.start()
                    return True
                except Exception as e:
                    logging.error(e)

            utils_misc.wait_for(_wait_for_vm_start, timeout=30, step=5)
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            # Wait for the vm to be ready before save
            time.sleep(wait_before_save_secs)
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        unplug_failed_with_known_error = False
        if detach_device:
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit,
                                                   tg_node, node_mask,
                                                   mem_model, mem_discard)
            for x in xrange(at_times):
                if not detach_alias:
                    ret = virsh.detach_device(vm_name,
                                              dev_xml.xml,
                                              flagstr=attach_option,
                                              debug=True)
                else:
                    ret = virsh.detach_device_alias(vm_name,
                                                    device_alias,
                                                    detach_alias_options,
                                                    debug=True)
                if ret.stderr and host_known_unplug_errors:
                    for known_error in host_known_unplug_errors:
                        if (known_error[0] == known_error[-1]) and \
                           known_error.startswith(("'")):
                            known_error = known_error[1:-1]
                        if known_error in ret.stderr:
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occured in Host, while"
                                " hot unplug: %s", known_error)
                if unplug_failed_with_known_error:
                    break
                try:
                    libvirt.check_exit_status(ret, detach_error)
                except Exception as detail:
                    dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                    try:
                        session = vm.wait_for_login()
                        utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                                ignore_result=True,
                                                session=session,
                                                level_check=5)
                    except Exception:
                        session.close()
                        test.fail("After memory unplug Unable to connect to VM"
                                  " or unable to collect dmesg")
                    session.close()
                    if os.path.exists(dmesg_file):
                        with open(dmesg_file, 'r') as f:
                            flag = re.findall(
                                r'memory memory\d+?: Offline failed', f.read())
                        if not flag:
                            # The attached memory is in use by the vm and could
                            # not be unplugged; this result is expected
                            os.remove(dmesg_file)
                            test.fail(detail)
                        unplug_failed_with_known_error = True
                        os.remove(dmesg_file)
            # Check whether a known error occurred or not
            dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            try:
                session = vm.wait_for_login()
                utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                        ignore_result=True,
                                        session=session,
                                        level_check=4)
            except Exception:
                session.close()
                test.fail("After memory unplug Unable to connect to VM"
                          " or unable to collect dmesg")
            session.close()
            if guest_known_unplug_errors and os.path.exists(dmesg_file):
                for known_error in guest_known_unplug_errors:
                    if (known_error[0] == known_error[-1]) and \
                       known_error.startswith(("'")):
                        known_error = known_error[1:-1]
                    with open(dmesg_file, 'r') as f:
                        if known_error in f.read():
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occurred during hot"
                                " unplug: %s", known_error)
            if test_dom_xml and not unplug_failed_with_known_error:
                check_dom_xml(dt_mem=detach_device)
                # Remove dmesg temp file
                if os.path.exists(dmesg_file):
                    os.remove(dmesg_file)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        if (setup_hugepages_flag == "yes"):
            restore_hugepages()
        vmxml_backup.sync()
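The unplug loop above treats certain dmesg messages as known, tolerable failures. Below is a minimal standalone sketch of that filtering idea using only the Python standard library; the function name and arguments are illustrative and not part of the example above.

import re

def unplug_error_is_known(dmesg_text, known_errors):
    """Return True if the unplug failure in dmesg_text matches a known error."""
    for known_error in known_errors:
        # Strip optional surrounding quotes, as the example above does.
        if len(known_error) > 1 and known_error[0] == known_error[-1] == "'":
            known_error = known_error[1:-1]
        if known_error in dmesg_text:
            return True
    # "Offline failed" means the guest still uses the memory block, which the
    # example above also treats as an expected (known) unplug failure.
    return bool(re.search(r'memory memory\d+?: Offline failed', dmesg_text))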
Пример #14
0
    if list_ref and not re.search(list_ref, help_info):
        raise exceptions.TestSkipError("This version does not support list"
                                       " type: %s" % list_ref)

    # If a transient domain is destroyed, it will disappear.
    if vm_ref == "transient" and options_ref == "inactive":
        logging.info("Set addition_status_error to yes")
        logging.info(
            "because transient domain will disappear after destroyed.")
        addition_status_error = "yes"

    if vm_ref == "transient":
        vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vm.undefine()
    elif vm_ref == "managed-save":
        virsh.managedsave(vm_name, ignore_status=True, print_info=True)

    try:
        # run test case
        if list_ref == "--uuid":
            result_expected = domuuid
            logging.info("%s's uuid is: %s", vm_name, domuuid)
        elif list_ref == "--title":
            vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if options_ref == "inactive":
                virsh.desc(vm_name, "--config --title", desc)
            else:
                virsh.desc(vm_name, "--live --title", desc)
            result_expected = desc
            logging.info("%s's title is: %s", vm_name, desc)
        else:
Пример #15
0
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """
    def check_vm_guestagent(session):
        # Install qemu-guest-agent if it is not already installed
        cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
        stat_install, output = session.cmd_status_output(cmd, 300)
        logging.debug(output)
        if stat_install != 0:
            raise error.TestError("Fail to install qemu-guest-agent, make"
                                  "sure that you have usable repo in guest")

        # Check if qemu-ga already started
        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga | grep -v grep")
        if stat_ps != 0:
            session.cmd("service qemu-ga start")
            # Check if the qemu-ga really started
            stat_ps = session.cmd_status(
                "ps aux |grep [q]emu-ga | grep -v grep")
            if stat_ps != 0:
                raise error.TestError("Fail to run qemu-ga in guest")

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")

    # A backup of original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible fail patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}

    # Setup possible failure patterns
    if pm_enabled == 'not_set':
        fail_pat.append('not supported')
    if pm_enabled == 'no':
        fail_pat.append('disabled')

    if vm_state == 'paused':
        fail_pat.append('not responding')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if pm_enabled == 'not_set':
            try:
                if vmxml.pm:
                    del vmxml.pm
            except xcepts.LibvirtXMLNotFoundError:
                pass
        else:
            pm_xml = vm_xml.VMPMXML()
            if suspend_target == 'mem':
                pm_xml.mem_enabled = pm_enabled
            elif suspend_target == 'disk':
                pm_xml.disk_enabled = pm_enabled
            elif suspend_target == 'hybrid':
                if 'hybrid_enabled' in dir(pm_xml):
                    pm_xml.hybrid_enabled = pm_enabled
                else:
                    raise error.TestNAError("PM suspend type 'hybrid' is not "
                                            "supported yet.")
            vmxml.pm = pm_xml
        vmxml.sync()

        vm_xml.VMXML.set_agent_channel(vm_name)
        vm.start()

        # Create swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(test.tmpdir, "%s.save" % vm_name)
            session = vm.wait_for_login()
            check_vm_guestagent(session)
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, debug=True)
            if result.exit_status == 0:
                if fail_pat:
                    raise error.TestFail(
                        "Expected failure matching %s, but command succeeded"
                        ":\n%s" % (fail_pat, result))
            else:
                if not fail_pat:
                    raise error.TestFail(
                        "Expected success, but command failed:\n%s" % result)
                if not any(p in result.stderr for p in fail_pat):
                    raise error.TestFail("Expected failure matching one of %s,"
                                         " but failed with:\n%s" %
                                         (fail_pat, result))
            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # dompmwakeup is expected to fail while the domain is managed-saved
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail(
                        "Vm status is not paused before pm wakeup")
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail(
                        "Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    raise error.TestFail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target,
                                         **virsh_dargs)
                libvirt.check_exit_status(ret)
                # save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    raise error.TestFail("libvirtd crashed")

        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            # Cleanup
            session.close()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
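The save/restore branch above is essentially a round trip through virsh save and virsh restore followed by a libvirtd health check. A minimal sketch of that sequence, assuming the avocado-vt helpers used throughout these examples are importable as virttest.virsh and virttest.utils_libvirtd (paths may differ by version) and that vm_name refers to a running domain:

import os
from virttest import virsh, utils_libvirtd

def save_restore_round_trip(vm_name, savefile):
    """Save a running domain to a file, restore it, and report libvirtd health."""
    libvirtd = utils_libvirtd.Libvirtd()
    ret = virsh.save(vm_name, savefile, debug=True, ignore_status=True)
    assert ret.exit_status == 0, ret.stderr
    ret = virsh.restore(savefile, debug=True, ignore_status=True)
    assert ret.exit_status == 0, ret.stderr
    # Remove the temporary save image once the domain is running again.
    if os.path.exists(savefile):
        os.remove(savefile)
    return libvirtd.is_running()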
Пример #16
0
def run(test, params, env):
    """
    Test misc tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert
    2) check vendor_id
    3) check maximum vcpus with topology settings

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def update_cpu_xml():
        """
        Update cpu xml for test
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Create cpu xml for test
        if vmxml.xmltreefile.find('cpu'):
            cpu_xml = vmxml.cpu
        else:
            cpu_xml = vm_xml.VMCPUXML()

        if customize_cpu_features:
            for idx in range(len(cpu_xml.get_feature_list()) - 1, -1, -1):
                cpu_xml.remove_feature(idx)
            domcapa_xml = domcapability_xml.DomCapabilityXML()
            features = domcapa_xml.get_additional_feature_list(
                'host-model', ignore_features=None)
            for feature in features:
                for feature_name, feature_policy in feature.items():
                    # For host-passthrough mode, adding "invtsc" requires
                    # more settings, so it will be ignored.
                    if feature_name != "invtsc":
                        cpu_xml.add_feature(feature_name, feature_policy)

        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if cpu_vendor_id:
            cpu_xml.vendor_id = cpu_vendor_id

        # Update vm's cpu
        vmxml.cpu = cpu_xml
        vmxml.sync()

        if vcpu_max:
            if with_topology:
                vm_xml.VMXML.set_vm_vcpus(vm_name,
                                          int(vcpu_max),
                                          cores=int(vcpu_max),
                                          sockets=1,
                                          threads=1,
                                          add_topology=with_topology,
                                          topology_correction=with_topology)
            else:
                vm_xml.VMXML.set_vm_vcpus(vm_name, int(vcpu_max))

    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list." %
                      snapshot_name)
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    def check_feature_list(vm, original_dict):
        """
        Compare new cpu feature list and original cpu

        :param vm: VM object
        :original_dict: Cpu feature dict , {"name1":"policy1","name2":"policy2"}
        """
        new_cpu_xml = vm_xml.VMXML.new_from_dumpxml(vm.name).cpu
        new_feature_dict = new_cpu_xml.get_dict_type_feature()
        if new_feature_dict != original_dict:
            test.fail('CPU feature lists are different, original is :%s,'
                      ' new is %s:' % (original_dict, new_feature_dict))

    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    cpu_mode = params.get('cpu_mode')
    vcpu_max = params.get('vcpu_max')
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")

    test_operations = params.get("test_operations")
    check_vendor_id = "yes" == params.get("check_vendor_id", "no")
    virsh_edit_cmd = params.get("virsh_edit_cmd")
    with_topology = "yes" == params.get("with_topology", "no")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")

    cpu_vendor_id = None
    expected_qemuline = None
    cmd_in_guest = params.get("cmd_in_guest")
    customize_cpu_features = "yes" == params.get("customize_cpu_features",
                                                 "no")
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name

    try:
        if check_vendor_id:
            output = virsh.capabilities(debug=True)
            host_vendor = re.findall(r'<vendor>(\w+)<', output)[0]

            cpu_vendor_id = 'GenuineIntel'
            if host_vendor != "Intel":
                cpu_vendor_id = 'AuthenticAMD'
            logging.debug("Set cpu vendor_id to %s on this host.",
                          cpu_vendor_id)

            expected_qemuline = "vendor=" + cpu_vendor_id
            cmd_in_guest = ("cat /proc/cpuinfo | grep vendor_id | grep {}".
                            format(cpu_vendor_id))

        # Update xml for test
        update_cpu_xml()

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)
        cpu_xml = vmxml.cpu
        feature_dict = cpu_xml.get_dict_type_feature()

        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        if virsh_edit_cmd:
            status = libvirt.exec_virsh_edit(vm_name,
                                             virsh_edit_cmd.split(","))
            if status == status_error:
                test.fail("Virsh edit got unexpected result.")

        # Check if vm could start successfully
        if not status_error:
            result = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(result)

            if expected_str_after_startup:
                libvirt.check_dumpxml(vm, expected_str_after_startup)

            if expected_qemuline:
                libvirt.check_qemu_cmd_line(expected_qemuline)

            if cmd_in_guest:
                vm_session = vm.wait_for_login()
                status, output = vm_session.cmd_status_output(cmd_in_guest)
                if status:
                    vm_session.close()
                    test.fail("Failed to run '{}' in vm with "
                              "messages:\n{}".format(cmd_in_guest, output))
                vm_session.close()
                if cpu_mode == 'maximum':
                    check_vm_cpu_model(output.strip(), cmd_in_guest, test)

            # Add case: Check cpu xml after domain Managedsaved and restored
            if test_operations:
                for item in test_operations.split(','):
                    if item == "managedsave_restore":
                        # (1) Managedsave the domain
                        virsh.managedsave(vm_name,
                                          ignore_status=False,
                                          debug=True)
                        check_feature_list(vm, feature_dict)
                        # (2) Restore the domain from the managed save file
                        virsh.restore(managed_save_file,
                                      ignore_status=False,
                                      debug=True)
                        # (3) Check cpu mode and feature list after restore
                        libvirt.check_dumpxml(vm, cpu_mode)
                        check_feature_list(vm, feature_dict)

    finally:
        logging.debug("Recover test environment")
        if os.path.exists(managed_save_file):
            virsh.managedsave_remove(vm_name, debug=True)
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
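The managedsave_restore branch above checks that a managedsave/restore cycle does not change the domain's CPU feature list. A condensed sketch of that check, assuming the virttest import paths used by avocado-vt and the default managed save location shown in the example:

from virttest import virsh
from virttest.libvirt_xml import vm_xml

def managedsave_keeps_cpu_features(vm_name):
    """Managedsave and restore a domain, then compare its cpu feature dict."""
    before = vm_xml.VMXML.new_from_dumpxml(vm_name).cpu.get_dict_type_feature()
    virsh.managedsave(vm_name, ignore_status=False, debug=True)
    # The managed save image lives under /var/lib/libvirt/qemu/save/ by default.
    save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
    virsh.restore(save_file, ignore_status=False, debug=True)
    after = vm_xml.VMXML.new_from_dumpxml(vm_name).cpu.get_dict_type_feature()
    return before == after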
Пример #17
0
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    pm_enabled_disk = params.get("pm_enabled_disk", "no")
    pm_enabled_mem = params.get("pm_enabled_mem", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_suspend_resume = "yes" == params.get("test_suspend_resume", "no")
    pmsuspend_error = 'yes' == params.get("pmsuspend_error", 'no')
    pmsuspend_error_msg = params.get("pmsuspend_error_msg")
    agent_error_test = 'yes' == params.get("agent_error_test", 'no')
    arch = platform.processor()
    duration_value = int(params.get("duration", "0"))

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # A backup of original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible fail patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs_copy = virsh_dargs.copy()
        virsh_dargs_copy['uri'] = uri
        virsh_dargs_copy['unprivileged_user'] = unprivileged_user
        if pmsuspend_error:
            fail_pat.append('access denied')

    # Setup possible failure patterns excluding ppc
    if "ppc64" not in arch:
        if pm_enabled == 'not_set':
            fail_pat.append('not supported')
        if pm_enabled == 'no':
            fail_pat.append('disabled')

    if vm_state == 'paused':
        # For older version
        fail_pat.append('not responding')
        # For newer version
        fail_pat.append('not running')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')
    if agent_error_test:
        fail_pat.append('not running')
        fail_pat.append('agent not available')
    if pmsuspend_error_msg:
        fail_pat.append(pmsuspend_error_msg)

    # RHEL6 or older releases
    unsupported_guest_err = 'suspend mode is not supported by the guest'

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if "ppc64" not in arch:
            if pm_enabled == 'not_set':
                try:
                    if vmxml.pm:
                        del vmxml.pm
                except xcepts.LibvirtXMLNotFoundError:
                    pass
            else:
                pm_xml = vm_xml.VMPMXML()
                pm_xml.mem_enabled = pm_enabled_mem
                pm_xml.disk_enabled = pm_enabled_disk
                vmxml.pm = pm_xml
            vmxml.sync()

        try:
            vm.prepare_guest_agent()
        except virt_vm.VMStartError as info:
            if "not supported" in str(info).lower():
                test.cancel(info)
            else:
                test.error(info)
        # Selinux should be enforcing
        vm.setenforce(1)

        # Create swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
            session = vm.wait_for_login()
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")
            session.close()

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, duration=duration_value, debug=True,
                                        uri=uri,
                                        unprivileged_user=unprivileged_user)
            if result.exit_status == 0:
                if fail_pat:
                    test.fail("Expected failure matching %s, but command"
                              " succeeded:\n%s" % (fail_pat, result.stdout))
            else:
                if unsupported_guest_err in result.stderr:
                    test.cancel("Unsupported suspend mode:\n%s" % result.stderr)
                if not fail_pat:
                    test.fail("Expected success, but command failed:\n%s"
                              % result.stderr)
                if not any(p in result.stderr for p in fail_pat):
                    test.fail("Expected failure matching one of %s, but "
                              "failed with:\n%s" % (fail_pat, result.stderr))

            # If pmsuspend_error is True, skip the checks below and return directly.
            if pmsuspend_error:
                return
            # check whether the state changes to pmsuspended
            if not utils_misc.wait_for(lambda: vm.state() == 'pmsuspended', 30):
                test.fail("VM failed to change its state, expected state: "
                          "pmsuspended, but actual state: %s" % vm.state())

            if agent_error_test:
                ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
                libvirt.check_result(ret, fail_pat)
                ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
                libvirt.check_result(ret, fail_pat)
                ret = virsh.domtime(vm_name, **virsh_dargs)
                libvirt.check_result(ret, fail_pat)
            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # dompmwakeup is expected to fail while the domain is managed-saved
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    test.fail("Vm status is not paused before pm wakeup")
                if params.get('setup_libvirt_polkit') == 'yes':
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs_copy)
                else:
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    test.fail("Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    test.fail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait for vm is started
                vm.wait_for_login()
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait for vm is started
                vm.wait_for_login()
                # run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    test.fail("libvirtd crashed")
            if test_suspend_resume:
                ret = virsh.suspend(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    test.fail("VM state should be pmsuspended")
                ret = virsh.resume(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    test.fail("VM state should be pmsuspended")
        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
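Both dompmsuspend examples above follow the same basic lifecycle: suspend the domain through its guest agent, then wake it with dompmwakeup. A minimal sketch of that round trip, assuming the virttest.virsh helper and a running domain whose guest agent is available:

from virttest import virsh

def pmsuspend_and_wakeup(vm_name, target="mem"):
    """Suspend a domain via its guest agent, then wake it back up."""
    ret = virsh.dompmsuspend(vm_name, target, debug=True, ignore_status=True)
    assert ret.exit_status == 0, ret.stderr
    ret = virsh.dompmwakeup(vm_name, debug=True, ignore_status=True)
    assert ret.exit_status == 0, ret.stderr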
Пример #18
0
    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :param vm_name: Name of the VM domain
        :param vm_operation: Operation to be performed on the VM domain,
                             e.g. save, managedsave, suspend
        :param recover: flag indicating whether to apply or undo
                        vm_operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
                vm_uptime_init = vm.uptime()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")
Пример #19
0
def run(test, params, env):
    """
    Test command: virsh managedsave-xxx
    including virsh managedsave-edit
              virsh managedsave-dumpxml
              virsh managedsave-define
              ...
    """
    vm_name = params.get('main_vm')
    checkpoint = params.get('checkpoint', '')
    error_msg = params.get('error_msg', '')
    ms_extra_options = params.get('ms_extra_options', '')
    pre_state = params.get('pre_state', '')
    status_error = 'yes' == params.get('status_error', 'no')

    vm = env.get_vm(vm_name)
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}

    def start_and_login_vm():
        """
        Start vm and login, after which vm is accessible
        """
        vm.start()
        vm.wait_for_login().close()

    try:
        if checkpoint == 'dumpxml':
            # Check managedsave-dumpxml
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
            virsh.managedsave_dumpxml(vm_name, **virsh_dargs)
            tmp_dir = data_dir.get_tmp_dir()
            save_img_xml = os.path.join(tmp_dir, 'save_img.xml')
            managed_save_xml = os.path.join(tmp_dir, 'managed_save.xml')
            virsh.save_image_dumpxml(MANAGEDSAVE_FILE % vm_name, ' > %s'
                                     % save_img_xml, **virsh_dargs)
            virsh.managedsave_dumpxml(vm_name, ' > %s' % managed_save_xml,
                                      **virsh_dargs)
            result_need_check = process.run('diff %s %s' %
                                            (save_img_xml, managed_save_xml),
                                            shell=True, verbose=True)
        if checkpoint == 'secure_info':
            # Check managedsave-dumpxml with option --security-info
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            vm_xml.VMXML.set_graphics_attr(vm_name, {'passwd': '123456'})
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
            default_xml = virsh.managedsave_dumpxml(vm_name, **virsh_dargs).stdout_text
            if 'passwd' in default_xml:
                test.fail('Found "passwd" in dumped vm xml. '
                          'Secure info like "passwd" should not be dumped.')
            secure_xml = virsh.managedsave_dumpxml(vm_name, '--security-info',
                                                   **virsh_dargs).stdout_text
            if 'passwd' not in secure_xml:
                test.fail('Not found "passwd" in dumped vm xml.'
                          'Secure info like "passwd" should be dumped '
                          'with option "--security-info"')
        if checkpoint == 'define':
            # Make change to a managedsave-dumped xml and redefine vm
            # and check if the change take effect
            start_option = '--paused' if pre_state == 'paused' else ''
            virsh.start(vm_name, start_option, **virsh_dargs)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            logging.debug(vmxml.devices)
            disk = vmxml.get_devices('disk')[0]
            img_path = disk.source.attrs['file']
            logging.info('Original image path: %s', img_path)
            # Copy old image to new image
            new_img_path = os.path.join(data_dir.get_tmp_dir(), 'test.img')
            shutil.copyfile(img_path, new_img_path)
            virsh.managedsave(vm_name, **virsh_dargs)
            xmlfile = os.path.join(data_dir.get_tmp_dir(), 'managedsave.xml')
            virsh.managedsave_dumpxml(vm_name, '>%s' % xmlfile, **virsh_dargs)
            # Make change to xmlfile and managedsave-define with it
            with open(xmlfile) as file_xml:
                updated_xml = file_xml.read().replace(img_path, new_img_path)
            with open(xmlfile, 'w') as file_xml:
                file_xml.write(updated_xml)
            virsh.managedsave_define(vm_name, xmlfile, ms_extra_options, **virsh_dargs)
            virsh.start(vm_name, **virsh_dargs)
            xml_after_define = virsh.dumpxml(vm_name, **virsh_dargs).stdout_text
            if 'test.img' not in xml_after_define:
                test.fail('Not found "test.img" in vm xml after managedsave-define.'
                          'Modification to xml did not take effect.')
        if checkpoint == 'no_save':
            # Start a guest but do not managedsave it
            start_and_login_vm()
            virsh.dom_list('--all --managed-save', **virsh_dargs)
        if checkpoint == 'rm_after_save':
            # Remove saved file after managedsave a vm
            start_and_login_vm()
            virsh.managedsave(vm_name, **virsh_dargs)
            os.remove(MANAGEDSAVE_FILE % vm_name)
        if checkpoint == 'not_saved_corrupt':
            # Do not managedsave a vm, but create a fake managedsaved file by
            # 'touch' a file
            start_and_login_vm()
            virsh.dom_list('--all --managed-save', **virsh_dargs)
            process.run('touch %s' % MANAGEDSAVE_FILE % vm_name, verbose=True)
            params['clean_managed_save'] = True
        if checkpoint == 'exclusive_option':
            virsh.managedsave(vm_name, **virsh_dargs)

        # Test managedsave-edit, managedsave_dumpxml, managedsave-define
        if params.get('check_cmd_error', '') == 'yes':
            ms_command = params.get('ms_command', '')
            if ms_command == 'edit':
                result_need_check = virsh.managedsave_edit(vm_name,
                                                           ms_extra_options,
                                                           debug=True)
            if ms_command == 'dumpxml':
                result_need_check = virsh.managedsave_dumpxml(vm_name,
                                                              ms_extra_options,
                                                              debug=True)
            if ms_command == 'define':
                result_need_check = virsh.managedsave_define(vm_name,
                                                             bkxml.xml,
                                                             ms_extra_options,
                                                             debug=True)
        # If needs to check result, check it
        if 'result_need_check' in locals():
            logging.info('Check command result.')
            libvirt.check_exit_status(result_need_check, status_error)
            if error_msg:
                libvirt.check_result(result_need_check, [error_msg])

    finally:
        if params.get('clean_managed_save'):
            os.remove(MANAGEDSAVE_FILE % vm_name)
        utils_libvirtd.libvirtd_restart()
        virsh.managedsave_remove(vm_name, debug=True)
        bkxml.sync()
Пример #20
0
def run(test, params, env):
    """
    Test virtiofs filesystem device:

    1. Start guest with 1/2 virtiofs filesystem devices.
    2. Start 2 guests with the same virtiofs filesystem device.
    3. Coldplug/Coldunplug virtiofs filesystem device.
    4. Share data between guests and host.
    5. Lifecycle for guest with virtiofs filesystem device.
    """
    def generate_expected_process_option(expected_results):
        """
        Generate expected virtiofsd process option
        """
        if cache_mode != "auto":
            expected_results = "cache=%s" % cache_mode
        if xattr == "on":
            expected_results += ",xattr"
        elif xattr == "off":
            expected_results += ",no_xattr"
        if flock == "on":
            expected_results += ",flock"
        else:
            expected_results += ",no_flock"
        if lock_posix == "on":
            expected_results += ",posix_lock"
        else:
            expected_results += ",no_posix_lock"
        logging.debug(expected_results)
        return expected_results

    def shared_data(vm_names, fs_devs):
        """
        Share data between guests and host:
        1. Mount dir in guest;
        2. Write a file in guest;
        3. Check the md5sum values are the same in guests and host;
        md5s = []
        for vm in vms:
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                logging.debug(fs_dev)
                session.cmd('rm -rf %s' % fs_dev.source['dir'],
                            ignore_all_errors=False)
                session.cmd('mkdir -p %s' % fs_dev.source['dir'])
                logging.debug("mount virtiofs dir in guest")
                cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'],
                                                   fs_dev.source['dir'])
                status, output = session.cmd_status_output(cmd, timeout=300)
                if status != 0:
                    session.close()
                    test.fail("mount virtiofs dir failed: %s" % output)
                if vm == vms[0]:
                    filename = fs_dev.source['dir'] + '/' + vm.name
                    cmd = "dd if=/dev/urandom of=%s bs=1M count=512 oflag=direct" % filename
                    status, output = session.cmd_status_output(cmd,
                                                               timeout=300)
                    if status != 0:
                        session.close()
                        test.fail("Write data failed: %s" % output)
                md5_value = session.cmd_status_output("md5sum %s" %
                                                      filename)[1].strip()
                md5s.append(md5_value)
                logging.debug(md5_value)
                md5_value = process.run("md5sum %s" %
                                        filename).stdout_text.strip()
                logging.debug(md5_value)
                md5s.append(md5_value)
            session.close()
        if len(set(md5s)) != len(fs_devs):
            test.fail("The md5sum value are not the same in guests and host")

    start_vm = params.get("start_vm", "no")
    vm_names = params.get("vms", "avocado-vt-vm1").split()
    cache_mode = params.get("cache_mode", "none")
    xattr = params.get("xattr", "on")
    lock_posix = params.get("lock_posix", "on")
    flock = params.get("flock", "on")
    xattr = params.get("xattr", "on")
    path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd")
    queue_size = int(params.get("queue_size", "512"))
    driver_type = params.get("driver_type", "virtiofs")
    guest_num = int(params.get("guest_num", "1"))
    fs_num = int(params.get("fs_num", "1"))
    vcpus_per_cell = int(params.get("vcpus_per_cell", 2))
    source_dir_prefix = params.get("source_dir_prefix", "/dir")
    target_prefix = params.get("target_prefix", "mount_tag")
    error_msg_start = params.get("error_msg_start", "")
    error_msg_save = params.get("error_msg_save", "")
    status_error = params.get("status_error", "no") == "yes"
    socket_file_checking = params.get("socket_file_checking", "no") == "yes"
    suspend_resume = params.get("suspend_resume", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    coldplug = params.get("coldplug", "no") == "yes"

    fs_devs = []
    vms = []
    vmxml_backups = []
    expected_results = ""
    huge_pages_num = 0

    if len(vm_names) != guest_num:
        test.cancel("This test needs exactly %d vms." % guest_num)
    try:
        # Define filesystem device xml
        for index in range(fs_num):
            fsdev_keys = ['accessmode', 'driver', 'source', 'target', 'binary']
            accessmode = "passthrough"
            driver = {'type': driver_type, 'queue': queue_size}
            source_dir = str(source_dir_prefix) + str(index)
            logging.debug(source_dir)
            if not os.path.isdir(source_dir):
                os.mkdir(source_dir)
            target_dir = target_prefix + str(index)
            source = {'dir': source_dir}
            target = {'dir': target_dir}
            fsdev_dict = [accessmode, driver, source, target]
            binary_keys = [
                'path', 'cache_mode', 'xattr', 'lock_posix', 'flock'
            ]
            binary_values = [path, cache_mode, xattr, lock_posix, flock]
            binary_dict = dict(zip(binary_keys, binary_values))
            fsdev_values = [accessmode, driver, source, target, binary_dict]
            fsdev_dict = dict(zip(fsdev_keys, fsdev_values))
            logging.debug(fsdev_dict)
            fs_dev = libvirt_device_utils.create_fs_xml(fsdev_dict)
            logging.debug(fs_dev)
            fs_devs.append(fs_dev)

        #Start guest with virtiofs filesystem device
        for index in range(guest_num):
            logging.debug("prepare vm %s", vm_names[index])
            vm = env.get_vm(vm_names[index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            if vmxml.max_mem < 1024000:
                vmxml.max_mem = 1024000
            hp_obj = test_setup.HugePageConfig(params)
            host_hp_size = hp_obj.get_hugepage_size()
            huge_pages_num += vmxml.max_mem // host_hp_size + 128
            utils_memory.set_num_huge_pages(huge_pages_num)
            vmxml.remove_all_device_by_type('filesystem')
            vmxml.sync()
            numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1
            vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name,
                                      vmxml.vcpu,
                                      numa_number=numa_no)
            vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,
                                               access_mode="shared")
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            logging.debug(vmxml)
            if coldplug:
                ret = virsh.attach_device(vm_names[index],
                                          fs_devs[0].xml,
                                          flagstr='--config',
                                          debug=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
            else:
                for fs in fs_devs:
                    vmxml.add_device(fs)
                    vmxml.sync()
            logging.debug(vmxml)
            result = virsh.start(vm_names[index], debug=True)
            if status_error and not managedsave:
                expected_error = error_msg_start
                utils_test.libvirt.check_exit_status(result, expected_error)
                return
            else:
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            expected_results = generate_expected_process_option(
                expected_results)
            cmd = 'ps aux | grep virtiofsd | head -n 1'
            utils_test.libvirt.check_cmd_output(cmd, content=expected_results)

        if managedsave:
            expected_error = error_msg_save
            result = virsh.managedsave(vm_names[0],
                                       ignore_status=True,
                                       debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        else:
            shared_data(vm_names, fs_devs)
            if suspend_resume:
                virsh.suspend(vm_names[0], debug=True, ignore_status=False)
                time.sleep(30)
                virsh.resume(vm_names[0], debug=True, ignore_statue=False)
            elif socket_file_checking:
                result = virsh.domid(vm_names[0])
                domid = result.stdout.strip()
                domain_dir = "var/lib/libvirt/qemu/domain-" + domid + '-' + vm_names[
                    0]
                if result.exit_status:
                    test.fail("Get domid failed.")
                for fs_dev in fs_devs:
                    alias = fs_dev.alias['name']
                    expected_pid = domain_dir + alias + '-fs.pid'
                    expected_sock = alias + '-fs.sock'
                    status1 = process.run('ls -l %s' % expected_pid,
                                          shell=True).exit_status
                    status2 = process.run('ls -l %s' % expected_sock,
                                          shell=True).exit_status
                    if not (status1 and status2):
                        test.fail(
                            "The socket and pid files are not as expected")

    finally:
        for vm in vms:
            if vm.is_alive():
                session = vm.wait_for_login()
                for fs_dev in fs_devs:
                    mount_dir = fs_dev.source['dir']
                    logging.debug(mount_dir)
                    session.cmd('umount -f %s' % mount_dir,
                                ignore_all_errors=True)
                    session.cmd('rm -rf %s' % mount_dir,
                                ignore_all_errors=True)
                session.close()
                vm.destroy(gracefully=False)
        for index in range(fs_num):
            process.run('rm -rf %s' % source_dir_prefix + str(index),
                        ignore_status=False)
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
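generate_expected_process_option() above builds the substring that must appear in the virtiofsd command line. A compact equivalent is sketched below, assuming xattr is either "on" or "off" and that cache=auto is omitted, as in the example:

def expected_virtiofsd_options(cache_mode, xattr, flock, lock_posix):
    """Build the option substring expected in the virtiofsd command line."""
    parts = []
    if cache_mode != "auto":
        parts.append("cache=%s" % cache_mode)
    parts.append("xattr" if xattr == "on" else "no_xattr")
    parts.append("flock" if flock == "on" else "no_flock")
    parts.append("posix_lock" if lock_posix == "on" else "no_posix_lock")
    return ",".join(parts)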
Пример #21
0
def run(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a
    running domain, so it can be restarted
    from the same state at a later time.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # define function
    def vm_recover_check(guest_name, option):
        """
        Check if the vm can be recovered correctly.

        :param guest_name : Checked vm's name.
        :param option : managedsave command option.
        """
        # After managedsave the vm should be shut off
        if vm.is_alive():
            raise error.TestFail("Guest should be inactive")
        virsh.start(guest_name)
        # After start the vm should be active again
        if vm.is_dead():
            raise error.TestFail("Guest should be active")
        if option:
            if option.count("running"):
                if vm.is_dead() or vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " running after started"
                                         " because of '--running' option")
            elif option.count("paused"):
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after started"
                                         " because of '--paused' option")
        else:
            if params.get("paused_after_start_vm") == "yes":
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after started"
                                         " because of initia guest state")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    status_error = ("yes" == params.get("status_error", "no"))
    vm_ref = params.get("managedsave_vm_ref")
    libvirtd = params.get("libvirtd", "on")
    extra_param = params.get("managedsave_extra_param", "")
    progress = ("yes" == params.get("managedsave_progress", "no"))
    option = params.get("managedsave_option", "")
    if option:
        if not virsh.has_command_help_match('managedsave', option):
            # Older libvirt does not have this option
            raise error.TestNAError("Older libvirt does not"
                                    " handle arguments consistently")

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.count("invalid"):
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # stop the libvirtd service
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Ignore exception with "ignore_status=True"
    if progress:
        option += " --verbose"
    option += extra_param
    ret = virsh.managedsave(vm_ref, options=option, ignore_status=True)
    status = ret.exit_status
    # The progress information is output in the error message
    error_msg = ret.stderr.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    try:
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status:
                raise error.TestFail("Run failed with right command")
            if progress:
                if not error_msg.count("Managedsave:"):
                    raise error.TestFail("Got invalid progress output")
            vm_recover_check(vm_name, option)
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
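vm_recover_check() above boils down to: after managedsave the domain must be shut off, and a subsequent start must bring it back from the saved state. A minimal sketch of that round trip, assuming the virttest.virsh helper and a running domain:

from virttest import virsh

def managedsave_round_trip(vm_name, option=""):
    """Managedsave a running domain, then start it again from the saved state."""
    ret = virsh.managedsave(vm_name, options=option, ignore_status=True)
    assert ret.exit_status == 0, ret.stderr
    # After managedsave the domain is shut off; its state is kept on disk.
    assert vm_name not in virsh.dom_list().stdout
    # Starting the domain resumes it from the managed save image, which
    # libvirt then removes automatically.
    ret = virsh.start(vm_name)
    assert ret.exit_status == 0, ret.stderr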
Пример #22
0
def run_virsh_managedsave(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a
    running domain, so it can be restarted
    from the same state at a later time.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(params["main_vm"])

    #define function
    def vm_recover_check(guest_name):
        """
        Check if the vm can be recovered correctly.

        @param: guest_name : Checked vm's name.
        """
        ret = virsh.dom_list()
        #This time vm should not be in the list
        if re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")
        virsh.start(guest_name)
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name)
        #This time vm should be in the list
        ret = virsh.dom_list()
        if not re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")

    domid = virsh.domid(vm_name).strip()
    domuuid = virsh.domuuid(vm_name).strip()

    libvirtd = params.get("managedsave_libvirtd", "on")

    #run test case
    vm_ref = params.get("managedsave_vm_ref")
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "managedsave_invalid_id" or\
         vm_ref == "managedsave_invalid_uuid":
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name" or vm_ref == "extra_parame":
        vm_ref = "%s %s" % (vm_name, params.get("managedsave_extra_parame"))

    #stop the libvirtd service
    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    #Ignore exception with "ignore_status=True"
    ret = virsh.managedsave(vm_ref, ignore_status=True)
    status = ret.exit_status

    #recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    #check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if not virsh.has_command_help_match('managedsave',
                                                r'\s+--running\s+'):
                # Older libvirt does not have --running parameter
                raise error.TestNAError(
                    "Older libvirt does not handle arguments consistently")
            else:
                raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        vm_recover_check(vm_name)
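Stripped of the parameter plumbing, the flow exercised above is: managedsave stops the running domain and stores its memory image, virsh list no longer reports it, and a later start restores it from that image. A condensed sketch, assuming a running domain "vm1" (hypothetical name) and the avocado-vt virsh module:

import re

from virttest import virsh

ret = virsh.managedsave("vm1", ignore_status=True)
assert ret.exit_status == 0, "managedsave failed"
# The saved domain must no longer show up among the running domains.
assert not re.search("vm1", virsh.dom_list().stdout)
virsh.start("vm1")
# After start it is resumed from the saved image and listed again.
assert re.search("vm1", virsh.dom_list().stdout)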
Example #23
0
def run(test, params, env):
    """
    Test Guest can start with different cpu mode settings
    and do managedsave
    """
    if 'ppc64le' not in platform.machine().lower():
        test.cancel('This case is for ppc only')

    vm_name = params.get('main_vm')
    status_error = 'yes' == params.get('status_error', 'no')
    error_msg = params.get('error_msg', '')

    cpu_mode = params.get('cpu_mode', '')
    match = params.get('match', '')
    model = params.get('model', '')
    fallback = params.get('fallback', '')
    qemu_line = params.get('qemu_line', '')
    do_managedsave = 'yes' == params.get('do_managedsave', 'no')

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # For mode='custom' the cpu model is the exact model name, which
        # should be upper case, and the qemu command line should also
        # include '-cpu $MODEL'
        model = model.upper() if cpu_mode == 'custom' else model
        qemu_line = '-cpu %s' % model if model.isupper() else qemu_line

        # Create cpu xml for test
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.mode = cpu_mode
        if model:
            cpu_xml.model = model
        if fallback:
            cpu_xml.fallback = fallback
        if match:
            cpu_xml.match = match
        logging.debug(cpu_xml)

        # Update vm's cpu
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml.cpu = cpu_xml
        vmxml.sync()
        logging.debug(vmxml)

        # Check if vm could start successfully
        result = virsh.start(vm_name, debug=True)
        libvirt.check_exit_status(result, status_error)

        # Check error message of negative cases
        if status_error and error_msg:
            if model.lower() == 'power42':
                msg_list = error_msg.split(',')
                error_msg = msg_list[0] if model.isupper() else msg_list[1]
            libvirt.check_result(result, expected_fails=[error_msg])

        # Check qemu command line and other checkpoints
        if not status_error:
            if qemu_line:
                logging.info('Search for "%s" in qemu command line', qemu_line)
                qemu_cmd_line = process.run('ps -ef|grep qemu',
                                            shell=True,
                                            verbose=True).stdout_text
                if qemu_line in qemu_cmd_line:
                    logging.info('PASS: qemu command line check')
                else:
                    test.fail('Fail: qemu command line check: %s not found' %
                              qemu_line)
            if do_managedsave:
                managed_save_result = virsh.managedsave(vm_name, debug=True)
                libvirt.check_exit_status(managed_save_result)
                start_result = virsh.start(vm_name, debug=True)
                libvirt.check_exit_status(start_result)

    finally:
        bkxml.sync()
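The cpu settings assembled above end up as a <cpu> element in the domain XML. A small standalone sketch of the shape that element takes (illustrative only; libvirt's own serialization may use single quotes and add further attributes):

import xml.etree.ElementTree as ET

cpu = ET.Element("cpu", mode="custom", match="exact")
model = ET.SubElement(cpu, "model", fallback="allow")
model.text = "POWER9"  # hypothetical model name for a ppc64le guest
print(ET.tostring(cpu, encoding="unicode"))
# <cpu mode="custom" match="exact"><model fallback="allow">POWER9</model></cpu>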
Example #24
0
    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set domain to given state or reset it.
        """
        bt = None
        if not reset:
            if condn == "avocadotest":
                bt = utils_test.run_avocado_bg(vm, params, test)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                            'cur_config': current_vcpu, 'cur_live': max_vcpu,
                            'guest_live': max_vcpu}
                result = cpu.check_vcpu_value(vm, exp_vcpu,
                                              option="--live")
            elif condn == "host_smt":
                if cpuutil.get_cpu_vendor_name() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel("Host SMT changes not allowed during guest live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
                result = virsh.save(vm_name, save_file,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocadotest":
                guestbt.join(ignore_status=True)
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms", params=params, vms=[vm])
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': current_vcpu,
                            'cur_config': current_vcpu, 'cur_live': current_vcpu,
                            'guest_live': current_vcpu}
                result = cpu.check_vcpu_value(vm, exp_vcpu,
                                              option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Work around due to known cgroup issue after cpu hot(un)plug
                # sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(os.path.join(root_cpuset_path,
                                              "machine.slice")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # check if file content differs
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt
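set_condition is meant to bracket a managedsave cycle: put the guest under some condition, save and restart it, then reset the condition. A hypothetical usage sketch (vm_name, params and the other helpers come from the enclosing test, which is not shown here):

bt = set_condition(vm_name, "stress")                      # load stress inside the guest
virsh.managedsave(vm_name, ignore_status=True, debug=True)
virsh.start(vm_name, ignore_status=True, debug=True)
set_condition(vm_name, "stress", reset=True, guestbt=bt)   # unload the stress again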
Example #25
0
def run(test, params, env):
    """
    Test Guest can start with different cpu mode settings
    and do managedsave
    """
    if 'ppc64le' not in platform.machine().lower():
        test.cancel('This case is for ppc only')

    vm_name = params.get('main_vm')
    status_error = 'yes' == params.get('status_error', 'no')
    error_msg = params.get('error_msg', '')

    cpu_mode = params.get('cpu_mode', '')
    match = params.get('match', '')
    model = params.get('model', '')
    fallback = params.get('fallback', '')
    qemu_line = params.get('qemu_line', '')
    do_managedsave = 'yes' == params.get('do_managedsave', 'no')

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # For mode='custom' the cpu model is the exact model name, which
        # should be upper case, and the qemu command line should also
        # include '-cpu $MODEL'
        model = model.upper() if cpu_mode == 'custom' else model
        qemu_line = '-cpu %s' % model if model.isupper() else qemu_line

        # Create cpu xml for test
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.mode = cpu_mode
        if model:
            cpu_xml.model = model
        if fallback:
            cpu_xml.fallback = fallback
        if match:
            cpu_xml.match = match
        logging.debug(cpu_xml)

        # Update vm's cpu
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml.cpu = cpu_xml
        vmxml.sync()
        logging.debug(vmxml)

        # Check if vm could start successfully
        result = virsh.start(vm_name, debug=True)
        libvirt.check_exit_status(result, status_error)

        # Check error message of negative cases
        if status_error and error_msg:
            if model.lower() == 'power42':
                msg_list = error_msg.split(',')
                error_msg = msg_list[0] if model.isupper() else msg_list[1]
            libvirt.check_result(result, expected_fails=[error_msg])

        # Check qemu command line and other checkpoints
        if not status_error:
            if qemu_line:
                logging.info('Search for "%s" in qemu command line', qemu_line)
                qemu_cmd_line = process.run(
                    'ps -ef|grep qemu', shell=True, verbose=True).stdout_text
                if qemu_line in qemu_cmd_line:
                    logging.info('PASS: qemu command line check')
                else:
                    test.fail('Fail: qemu command line check: %s not found' % qemu_line)
            if do_managedsave:
                managed_save_result = virsh.managedsave(vm_name, debug=True)
                libvirt.check_exit_status(managed_save_result)
                start_result = virsh.start(vm_name, debug=True)
                libvirt.check_exit_status(start_result)

    finally:
        bkxml.sync()
Example #26
0
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1. Prepare the test environment.
    2. Backup the VM's information to an xml file.
    3. When libvirtd == "off", stop the libvirtd service.
    4. Perform the virsh undefine operation.
    5. Recover the test environment (libvirtd service, VM).
    6. Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "password")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"
    nvram_o = None
    if platform.machine() == 'aarch64':
        nvram_o = " --nvram"
        option += nvram_o

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Back up the xml file. A Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    elems = backup_xml.xmltreefile.findall('/devices/disk/source')
    existing_images = [elem.get('file') for elem in elems]

    # Backup images since remove-all-storage could remove existing libvirt
    # managed guest images
    if existing_images and option.count("remove-all-storage"):
        for img in existing_images:
            backup_img = img + '.bak'
            logging.info('Backup %s to %s', img, backup_img)
            shutil.copyfile(img, backup_img)

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(test.tmpdir, snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    test.cancel("This domain has snapshot(s), "
                                "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                test.fail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            virsh.attach_disk(vm_name, volume, disk_target, "--config")

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref, option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True, debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref, nvram_o,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            try:
                local_user = params.get("username", "root")
                uri = libvirt_vm.complete_uri(local_ip)
                # setup ssh auto login from remote machine to test machine
                # for the command to execute remotely
                ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                             remote_pwd, hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (process.CmdError, remote.LoginError, aexpect.ShellError) as de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except process.CmdError as detail:
                logging.error("Detail: %s", detail)

        # After vm.destroy, virsh.domain_exists may still return True due to
        # a timing issue and the test would fail, so wait a moment.
        time.sleep(2)
        # Check if VM exists.
        vm_exist = virsh.domain_exists(vm_name)

        # Check if xml file exists.
        xml_exist = False
        if vm.is_qemu() and os.path.exists("/etc/libvirt/qemu/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_lxc() and os.path.exists("/etc/libvirt/lxc/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_xen() and os.path.exists("/etc/xen/%s" % vm_name):
            xml_exist = True

        # Check if the save file exists when --managed-save is used
        save_exist = os.path.exists(save_file)

        # Check if the volume still exists when --remove-all-storage is used
        volume_exist = volume and os.path.exists(volume)

        # Test define with acl control and recover domain.
        if params.get('setup_libvirt_polkit') == 'yes':
            if virsh.domain_exists(vm.name):
                virsh.undefine(vm_ref, nvram_o, ignore_status=True)
            cmd = "chmod 666 %s" % backup_xml.xml
            process.run(cmd, ignore_status=False, shell=True)
            s_define = virsh.define(backup_xml.xml,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, ignore_status=True, debug=True)
            status3 = s_define.exit_status

    finally:
        # Recover main VM.
        try:
            backup_xml.sync()
        except LibvirtXMLError:
            # sync() tries to undefine and re-define the xml, but the
            # undefine test may have already undefined the domain,
            # which can lead to an error
            backup_xml.define()

        # Recover existing guest images
        if existing_images and option.count("remove-all-storage"):
            for img in existing_images:
                backup_img = img + '.bak'
                logging.info('Recover image %s to %s', backup_img, img)
                shutil.move(backup_img, img)

        # Clean up pool
        if pvtest:
            pvtest.cleanup_pool(pool_name, pool_type,
                                pool_target, emulated_img)
        # Recover VM snapshots.
        if option.count("snapshot") and (not vm.is_lxc()):
            logging.debug("Recover snapshots for domain!")
            for file_item in snp_file_list:
                virsh.snapshot_create(vm_name, file_item)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh undefine return unexpected result.")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3 == 0:
                test.fail("virsh define with false acl permission" +
                          " should failed.")
    else:
        if status:
            test.fail("virsh undefine failed.")
        if undefine_twice:
            if not status2:
                test.fail("Undefine the same VM twice succeeded.")
        if vm_exist:
            test.fail("VM still exists after undefine.")
        if xml_exist:
            test.fail("Xml file still exists after undefine.")
        if option.count("managedsave") and save_exist:
            test.fail("Save file still exists after undefine.")
        if option.count("remove-all-storage") and volume_exist:
            test.fail("Volume file '%s' still exists after"
                      " undefine." % volume)
        if wipe_data and option.count("remove-all-storage"):
            if not output.count("Wiping volume '%s'" % disk_target):
                test.fail("Command didn't wipe volume storage!")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3:
                test.fail("virsh define with right acl permission" +
                          " should succeeded")
Example #27
0
def run_virsh_list(test, params, env):
    """
    Test command: virsh list.

    1) Filter parameters according to libvirtd's version.
    2) Prepare the domain's existence state: transient, managed-save.
    3) Prepare libvirt's status.
    4) Execute list command.
    5) Result check.
    """
    def list_local_domains_on_remote(options_ref, remote_ip, remote_passwd, local_ip):
        """
        Create a virsh list command and execute it on remote host.
        It will list local domains on remote host.

        @param options_ref: options in the virsh list command.
        @param remote_ip: remote host's ip.
        @param remote_passwd: remote host's password.
        @param local_ip: local ip, used to create the uri in virsh list.
        @return: status and output of the virsh list command.
        """
        complete_uri = libvirt_vm.complete_uri(local_ip)
        command_on_remote = "virsh -c %s list %s" % (complete_uri, options_ref)
        session = remote.remote_login("ssh", remote_ip, "22", "root", remote_passwd, "#")
        time.sleep(5)
        status, output = session.cmd_status_output(command_on_remote, internal_timeout=5)
        time.sleep(5)
        session.close()
        return int(status), output

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    options_ref = params.get("list_options_ref", "")
    list_ref = params.get("list_type_ref", "")
    vm_ref = params.get("vm_ref", "")

    #Some parameters are not supported on old libvirt, skip them.
    help_info = virsh.command("help list").stdout.strip()
    if vm_ref and not re.search(vm_ref, help_info):
        raise error.TestNAError("This version do not support vm type:%s"
                                 % vm_ref)
    if list_ref and not re.search(list_ref, help_info):
        raise error.TestNAError("This version do not support list type:%s"
                                 % list_ref)

    status_error = params.get("status_error", "no")
    addition_status_error = params.get("addition_status_error", "no")
    domuuid = vm.get_uuid().strip()
    # If a transient domain is destroyed, it will disappear.
    if vm_ref == "transient" and options_ref == "inactive":
        logging.info("Set addition_status_error to yes")
        logging.info("because transient domain will disappear after destroyed.")
        addition_status_error = "yes"

    if vm_ref == "transient":
        tmp_xml = vm.backup_xml()
        vm.undefine()
    elif vm_ref == "managed-save":
        virsh.managedsave(vm_name, ignore_status=True, print_info=True)

    #Prepare libvirtd status
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        libvirt_vm.service_libvirtd_control("stop")

    #run test case
    if list_ref == "--uuid":
        result_expected = domuuid
        logging.info("%s's uuid is: %s", vm_name, domuuid)
    else:
        result_expected = vm_name
        logging.info("domain's name is: %s", vm_name)

    if options_ref == "vm_id":
        domid = vm.get_id().strip()
        logging.info("%s's running-id is: %s", vm_name, domid)
        options_ref = "%s %s" % (domid, list_ref)
    elif options_ref == "vm_uuid":
        logging.info("%s's uuid is: %s", vm_name, domuuid)
        options_ref = "%s %s" % (domuuid, list_ref)
    elif options_ref == "inactive":
        vm.destroy()
        options_ref = "--inactive %s" % list_ref
    elif options_ref == "vm_name":
        options_ref = "%s %s" % (vm_name, list_ref)
    elif options_ref == "all":
        options_ref = "--all %s" % list_ref
    elif options_ref == "":
        options_ref = "%s" % list_ref

    remote_ref = params.get("remote_ref", "local")
    if remote_ref == "remote":
        remote_ip = params.get("remote_ip", "none")
        remote_passwd = params.get("remote_passwd", "none")
        local_ip = params.get("local_ip", "none")
        logging.info("Execute virsh command on remote host %s.", remote_ip)
        status, output = list_local_domains_on_remote(options_ref, remote_ip, remote_passwd, local_ip)
        logging.info("Status:%s", status)
        logging.info("Output:\n%s", output)
    else:
        if vm_ref:
            options_ref = "%s --%s" % (options_ref, vm_ref)
        result = virsh.dom_list(options_ref, ignore_status=True, print_info=True)
        status = result.exit_status
        output = result.stdout.strip()

    #Recover libvirtd service status
    if libvirtd == "off":
        libvirt_vm.service_libvirtd_control("start")

    #Recover of domain
    if vm_ref == "transient":
        vm.define(tmp_xml)
    elif vm_ref == "managed-save":
        #Recover saved guest.
        virsh.managedsave_remove(vm_name, ignore_status=True, print_info=True)

    #Check result
    status_error = (status_error == "no") and (addition_status_error == "no")
    if vm_ref == "managed-save":
        saved_output = re.search(vm_name + r"\s+saved", output)
        if saved_output:
            output = saved_output.group(0)
        else:
            output = ""

    if not status_error:
        if status == 0 and re.search(result_expected, output):
            raise error.TestFail("Run successful with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command.")
        if not re.search(result_expected, output):
            raise error.TestFail("Run successful but result is not expected.")
Example #28
0
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1. Prepare the test environment.
    2. Backup the VM's information to an xml file.
    3. When libvirtd == "off", stop the libvirtd service.
    4. Perform the virsh undefine operation.
    5. Recover the test environment (libvirtd service, VM).
    6. Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # Back up the xml file. A Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
    if option.count("managedsave") and vm.is_alive():
        virsh.managedsave(vm_name)

    snp_list = virsh.snapshot_list(vm_name)
    if option.count("snapshot"):
        snp_file_list = []
        if not len(snp_list):
            virsh.snapshot_create(vm_name)
            logging.debug("Create a snapshot for test!")
        else:
            # Backup snapshots for domain
            for snp_item in snp_list:
                tmp_file = os.path.join(test.tmpdir, snp_item+".xml")
                virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file)
                snp_file_list.append(tmp_file)
    else:
        if len(snp_list):
            raise error.TestNAError("This domain has snapshot(s), "
                                    "cannot be undefined!")

    # Turn libvirtd into certain state.
    if libvirtd_state == "off":
        utils_libvirtd.libvirtd_stop()

    # Test virsh undefine command.
    if vm_ref != "remote":
        vm_ref = "%s %s" % (vm_ref, extra)
        cmdresult = virsh.undefine(vm_ref, option,
                                   ignore_status=True, debug=True)
        status = cmdresult.exit_status
        if status:
            logging.debug("Error status, command output: %s", cmdresult.stdout)
        if undefine_twice:
            status2 = virsh.undefine(vm_ref,
                                     ignore_status=True).exit_status
    else:
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("remote_ip and/or local_ip parameters not"
                                    " changed from default values")
        try:
            uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                          remote_pwd, remote_prompt)
            cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
            status, output = session.cmd_status_output(cmd_undefine)
            logging.info("Undefine output: %s", output)
        except (error.CmdError, remote.LoginError, aexpect.ShellError) as detail:
            logging.error("Detail: %s", detail)
            status = 1
Example #29
0
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    pm_enabled_disk = params.get("pm_enabled_disk", "no")
    pm_enabled_mem = params.get("pm_enabled_mem", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_suspend_resume = "yes" == params.get("test_suspend_resume", "no")
    pmsuspend_error = 'yes' == params.get("pmsuspend_error", 'no')
    pmsuspend_error_msg = params.get("pmsuspend_error_msg")
    agent_error_test = 'yes' == params.get("agent_error_test", 'no')
    arch = platform.processor()

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # A backup of original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible fail patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs_copy = virsh_dargs.copy()
        virsh_dargs_copy['uri'] = uri
        virsh_dargs_copy['unprivileged_user'] = unprivileged_user
        if pmsuspend_error:
            fail_pat.append('access denied')

    # Setup possible failure patterns excluding ppc
    if "ppc64" not in arch:
        if pm_enabled == 'not_set':
            fail_pat.append('not supported')
        if pm_enabled == 'no':
            fail_pat.append('disabled')

    if vm_state == 'paused':
        # For older version
        fail_pat.append('not responding')
        # For newer version
        fail_pat.append('not running')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')
    if agent_error_test:
        fail_pat.append('not running')
        fail_pat.append('agent not available')
    if pmsuspend_error_msg:
        fail_pat.append(pmsuspend_error_msg)

    # RHEL6 or older releases
    unsupported_guest_err = 'suspend mode is not supported by the guest'

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if "ppc64" not in arch:
            if pm_enabled == 'not_set':
                try:
                    if vmxml.pm:
                        del vmxml.pm
                except xcepts.LibvirtXMLNotFoundError:
                    pass
            else:
                pm_xml = vm_xml.VMPMXML()
                pm_xml.mem_enabled = pm_enabled_mem
                pm_xml.disk_enabled = pm_enabled_disk
                vmxml.pm = pm_xml
            vmxml.sync()

        try:
            vm.prepare_guest_agent()
        except virt_vm.VMStartError as info:
            if "not supported" in str(info).lower():
                test.cancel(info)
            else:
                test.error(info)
        # Selinux should be enforcing
        vm.setenforce(1)

        # Create swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(data_dir.get_tmp_dir(),
                                    "%s.save" % vm_name)
            session = vm.wait_for_login()
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")
            session.close()

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name,
                                        suspend_target,
                                        debug=True,
                                        uri=uri,
                                        unprivileged_user=unprivileged_user)
            if result.exit_status == 0:
                if fail_pat:
                    test.fail("Expected failed with %s, but run succeed:\n%s" %
                              (fail_pat, result.stdout))
            else:
                if unsupported_guest_err in result.stderr:
                    test.cancel("Unsupported suspend mode:\n%s" %
                                result.stderr)
                if not fail_pat:
                    test.fail("Expected success, but run failed:\n%s" %
                              result.stderr)
                #if not any_pattern_match(fail_pat, result.stderr):
                if not any(p in result.stderr for p in fail_pat):
                    test.fail("Expected failed with one of %s, but "
                              "failed with:\n%s" % (fail_pat, result.stderr))

            # check whether the state changes to pmsuspended
            if not utils_misc.wait_for(lambda: vm.state() == 'pmsuspended',
                                       30):
                test.fail("VM failed to change its state, expected state: "
                          "pmsuspended, but actual state: %s" % vm.state())

            if agent_error_test:
                ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
                libvirt.check_result(ret, fail_pat)
                ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
                libvirt.check_result(ret, fail_pat)
                ret = virsh.domtime(vm_name, **virsh_dargs)
                libvirt.check_result(ret, fail_pat)
            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Dompmwakeup should return false here
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    test.fail("Vm status is not paused before pm wakeup")
                if params.get('setup_libvirt_polkit') == 'yes':
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs_copy)
                else:
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    test.fail("Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    test.fail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait for vm is started
                vm.wait_for_login()
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait for vm is started
                vm.wait_for_login()
                # run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target,
                                         **virsh_dargs)
                libvirt.check_exit_status(ret)
                # save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    test.fail("libvirtd crashed")
            if test_suspend_resume:
                ret = virsh.suspend(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    test.fail("VM state should be pmsuspended")
                ret = virsh.resume(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    test.fail("VM state should be pmsuspended")
        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
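The core cycle the test above builds on is short: dompmsuspend asks the guest (via the qemu guest agent) to enter a power-management suspend state, the domain then reports "pmsuspended", and dompmwakeup brings it back. A minimal sketch, assuming a running domain "vm1" (hypothetical name) with a working guest agent and pm suspend enabled in its XML:

from virttest import virsh

ret = virsh.dompmsuspend("vm1", "mem", ignore_status=True, debug=True)
if ret.exit_status == 0:
    # The domain should now be in the "pmsuspended" state; wake it up again.
    virsh.dompmwakeup("vm1", ignore_status=True, debug=True)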
Example #30
0
def run(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a
    running domain, so it can be restarted
    from the same state at a later time.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name

    # define function
    def vm_recover_check(option, libvirtd, check_shutdown=False):
        """
        Check if the vm can be recovered correctly.

        :param option: managedsave command option.
        :param libvirtd: libvirtd service instance.
        :param check_shutdown: also verify a shutdown/start cycle afterwards.
        """
        # At this point the vm should have been shut off by managedsave
        if vm.is_alive():
            test.fail("Guest should be inactive")
        # Check vm managed save state.
        ret = virsh.dom_list("--managed-save --inactive", debug=True)
        vm_state1 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        ret = virsh.dom_list("--managed-save --all", debug=True)
        vm_state2 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        if vm_state1 != "saved" or vm_state2 != "saved":
            test.fail("Guest state should be saved")

        virsh.start(vm_name, debug=True)
        # Now the vm should be active
        if vm.is_dead():
            test.fail("Guest should be active")
        # Restart libvirtd and check vm status again.
        libvirtd.restart()
        if vm.is_dead():
            test.fail("Guest should be active after"
                      " restarting libvirtd")
        # Check managed save file:
        if os.path.exists(managed_save_file):
            test.fail("Managed save image exist "
                      "after starting the domain")
        if option:
            if option.count("running"):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be"
                              " running after started"
                              " because of '--running' option")
            elif option.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after started"
                              " because of '--paused' option")
        else:
            if params.get("paused_after_start_vm") == "yes":
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after started"
                              " because of initia guest state")
        if check_shutdown:
            # Resume the domain.
            if vm.is_paused():
                vm.resume()
            vm.wait_for_login()
            # Shutdown and start the domain,
            # it should be in running state and allow login.
            vm.shutdown()
            vm.wait_for_shutdown()
            vm.start()
            vm.wait_for_login()

    def vm_undefine_check(vm_name):
        """
        Check if the vm can be undefined with the --managed-save option
        """
        #backup xml file
        xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        if not os.path.exists(managed_save_file):
            test.fail("Can't find managed save image")
        #undefine domain with no options.
        if not virsh.undefine(vm_name, options=None,
                              ignore_status=True).exit_status:
            test.fail("Guest shouldn't be undefined"
                      "while domain managed save image exists")
        #undefine domain with managed-save option.
        if virsh.undefine(vm_name, options="--managed-save",
                          ignore_status=True).exit_status:
            test.fail("Guest can't be undefine with "
                      "managed-save option")

        if os.path.exists(managed_save_file):
            test.fail("Managed save image exists"
                      " after undefining vm")
        #restore and start the vm.
        xml_backup.define()
        vm.start()

    def check_flags_parallel(virsh_cmd, bash_cmd, flags):
        """
        Run the commands parallel and check the output.
        """
        cmd = ("%s & %s" % (virsh_cmd, bash_cmd))
        ret = process.run(cmd, ignore_status=True, shell=True,
                          ignore_bg_processes=True)
        output = ret.stdout_text.strip()
        logging.debug("check flags output: %s" % output)
        lines = re.findall(r"flags:.(\d+)", output, re.M)
        logging.debug("Find all fdinfo flags: %s" % lines)
        lines = [int(i, 8) & flags for i in lines]
        if flags not in lines:
            test.fail("Checking flags %s failed" % flags)

        return ret

    def check_multi_guests(guests, start_delay, libvirt_guests):
        """
        Check start_delay option for multiple guests.
        """
        # Destroy vm first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Clone given number of guests
        timeout = params.get("clone_timeout", 360)
        for i in range(int(guests)):
            dst_vm = "%s_%s" % (vm_name, i)
            utils_libguestfs.virt_clone_cmd(vm_name, dst_vm,
                                            True, timeout=timeout)
            virsh.start(dst_vm, debug=True)

        # Wait 10 seconds for vm to start
        time.sleep(10)
        is_systemd = process.run("cat /proc/1/comm", shell=True).stdout_text.count("systemd")
        if is_systemd:
            libvirt_guests.restart()
            pattern = r'(.+ \d\d:\d\d:\d\d).+: Resuming guest.+done'
        else:
            ret = process.run("service libvirt-guests restart | \
                              awk '{ print strftime(\"%b %y %H:%M:%S\"), \
                              $0; fflush(); }'", shell=True)
            pattern = r'(.+ \d\d:\d\d:\d\d)+ Resuming guest.+done'

        # The libvirt-guests status command reads messages from the systemd
        # journal; in case the messages are not ready in time,
        # add a short wait here.
        def wait_func():
            return libvirt_guests.raw_status().stdout.count("Resuming guest")

        utils_misc.wait_for(wait_func, 5)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout_text)
        resume_time = re.findall(pattern, ret.stdout_text, re.M)
        if not resume_time:
            test.fail("Can't see messages of resuming guest")

        # Convert time string to int
        resume_seconds = [time.mktime(time.strptime(
            tm, "%b %y %H:%M:%S")) for tm in resume_time]
        logging.info("Resume time in seconds: %s", resume_seconds)
        # Check if start_delay take effect
        for i in range(len(resume_seconds)-1):
            if resume_seconds[i+1] - resume_seconds[i] < int(start_delay):
                test.fail("Checking start_delay failed")

    def wait_for_state(vm_state):
        """
        Wait for vm state is ready.
        """
        utils_misc.wait_for(lambda: vm.state() == vm_state, 10)

    def check_guest_flags(bash_cmd, flags):
        """
        Check bypass_cache option for single guest.
        """
        # Drop caches.
        drop_caches()
        # Form the proper stop/start commands based on whether systemd is used
        is_systemd = process.run("cat /proc/1/comm", shell=True).stdout_text.count("systemd")
        if is_systemd:
            virsh_cmd_stop = "systemctl stop libvirt-guests"
            virsh_cmd_start = "systemctl start libvirt-guests"
        else:
            virsh_cmd_stop = "service libvirt-guests stop"
            virsh_cmd_start = "service libvirt-guests start"

        ret = check_flags_parallel(virsh_cmd_stop, bash_cmd %
                                   (managed_save_file, managed_save_file,
                                    "1"), flags)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout_text)
        if all(["Suspending %s" % vm_name not in ret.stdout_text,
                "stopped, with saved guests" not in ret.stdout_text]):
            test.fail("Can't see messages of suspending vm")
        # status command should return 3.
        if not is_systemd:
            ret = libvirt_guests.raw_status()
        if ret.exit_status != 3:
            test.fail("The exit code %s for libvirt-guests"
                      " status is not correct" % ret)

        # Wait for VM in shut off state
        wait_for_state("shut off")
        check_flags_parallel(virsh_cmd_start, bash_cmd %
                             (managed_save_file, managed_save_file,
                              "0"), flags)
        # Wait for VM in running state
        wait_for_state("running")

    def vm_msave_remove_check(vm_name):
        """
        Check managed save remove command.
        """
        if not os.path.exists(managed_save_file):
            test.fail("Can't find managed save image")
        virsh.managedsave_remove(vm_name, debug=True)
        if os.path.exists(managed_save_file):
            test.fail("Managed save image still exists")
        virsh.start(vm_name, debug=True)
        # The domain state should be running
        if vm.state() != "running":
            test.fail("Guest state should be"
                      " running after started")

    def vm_managedsave_loop(vm_name, loop_range, libvirtd):
        """
        Run a loop of managedsave command and check its result.
        """
        if vm.is_dead():
            virsh.start(vm_name, debug=True)
        for i in range(int(loop_range)):
            logging.debug("Test loop: %s" % i)
            virsh.managedsave(vm_name, debug=True)
            virsh.start(vm_name, debug=True)
        # Check libvirtd status.
        if not libvirtd.is_running():
            test.fail("libvirtd is stopped after cmd")
        # Check vm status.
        if vm.state() != "running":
            test.fail("Guest isn't in running state")

    def build_vm_xml(vm_name, **dargs):
        """
        Build the new domain xml and define it.
        """
        try:
            # stop vm before doing any change to xml
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            if dargs.get("cpu_mode"):
                if "cpu" in vmxml:
                    del vmxml.cpu
                cpuxml = vm_xml.VMCPUXML()
                cpuxml.mode = params.get("cpu_mode", "host-model")
                cpuxml.match = params.get("cpu_match", "exact")
                cpuxml.fallback = params.get("cpu_fallback", "forbid")
                cpu_topology = {}
                cpu_topology_sockets = params.get("cpu_topology_sockets")
                if cpu_topology_sockets:
                    cpu_topology["sockets"] = cpu_topology_sockets
                cpu_topology_cores = params.get("cpu_topology_cores")
                if cpu_topology_cores:
                    cpu_topology["cores"] = cpu_topology_cores
                cpu_topology_threads = params.get("cpu_topology_threads")
                if cpu_topology_threads:
                    cpu_topology["threads"] = cpu_topology_threads
                if cpu_topology:
                    cpuxml.topology = cpu_topology
                vmxml.cpu = cpuxml
                vmxml.vcpu = int(params.get("vcpu_nums"))
            if dargs.get("sec_driver"):
                seclabel_dict = {"type": "dynamic", "model": "selinux",
                                 "relabel": "yes"}
                vmxml.set_seclabel([seclabel_dict])

            vmxml.sync()
            vm.start()
        except Exception as e:
            logging.error(str(e))
            test.cancel("Build domain xml failed")

    status_error = ("yes" == params.get("status_error", "no"))
    vm_ref = params.get("managedsave_vm_ref", "name")
    libvirtd_state = params.get("libvirtd", "on")
    extra_param = params.get("managedsave_extra_param", "")
    progress = ("yes" == params.get("managedsave_progress", "no"))
    cpu_mode = "yes" == params.get("managedsave_cpumode", "no")
    test_undefine = "yes" == params.get("managedsave_undefine", "no")
    test_bypass_cache = "yes" == params.get("test_bypass_cache", "no")
    autostart_bypass_cache = params.get("autostart_bypass_cache", "")
    multi_guests = params.get("multi_guests", "")
    test_libvirt_guests = params.get("test_libvirt_guests", "")
    check_flags = "yes" == params.get("check_flags", "no")
    security_driver = params.get("security_driver", "")
    remove_after_cmd = "yes" == params.get("remove_after_cmd", "no")
    option = params.get("managedsave_option", "")
    check_shutdown = "yes" == params.get("shutdown_after_cmd", "no")
    pre_vm_state = params.get("pre_vm_state", "")
    move_saved_file = "yes" == params.get("move_saved_file", "no")
    test_loop_cmd = "yes" == params.get("test_loop_cmd", "no")
    if option:
        if not virsh.has_command_help_match('managedsave', option):
            # Older libvirt does not have this option
            test.cancel("Older libvirt does not"
                        " handle arguments consistently")

    # Backup xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Get the libvirtd service
    libvirtd = utils_libvirtd.Libvirtd()
    # Get config files.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirt_guests_config = utils_config.LibvirtGuestsConfig()
    # Get libvirt-guests service
    libvirt_guests = Factory.create_service("libvirt-guests")

    try:
        # Destroy vm first for setting configuration file
        if vm.state() == "running":
            vm.destroy(gracefully=False)
        # Prepare test environment.
        if libvirtd_state == "off":
            libvirtd.stop()
        if autostart_bypass_cache:
            ret = virsh.autostart(vm_name, "", ignore_status=True, debug=True)
            libvirt.check_exit_status(ret)
            qemu_config.auto_start_bypass_cache = autostart_bypass_cache
            libvirtd.restart()
        if security_driver:
            qemu_config.security_driver = [security_driver]
        if test_libvirt_guests:
            if multi_guests:
                start_delay = params.get("start_delay", "20")
                libvirt_guests_config.START_DELAY = start_delay
            if check_flags:
                libvirt_guests_config.BYPASS_CACHE = "1"
            # The config file format should be "x=y" instead of "x = y"
            process.run("sed -i -e 's/ = /=/g' "
                        "/etc/sysconfig/libvirt-guests",
                        shell=True)
            libvirt_guests.restart()

        # Change domain xml.
        if cpu_mode:
            build_vm_xml(vm_name, cpu_mode=True)
        if security_driver:
            build_vm_xml(vm_name, sec_driver=True)

        # Turn VM into certain state.
        if pre_vm_state == "transient":
            logging.info("Creating %s..." % vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            # Wait for VM to be in shut off state
            utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, ignore_status=True,
                            debug=True).exit_status:
                vmxml_backup.define()
                test.cancel("Cann't create the domain")

        # Wait for vm in stable state
        if params.get("start_vm") == "yes":
            if vm.state() == "shut off":
                vm.start()
                vm.wait_for_login()

        # run test case
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.count("invalid"):
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = vm_name

        # Ignore exception with "ignore_status=True"
        if progress:
            option += " --verbose"
        option += extra_param

        # For the bypass_cache test, run a shell command to check fd flags while
        # executing the managedsave command
        software_mgr = software_manager.SoftwareManager()
        if not software_mgr.check_installed('lsof'):
            logging.info('Installing lsof package:')
            software_mgr.install('lsof')
        bash_cmd = ("let i=1; while((i++<400)); do if [ -e %s ]; then (cat /proc"
                    "/$(lsof -w %s|awk '/libvirt_i/{print $2}')/fdinfo/*%s* |"
                    "grep 'flags:.*') && break; else sleep 0.05; fi; done;")
        # Flags to check bypass cache take effect
        flags = os.O_DIRECT
        if test_bypass_cache:
            # Drop caches.
            drop_caches()
            virsh_cmd = "virsh managedsave %s %s" % (option, vm_name)
            check_flags_parallel(virsh_cmd, bash_cmd %
                                 (managed_save_file, managed_save_file,
                                  "1"), flags)
            # Wait for VM in shut off state
            wait_for_state("shut off")
            virsh_cmd = "virsh start %s %s" % (option, vm_name)
            check_flags_parallel(virsh_cmd, bash_cmd %
                                 (managed_save_file, managed_save_file,
                                  "0"), flags)
            # Wait for VM in running state
            wait_for_state("running")
        elif test_libvirt_guests:
            logging.debug("libvirt-guests status: %s", libvirt_guests.status())
            if multi_guests:
                check_multi_guests(multi_guests,
                                   start_delay, libvirt_guests)

            if check_flags:
                check_guest_flags(bash_cmd, flags)

        else:
            # Ensure VM is running
            utils_misc.wait_for(lambda: vm.state() == "running", 10)
            ret = virsh.managedsave(vm_ref, options=option,
                                    ignore_status=True, debug=True)
            status = ret.exit_status
            # The progress information is outputted in the error message
            error_msg = ret.stderr.strip()
            if move_saved_file:
                cmd = "echo > %s" % managed_save_file
                process.run(cmd, shell=True)

            # recover libvirtd service start
            if libvirtd_state == "off":
                libvirtd.start()

            if status_error:
                if not status:
                    test.fail("Run successfully with wrong command!")
            else:
                if status:
                    test.fail("Run failed with right command")
                if progress:
                    if not error_msg.count("Managedsave:"):
                        test.fail("Got invalid progress output")
                if remove_after_cmd:
                    vm_msave_remove_check(vm_name)
                elif test_undefine:
                    vm_undefine_check(vm_name)
                elif autostart_bypass_cache:
                    libvirtd.stop()
                    virsh_cmd = ("(service libvirtd start)")
                    check_flags_parallel(virsh_cmd, bash_cmd %
                                         (managed_save_file, managed_save_file,
                                          "0"), flags)
                elif test_loop_cmd:
                    loop_range = params.get("loop_range", "20")
                    vm_managedsave_loop(vm_name, loop_range, libvirtd)
                else:
                    vm_recover_check(option, libvirtd, check_shutdown)
    finally:
        # Restore test environment.
        # Restart libvirtd.service
        qemu_config.restore()
        libvirt_guests_config.restore()
        libvirtd.restart()
        if autostart_bypass_cache:
            virsh.autostart(vm_name, "--disable",
                            ignore_status=True, debug=True)
        vm.destroy(gracefully=False)
        virsh.managedsave_remove(vm_name, debug=True)
        vmxml_backup.sync()
        if multi_guests:
            for i in range(int(multi_guests)):
                virsh.remove_domain("%s_%s" % (vm_name, i),
                                    "--remove-all-storage",
                                    debug=True)
Example #31
0
def run_virsh_domjobinfo(test, params, env):
    """
    Test command: virsh domjobinfo.

    The command returns information about jobs running on a domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domjobinfo operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    pre_vm_state = params.get("domjobinfo_pre_vm_state", "null")
    vm_ref = params.get("domjobinfo_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    tmp_file = os.path.join(test.tmpdir, '%s.tmp' % vm_name)

    # Prepare the state of vm
    if pre_vm_state == "dump":
        virsh.dump(vm_name, tmp_file)
    elif pre_vm_state == "save":
        virsh.save(vm_name, tmp_file)
    elif pre_vm_state == "restore":
        virsh.save(vm_name, tmp_file)
        virsh.restore(tmp_file)
    elif pre_vm_state == "managedsave":
        virsh.managedsave(vm_name)

    # Run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domjobinfo_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    status = virsh.domjobinfo(vm_ref, ignore_status=True).exit_status

    # Recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
Example #32
0
def run(test, params, env):
    """
    Test rbd disk device.

    1.Prepare test environment,destroy or suspend a VM.
    2.Prepare disk image.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write to a file on it,
        # it is the memory operation
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'" %
                    (max_mem_slots, max_mem_rt))
        if tg_size:
            size = int(tg_size) * 1024
            cmd_str = 'memdimm.\|memory-backend-ram,id=ram-node.'
            cmd += (" | grep 'memory-backend-ram,id=%s' | grep 'size=%s" %
                    (cmd_str, size))
            if pg_size:
                cmd += ",host-nodes=%s" % node_mask
                if numa_memnode:
                    for node in numa_memnode:
                        if ('nodeset' in node
                                and node['nodeset'] in node_mask):
                            cmd += ",policy=%s" % node['mode']
                cmd += ".*pc-dimm,node=%s" % tg_node
            if mem_addr:
                cmd += (".*slot=%s,addr=%s" %
                        (mem_addr['slot'], int(mem_addr['base'], 16)))
            cmd += "'"
        # Run the command
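        # Note: process.run uses ignore_status=False here, so a non-zero grep
        # status (expected options missing from the qemu command line) raises
        # CmdError and aborts the check.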
        process.run(cmd, shell=True)

    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s" %
               (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(lambda: vm.get_totalmem_sys() != int(old_mem),
                            20,
                            first=15.0)
        new_mem = vm.get_totalmem_sys()
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "attach memory device")

        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "detach memory device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            if at_mem:
                if at_times:
                    assert int(max_mem) + (int(tg_size) *
                                           at_times) == xml_max_mem
                else:
                    assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                if at_times:
                    assert int(cur_mem) + (int(tg_size) *
                                           at_times) == xml_cur_mem
                else:
                    assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                memory_devices = 1
                if at_times:
                    memory_devices = at_times
                if len(mem_dev) != memory_devices:
                    test.fail("Found wrong number of memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                if at_times:
                    assert int(new_max_mem) - (int(tg_size) *
                                               at_times) == xml_max_mem
                    assert int(new_cur_mem) - (int(tg_size) *
                                               at_times) == xml_cur_mem
                else:
                    assert int(new_max_mem) - int(tg_size) == xml_max_mem
                    # Bug 1220702, skip the check for current memory
                    assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Found unmatched memory setting from domain xml")

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def create_mem_xml():
        """
        Create memory device xml.
        """
        mem_xml = memory.Memory()
        mem_model = params.get("mem_model", "dimm")
        mem_xml.mem_model = mem_model
        if tg_size:
            tg_xml = memory.Memory.Target()
            tg_xml.size = int(tg_size)
            tg_xml.size_unit = tg_sizeunit
            # Only set the target node when NUMA cells are configured;
            # non-NUMA guests are also supported
            if numa_cells:
                tg_xml.node = int(tg_node)
            mem_xml.target = tg_xml
        if pg_size:
            src_xml = memory.Memory.Source()
            src_xml.pagesize = int(pg_size)
            src_xml.pagesize_unit = pg_unit
            src_xml.nodemask = node_mask
            mem_xml.source = src_xml
        if mem_addr:
            mem_xml.address = mem_xml.new_mem_address(**{"attrs": mem_addr})

        logging.debug("Memory device xml: %s", mem_xml)
        return mem_xml.copy()

    def add_device(dev_xml, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach_device:
            ret = virsh.attach_device(vm_name,
                                      dev_xml.xml,
                                      flagstr=attach_option)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # Not exists
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            # Rounding the numa memory values
            if align_mem_values:
                for cell in range(cells.__len__()):
                    memory_value = str(
                        utils_numeric.align_value(cells[cell]["memory"],
                                                  align_to_value))
                    cells[cell]["memory"] = memory_value
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cells
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages:
            membacking = vm_xml.VMMemBackingXML()
            hugepages = vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(huge_pages)):
                pagexml = hugepages.PageXML()
                pagexml.update(huge_pages[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")
    align_mem_values = "yes" == params.get("align_mem_values", "no")
    align_to_value = int(params.get("align_to_value", "65536"))
    hot_reboot = "yes" == params.get("hot_reboot", "no")
    rand_reboot = "yes" == params.get("rand_reboot", "no")
    guest_known_unplug_errors = []
    guest_known_unplug_errors.append(params.get("guest_known_unplug_errors"))
    host_known_unplug_errors = []
    host_known_unplug_errors.append(params.get("host_known_unplug_errors"))

    # params for attached device
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [
        ast.literal_eval(x) for x in params.get("huge_pages", "").split()
    ]
    numa_memnode = [
        ast.literal_eval(x) for x in params.get("numa_memnode", "").split()
    ]
    at_times = int(params.get("attach_times", 1))

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if not libvirt_version.version_compare(1, 2, 14):
        test.cancel("Memory hotplug not supported in current libvirt version.")

    if align_mem_values:
        # Rounding the following values to 'align'
        max_mem = utils_numeric.align_value(max_mem, align_to_value)
        max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value)
        cur_mem = utils_numeric.align_value(cur_mem, align_to_value)
        tg_size = utils_numeric.align_value(tg_size, align_to_value)

    try:
        # Drop caches first for host has enough memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()

        # Start the domain first if the memory device will be hot-plugged
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = vm.get_totalmem_sys()
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        dev_xml = None

        # To attach the memory device.
        if add_mem_device:
            at_times = int(params.get("attach_times", 1))
            dev_xml = create_mem_xml()
            randvar = 0
            rand_value = random.randint(15, 25)
            logging.debug("reboots at %s", rand_value)
            for x in range(at_times):
                # If an error is expected, the command status is only checked
                # on the last attach attempt
                randvar = randvar + 1
                logging.debug("attaching device count = %s", x)
                if x == at_times - 1:
                    add_device(dev_xml, attach_error)
                else:
                    add_device(dev_xml)
                if hot_reboot:
                    vm.reboot()
                    vm.wait_for_login()
                if rand_reboot and randvar == rand_value:
                    randvar = 0
                    rand_value = random.randint(15, 25)
                    logging.debug("reboots at %s", rand_value)
                    vm.reboot()
                    vm.wait_for_login()

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Cann't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError as detail:
                if start_error:
                    pass
                else:
                    test.fail(detail)

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config")
                and not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total, check_option="attach")

        # Consuming memory on guest,
        # to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            consume_vm_mem()
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                test.fail("Numa memory can't be consumed on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            vm.start()
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        unplug_failed_with_known_error = False
        if detach_device:
            if not dev_xml:
                dev_xml = create_mem_xml()
            for x in range(at_times):
                ret = virsh.detach_device(vm_name,
                                          dev_xml.xml,
                                          flagstr=attach_option)
                if ret.stderr and host_known_unplug_errors:
                    for known_error in host_known_unplug_errors:
                        if (known_error[0] == known_error[-1]) and \
                           known_error.startswith(("'")):
                            known_error = known_error[1:-1]
                        if known_error in ret.stderr:
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occured in Host, while"
                                " hot unplug: %s", known_error)
                if unplug_failed_with_known_error:
                    break
                try:
                    libvirt.check_exit_status(ret, detach_error)
                except Exception as detail:
                    dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                    try:
                        session = vm.wait_for_login()
                        utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                                ignore_result=True,
                                                session=session,
                                                level_check=5)
                    except Exception:
                        session.close()
                        test.fail("After memory unplug Unable to connect to VM"
                                  " or unable to collect dmesg")
                    session.close()
                    if os.path.exists(dmesg_file):
                        with open(dmesg_file, 'r') as f:
                            flag = re.findall(
                                r'memory memory\d+?: Offline failed', f.read())
                        if not flag:
                            # No 'Offline failed' message in dmesg: the unplug
                            # failure is unexpected, so report it
                            os.remove(dmesg_file)
                            test.fail(detail)
                        # Otherwise the attached memory is in use by the vm and
                        # could not be offlined, which is an expected failure
                        unplug_failed_with_known_error = True
                        os.remove(dmesg_file)
            # Check whether a known error occurred or not
            dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            try:
                session = vm.wait_for_login()
                utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                        ignore_result=True,
                                        session=session,
                                        level_check=4)
            except Exception:
                session.close()
                test.fail("After memory unplug Unable to connect to VM"
                          " or unable to collect dmesg")
            session.close()
            if guest_known_unplug_errors and os.path.exists(dmesg_file):
                for known_error in guest_known_unplug_errors:
                    if (known_error[0] == known_error[-1]) and \
                       known_error.startswith(("'")):
                        known_error = known_error[1:-1]
                    with open(dmesg_file, 'r') as f:
                        if known_error in f.read():
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occured, while hot unplug"
                                ": %s", known_error)
            if test_dom_xml and not unplug_failed_with_known_error:
                check_dom_xml(dt_mem=detach_device)
                # Remove dmesg temp file
                if os.path.exists(dmesg_file):
                    os.remove(dmesg_file)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
Example #33
0
def run(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a
    running domain, so it can be restarted
    from the same state at a later time.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
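    # Default path of the QEMU driver's managedsave image for this domain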
    managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
    shutdown_timeout = int(params.get('shutdown_timeout', 60))

    # define function
    def vm_recover_check(option, libvirtd, check_shutdown=False):
        """
        Check if the vm can be recovered correctly.

        :param guest_name : Checked vm's name.
        :param option : managedsave command option.
        """
        # At this point managedsave should have shut the vm down
        if vm.is_alive():
            test.fail("Guest should be inactive")
        # Check vm managed save state.
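        # 'virsh list' with --managed-save lists such domains with the state
        # 'saved' (third column), which is what the parsing below expects.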
        ret = virsh.dom_list("--managed-save --inactive", debug=True)
        vm_state1 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        ret = virsh.dom_list("--managed-save --all", debug=True)
        vm_state2 = re.findall(r".*%s.*" % vm_name,
                               ret.stdout.strip())[0].split()[2]
        if vm_state1 != "saved" or vm_state2 != "saved":
            test.fail("Guest state should be saved")

        virsh.start(vm_name, debug=True)
        # After starting, the vm should be active again
        if vm.is_dead():
            test.fail("Guest should be active")
        # Restart libvirtd and check vm status again.
        libvirtd.restart()
        if vm.is_dead():
            test.fail("Guest should be active after"
                      " restarting libvirtd")
        # Check managed save file:
        if os.path.exists(managed_save_file):
            test.fail("Managed save image exist "
                      "after starting the domain")
        if option:
            if option.count("running"):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be"
                              " running after started"
                              " because of '--running' option")
            elif option.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after started"
                              " because of '--paused' option")
        else:
            if params.get("paused_after_start_vm") == "yes":
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after started"
                              " because of initia guest state")
        if check_shutdown:
            # Resume the domain.
            if vm.is_paused():
                vm.resume()
            vm.wait_for_login()
            # Shutdown and start the domain,
            # it should be in running state and can be login.
            vm.shutdown()
            if not vm.wait_for_shutdown(shutdown_timeout):
                test.fail('VM failed to shutdown')
            vm.start()
            vm.wait_for_login()

    def vm_undefine_check(vm_name):
        """
        Check if vm can be undefined with manage-save option
        """
        # Backup xml file
        xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        if not os.path.exists(managed_save_file):
            test.fail("Can't find managed save image")
        # Undefine domain with no options.
        if not virsh.undefine(vm_name, options=None,
                              ignore_status=True).exit_status:
            test.fail("Guest shouldn't be undefined"
                      "while domain managed save image exists")
        # Undefine domain with managed-save option.
        if virsh.undefine(vm_name, options="--managed-save",
                          ignore_status=True).exit_status:
            test.fail("Guest can't be undefine with "
                      "managed-save option")

        if os.path.exists(managed_save_file):
            test.fail("Managed save image exists"
                      " after undefining vm")
        # Restore and start the vm.
        xml_backup.define()
        vm.start()

    def check_flags_parallel(virsh_cmd, bash_cmd, flags):
        """
        Run the commands parallel and check the output.
        """
        cmd = ("%s & %s" % (virsh_cmd, bash_cmd))
        ret = process.run(cmd, ignore_status=True, shell=True,
                          ignore_bg_processes=True)
        output = ret.stdout_text.strip()
        logging.debug("check flags output: %s" % output)
        lines = re.findall(r"flags:.(\d+)", output, re.M)
        logging.debug("Find all fdinfo flags: %s" % lines)
        lines = [int(i, 8) & flags for i in lines]
        if flags not in lines:
            test.fail("Checking flags %s failed" % flags)

        return ret

    def check_multi_guests(guests, start_delay, libvirt_guests):
        """
        Check start_delay option for multiple guests.
        """
        # Destroy vm first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Clone given number of guests
        timeout = params.get("clone_timeout", 360)
        for i in range(int(guests)):
            dst_vm = "%s_%s" % (vm_name, i)
            utils_libguestfs.virt_clone_cmd(vm_name, dst_vm,
                                            True, timeout=timeout)
            virsh.start(dst_vm, debug=True)

        # Wait 10 seconds for vm to start
        time.sleep(10)
        is_systemd = process.run("cat /proc/1/comm", shell=True).stdout_text.count("systemd")
        if is_systemd:
            libvirt_guests.restart()
            pattern = r'(.+ \d\d:\d\d:\d\d).+: Resuming guest .*?[\n]*.*done'
        else:
            ret = process.run("service libvirt-guests restart | \
                              awk '{ print strftime(\"%b %y %H:%M:%S\"), \
                              $0; fflush(); }'", shell=True)
            pattern = r'(.+ \d\d:\d\d:\d\d)+ Resuming guest.+done'
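        # The timestamps of consecutive 'Resuming guest' messages are compared
        # below to verify that guests are resumed at least start_delay seconds
        # apart.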

        # libvirt-guests status command read messages from systemd
        # journal, in cases of messages are not ready in time,
        # add a time wait here.
        def wait_func():
            return libvirt_guests.raw_status().stdout.count("Resuming guest")

        utils_misc.wait_for(wait_func, 15)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout_text)
        resume_time = re.findall(pattern, ret.stdout_text, re.M)
        if not resume_time:
            test.fail("Can't see messages of resuming guest")

        # Convert time string to int
        resume_seconds = [time.mktime(time.strptime(
            tm, "%b %y %H:%M:%S")) for tm in resume_time]
        logging.info("Resume time in seconds: %s", resume_seconds)
        # Check if start_delay take effect
        for i in range(len(resume_seconds)-1):
            if resume_seconds[i+1] - resume_seconds[i] < int(start_delay):
                test.fail("Checking start_delay failed")

    def wait_for_state(vm_state):
        """
        Wait for vm state is ready.
        """
        utils_misc.wait_for(lambda: vm.state() == vm_state, 10)

    def check_guest_flags(bash_cmd, flags):
        """
        Check bypass_cache option for single guest.
        """
        # Drop caches.
        drop_caches()
        # form proper parallel command based on if systemd is used or not
        is_systemd = process.run("cat /proc/1/comm", shell=True).stdout_text.count("systemd")
        if is_systemd:
            virsh_cmd_stop = "systemctl stop libvirt-guests"
            virsh_cmd_start = "systemctl start libvirt-guests"
        else:
            virsh_cmd_stop = "service libvirt-guests stop"
            virsh_cmd_start = "service libvirt-guests start"

        ret = check_flags_parallel(virsh_cmd_stop, bash_cmd %
                                   (managed_save_file, managed_save_file,
                                    "1"), flags)
        if is_systemd:
            ret = libvirt_guests.raw_status()
        logging.info("status output: %s", ret.stdout_text)
        if all(["Suspending %s" % vm_name not in ret.stdout_text,
                "stopped, with saved guests" not in ret.stdout_text]):
            test.fail("Can't see messages of suspending vm")
        # status command should return 3.
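        # (LSB init script convention: exit code 3 means the service is not
        # running)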
        if not is_systemd:
            ret = libvirt_guests.raw_status()
        if ret.exit_status != 3:
            test.fail("The exit code %s for libvirt-guests"
                      " status is not correct" % ret)

        # Wait for VM in shut off state
        wait_for_state("shut off")
        check_flags_parallel(virsh_cmd_start, bash_cmd %
                             (managed_save_file, managed_save_file,
                              "0"), flags)
        # Wait for VM in running state
        wait_for_state("running")

    def vm_msave_remove_check(vm_name):
        """
        Check managed save remove command.
        """
        if not os.path.exists(managed_save_file) and case not in ['not_saved_without_file', 'saved_without_file']:
            test.fail("Can't find managed save image")
        ret = virsh.managedsave_remove(vm_name, debug=True)
        libvirt.check_exit_status(ret, msave_rm_error)
        if os.path.exists(managed_save_file):
            test.fail("Managed save image still exists")
        virsh.start(vm_name, debug=True)
        # The domain state should be running
        if vm.state() != "running":
            test.fail("Guest state should be"
                      " running after started")

    def vm_managedsave_loop(vm_name, loop_range, libvirtd):
        """
        Run a loop of managedsave command and check its result.
        """
        if vm.is_dead():
            virsh.start(vm_name, debug=True)
        for i in range(int(loop_range)):
            logging.debug("Test loop: %s" % i)
            virsh.managedsave(vm_name, debug=True)
            virsh.start(vm_name, debug=True)
        # Check libvirtd status.
        if not libvirtd.is_running():
            test.fail("libvirtd is stopped after cmd")
        # Check vm status.
        if vm.state() != "running":
            test.fail("Guest isn't in running state")

    def build_vm_xml(vm_name, **dargs):
        """
        Build the new domain xml and define it.
        """
        try:
            # stop vm before doing any change to xml
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            if dargs.get("cpu_mode"):
                if "cpu" in vmxml:
                    del vmxml.cpu
                cpuxml = vm_xml.VMCPUXML()
                cpuxml.mode = params.get("cpu_mode", "host-model")
                cpuxml.match = params.get("cpu_match", "exact")
                cpuxml.fallback = params.get("cpu_fallback", "forbid")
                cpu_topology = {}
                cpu_topology_sockets = params.get("cpu_topology_sockets")
                if cpu_topology_sockets:
                    cpu_topology["sockets"] = cpu_topology_sockets
                cpu_topology_cores = params.get("cpu_topology_cores")
                if cpu_topology_cores:
                    cpu_topology["cores"] = cpu_topology_cores
                cpu_topology_threads = params.get("cpu_topology_threads")
                if cpu_topology_threads:
                    cpu_topology["threads"] = cpu_topology_threads
                if cpu_topology:
                    cpuxml.topology = cpu_topology
                vmxml.cpu = cpuxml
                vmxml.vcpu = int(params.get("vcpu_nums"))
            if dargs.get("sec_driver"):
                seclabel_dict = {"type": "dynamic", "model": "selinux",
                                 "relabel": "yes"}
                vmxml.set_seclabel([seclabel_dict])

            vmxml.sync()
            vm.start()
        except Exception as e:
            logging.error(str(e))
            test.cancel("Build domain xml failed")

    status_error = ("yes" == params.get("status_error", "no"))
    vm_ref = params.get("managedsave_vm_ref", "name")
    libvirtd_state = params.get("libvirtd", "on")
    extra_param = params.get("managedsave_extra_param", "")
    progress = ("yes" == params.get("managedsave_progress", "no"))
    cpu_mode = "yes" == params.get("managedsave_cpumode", "no")
    test_undefine = "yes" == params.get("managedsave_undefine", "no")
    test_bypass_cache = "yes" == params.get("test_bypass_cache", "no")
    autostart_bypass_cache = params.get("autostart_bypass_cache", "")
    multi_guests = params.get("multi_guests", "")
    test_libvirt_guests = params.get("test_libvirt_guests", "")
    check_flags = "yes" == params.get("check_flags", "no")
    security_driver = params.get("security_driver", "")
    remove_after_cmd = "yes" == params.get("remove_after_cmd", "no")
    option = params.get("managedsave_option", "")
    check_shutdown = "yes" == params.get("shutdown_after_cmd", "no")
    pre_vm_state = params.get("pre_vm_state", "")
    move_saved_file = "yes" == params.get("move_saved_file", "no")
    test_loop_cmd = "yes" == params.get("test_loop_cmd", "no")
    remove_test = 'yes' == params.get('remove_test', 'no')
    case = params.get('case', '')
    msave_rm_error = "yes" == params.get("msave_rm_error", "no")
    if option:
        if not virsh.has_command_help_match('managedsave', option):
            # Older libvirt does not have this option
            test.cancel("Older libvirt does not"
                        " handle arguments consistently")

    # Backup xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Get the libvirtd service
    libvirtd = utils_libvirtd.Libvirtd()
    # Get config files.
    qemu_config = utils_config.LibvirtQemuConfig()
    libvirt_guests_config = utils_config.LibvirtGuestsConfig()
    # Get libvirt-guests service
    libvirt_guests = Factory.create_service("libvirt-guests")

    try:
        # Destroy vm first for setting configuration file
        if vm.state() == "running":
            vm.destroy(gracefully=False)
        # Prepare test environment.
        if libvirtd_state == "off":
            libvirtd.stop()
        if autostart_bypass_cache:
            ret = virsh.autostart(vm_name, "", ignore_status=True, debug=True)
            libvirt.check_exit_status(ret)
            qemu_config.auto_start_bypass_cache = autostart_bypass_cache
            libvirtd.restart()
        if security_driver:
            qemu_config.security_driver = [security_driver]
        if test_libvirt_guests:
            if multi_guests:
                start_delay = params.get("start_delay", "20")
                libvirt_guests_config.START_DELAY = start_delay
            if check_flags:
                libvirt_guests_config.BYPASS_CACHE = "1"
            # The config file format should be "x=y" instead of "x = y"
            process.run("sed -i -e 's/ = /=/g' "
                        "/etc/sysconfig/libvirt-guests",
                        shell=True)
            libvirt_guests.restart()

        # Change domain xml.
        if cpu_mode:
            build_vm_xml(vm_name, cpu_mode=True)
        if security_driver:
            build_vm_xml(vm_name, sec_driver=True)

        # Turn VM into certain state.
        if pre_vm_state == "transient":
            logging.info("Creating %s..." % vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            # Wait for VM to be in shut off state
            utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, ignore_status=True,
                            debug=True).exit_status:
                vmxml_backup.define()
                test.cancel("Can't create the domain")

        # Wait for vm in stable state
        if params.get("start_vm") == "yes":
            if vm.state() == "shut off":
                vm.start()
                vm.wait_for_login()

        # run test case
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.count("invalid"):
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = vm_name

        # Ignore exception with "ignore_status=True"
        if progress:
            option += " --verbose"
        option += extra_param

        # For bypass_cache test. Run a shell command to check fd flags while
        # executing managedsave command
        software_mgr = software_manager.SoftwareManager()
        if not software_mgr.check_installed('lsof'):
            logging.info('Installing lsof package:')
            software_mgr.install('lsof')
        bash_cmd = ("let i=1; while((i++<400)); do if [ -e %s ]; then (cat /proc"
                    "/$(lsof -w %s|awk '/libvirt_i/{print $2}')/fdinfo/%s |"
                    "grep 'flags:.*') && break; else sleep 0.05; fi; done;")
        # Flags to check bypass cache take effect
        flags = os.O_DIRECT
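        # With --bypass-cache (or BYPASS_CACHE=1 for libvirt-guests), libvirt
        # is expected to open the save image with O_DIRECT, which is the flag
        # the fdinfo check looks for.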
        if test_bypass_cache:
            # Drop caches.
            drop_caches()
            virsh_cmd = "virsh managedsave %s %s" % (option, vm_name)
            check_flags_parallel(virsh_cmd, bash_cmd %
                                 (managed_save_file, managed_save_file,
                                  "1"), flags)
            # Wait for VM in shut off state
            wait_for_state("shut off")
            virsh_cmd = "virsh start %s %s" % (option, vm_name)
            check_flags_parallel(virsh_cmd, bash_cmd %
                                 (managed_save_file, managed_save_file,
                                  "0"), flags)
            # Wait for VM in running state
            wait_for_state("running")
        elif test_libvirt_guests:
            logging.debug("libvirt-guests status: %s", libvirt_guests.status())
            if multi_guests:
                check_multi_guests(multi_guests,
                                   start_delay, libvirt_guests)

            if check_flags:
                check_guest_flags(bash_cmd, flags)
        elif remove_test:
            if case == 'not_saved_with_file':
                process.run('touch %s' % managed_save_file, shell=True, verbose=True)
            vm_msave_remove_check(vm_name)

        else:
            # Ensure VM is running
            utils_misc.wait_for(lambda: vm.state() == "running", 10)
            ret = virsh.managedsave(vm_ref, options=option,
                                    ignore_status=True, debug=True)
            status = ret.exit_status
            # The progress information is outputted in the error message
            error_msg = ret.stderr.strip()
            if move_saved_file:
                cmd = "echo > %s" % managed_save_file
                process.run(cmd, shell=True)
            if case == 'saved_without_file':
                os.remove(managed_save_file)

            # recover libvirtd service start
            if libvirtd_state == "off":
                libvirtd.start()

            if status_error:
                if not status:
                    if libvirtd_state == "off" and libvirt_version.version_compare(5, 6, 0):
                        logging.info("From libvirt version 5.6.0 libvirtd is restarted "
                                     "and command should succeed")
                    else:
                        test.fail("Run successfully with wrong command!")
            else:
                if status:
                    test.fail("Run failed with right command")
                if progress:
                    if not error_msg.count("Managedsave:"):
                        test.fail("Got invalid progress output")
                if remove_after_cmd or case == 'saved_without_file':
                    vm_msave_remove_check(vm_name)
                elif test_undefine:
                    vm_undefine_check(vm_name)
                elif autostart_bypass_cache:
                    # rhbz#1755303
                    if libvirt_version.version_compare(5, 6, 0):
                        os.remove("/run/libvirt/qemu/autostarted")
                    libvirtd.stop()
                    virsh_cmd = ("(service libvirtd start)")
                    check_flags_parallel(virsh_cmd, bash_cmd %
                                         (managed_save_file, managed_save_file,
                                          "0"), flags)
                elif test_loop_cmd:
                    loop_range = params.get("loop_range", "20")
                    vm_managedsave_loop(vm_name, loop_range, libvirtd)
                else:
                    vm_recover_check(option, libvirtd, check_shutdown)
    finally:
        # Restore test environment.
        # Restart libvirtd.service
        qemu_config.restore()
        libvirt_guests_config.restore()
        libvirtd.restart()
        if autostart_bypass_cache:
            virsh.autostart(vm_name, "--disable",
                            ignore_status=True, debug=True)
        vm.destroy(gracefully=False)
        virsh.managedsave_remove(vm_name, debug=True)
        vmxml_backup.sync()
        if multi_guests:
            for i in range(int(multi_guests)):
                virsh.remove_domain("%s_%s" % (vm_name, i),
                                    "--remove-all-storage",
                                    debug=True)
Example #34
0
def run(test, params, env):
    """
    Test mtu feature from virtual network
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    mtu_type = params.get('mtu_type')
    mtu_size = params.get('mtu_size', '')
    net = params.get('net', DEFAULT_NET)
    net_type = params.get('net_type', '')
    with_iface = 'yes' == params.get('with_iface', 'no')
    with_net = 'yes' == params.get('with_net', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    check = params.get('check', '')
    error_msg = params.get('error_msg', '')
    bridge_name = 'br_mtu' + utils_misc.generate_random_string(3)
    add_pkg = params.get('add_pkg', '')
    model = params.get('model', 'virtio')

    def set_network(size, net='default'):
        """
        Set mtu size to a certain network
        """
        logging.info('Set mtu size of network "%s" to %s', net, size)
        default_xml = NetworkXML.new_from_net_dumpxml(net)
        default_xml.mtu = size
        default_xml.sync()
        logging.debug(virsh.net_dumpxml(net))

    def set_interface(mtu_size='',
                      source_network='default',
                      iface_type='network',
                      iface_model='virtio'):
        """
        Set mtu size to a certain interface
        """
        interface_type = 'bridge' if iface_type in (
            'bridge', 'openvswitch') else iface_type
        iface_dict = {
            'type': interface_type,
            'source': "{'%s': '%s'}" % (interface_type, source_network),
            'model': iface_model
        }
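        # Note: nested attributes ('source' and, below, 'mtu') are passed as
        # dict-like strings, the form this test hands to
        # libvirt.modify_vm_iface.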

        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})

        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})

        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)

    def get_default_if():
        """
        Get the default host interface to be used by the vm
        """
        ifaces = utils_net.get_sorted_net_if()
        logging.debug('Interfaces on host: %s', ifaces)
        for iface in ifaces[0]:
            if 'Link detected: yes' in process.run('ethtool %s' %
                                                   iface).stdout_text:
                logging.debug('Found host interface "%s"', iface)
                return iface

    def create_bridge():
        """
        Create a bridge on host for test
        """
        cmd_create_br = 'nmcli con add type bridge con-name %s ifname %s'
        con_name = 'con_' + utils_misc.generate_random_string(3)
        bridge_name = 'br_' + utils_misc.generate_random_string(3)
        process.run(cmd_create_br % (con_name, bridge_name), verbose=True)
        return con_name, bridge_name

    def create_network_xml(name, network_type, base_if='', **kwargs):
        """
        Create a network xml to be defined
        """
        m_net = NetworkXML(name)
        m_net.forward = {'mode': 'bridge'}
        if network_type in ('bridge', 'openvswitch'):
            m_net.bridge = {'name': kwargs['bridge_name']}
        elif network_type == 'macvtap':
            if base_if:
                m_net.forward_interface = [{'dev': base_if}]
        if network_type == 'openvswitch':
            m_net.virtualport_type = 'openvswitch'
        if 'mtu' in kwargs:
            m_net.mtu = kwargs['mtu']
        logging.debug(m_net)
        return m_net.xml

    def create_iface(iface_type, **kwargs):
        """
        Create an interface to be attached to the vm
        """
        m_iface = Interface(iface_type)
        m_iface.mac_address = utils_net.generate_mac_address_simple()
        if 'base_if' in kwargs:
            m_iface.source = {'dev': kwargs['base_if'], 'mode': 'vepa'}
        if 'source_net' in kwargs:
            m_iface.source = {'network': kwargs['source_net']}
        if 'mtu' in kwargs:
            m_iface.mtu = {'size': kwargs['mtu']}
        if 'model_net' in kwargs:
            m_iface.model = kwargs['model_net']
        logging.debug(m_iface.get_xml())
        logging.debug(m_iface)
        return m_iface

    def check_mtu(mtu_size, qemu=False):
        """
        Check if mtu meets expectation on host
        """
        error = ''
        live_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = live_vmxml.get_devices('interface')[0]
        logging.debug(iface_xml.target)
        dev = iface_xml.target['dev']
        ifconfig_info = process.run('ifconfig|grep mtu|grep %s' % dev,
                                    shell=True,
                                    verbose=True).stdout_text
        if 'mtu %s' % mtu_size in ifconfig_info:
            logging.info('PASS on ifconfig check for vnet.')
        else:
            error += 'Fail on ifconfig check for vnet.'
        if qemu:
            qemu_mtu_info = process.run('ps aux|grep qemu-kvm',
                                        shell=True,
                                        verbose=True).stdout_text
            if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
                logging.info('PASS on qemu cmd line check.')
            else:
                error += 'Fail on qemu cmd line check.'
        if error:
            test.fail(error)

    def check_mtu_in_vm(fn_login, mtu_size):
        """
        Check if mtu meets expectations in vm
        """
        session = fn_login()
        check_cmd = 'ifconfig'
        output = session.cmd(check_cmd)
        session.close()
        logging.debug(output)
        if 'mtu %s' % mtu_size not in output:
            test.fail('MTU check inside vm failed.')
        else:
            logging.debug("MTU check inside vm passed.")

    try:
        bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)
        if add_pkg:
            add_pkg = add_pkg.split()
            utils_package.package_install(add_pkg)
        if 'openvswitch' in add_pkg:
            br = 'ovsbr0' + utils_misc.generate_random_string(3)
            process.run('systemctl start openvswitch.service',
                        shell=True,
                        verbose=True)
            process.run('ovs-vsctl add-br %s' % br, shell=True, verbose=True)
            process.run('ovs-vsctl show', shell=True, verbose=True)

        if not check or check in ['save', 'managedsave', 'hotplug_save']:
            # Create bridge or network and set mtu
            iface_type = 'network'
            if net_type in ('bridge', 'openvswitch'):
                if net_type == 'bridge':
                    params['con_name'], br = create_bridge()
                if mtu_type == 'network':
                    test_net = create_network_xml(bridge_name,
                                                  net_type,
                                                  bridge_name=br)
                    virsh.net_create(test_net, debug=True)
                    virsh.net_dumpxml(bridge_name, debug=True)
                if mtu_type == 'interface':
                    iface_type = net_type
                    bridge_name = br
            elif net_type == 'network':
                if mtu_type == 'network':
                    set_network(mtu_size)

            iface_mtu = 0
            if mtu_type == 'interface':
                iface_mtu = mtu_size
            if mtu_type == 'network' and with_iface:
                mtu_size = str(int(mtu_size) // 2)
                iface_mtu = mtu_size

            source_net = bridge_name if net_type in (
                'bridge', 'openvswitch') else 'default'

            # set mtu in vm interface
            set_interface(iface_mtu,
                          source_network=source_net,
                          iface_type=iface_type,
                          iface_model=model)
            vm.start()
            vm_login = vm.wait_for_serial_login if net_type in (
                'bridge', 'openvswitch') else vm.wait_for_login
            vm_login().close()
            check_qemu = True if mtu_type == 'interface' else False

            # Test mtu after save vm
            if check in ('save', 'hotplug_save'):
                if check == 'hotplug_save':
                    iface = create_iface('network',
                                         source_net='default',
                                         mtu=mtu_size,
                                         model_net=model)
                    params['mac'] = iface.mac_address
                    virsh.attach_device(vm_name, iface.xml, debug=True)
                    virsh.dumpxml(vm_name, debug=True)
                    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    if params['mac'] not in str(dom_xml):
                        test.fail('Failed to attach interface with mtu')
                save_path = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + '.save')
                virsh.save(vm_name, save_path, debug=True)
                virsh.restore(save_path, debug=True)
            if check == 'managedsave':
                virsh.managedsave(vm_name, debug=True)
                virsh.start(vm_name, debug=True)

            # Check in both host and vm
            check_mtu(mtu_size, check_qemu)
            if mtu_type == 'interface' or with_iface:
                check_mtu_in_vm(vm_login, mtu_size)
                vm_login(timeout=60).close()

            if check == 'hotplug_save':
                virsh.detach_interface(vm_name,
                                       'network %s' % params['mac'],
                                       debug=True)
                time.sleep(5)
                dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if params['mac'] in str(dom_xml):
                    test.fail(
                        'Failed to detach interface with mtu after save-restore'
                    )

        else:
            hotplug = 'yes' == params.get('hotplug', 'False')
            if check == 'net_update':
                result = virsh.net_update(DEFAULT_NET,
                                          'modify',
                                          'mtu',
                                          '''"<mtu size='%s'/>"''' % mtu_size,
                                          debug=True)
            if check in ('macvtap', 'bridge_net', 'ovswitch_net'):
                base_if = get_default_if()
                macv_name = 'direct-macvtap' + utils_misc.generate_random_string(
                    3)

                # Test mtu in different type of network
                if mtu_type == 'network':
                    if check == 'macvtap':
                        test_net = create_network_xml(macv_name,
                                                      'macvtap',
                                                      base_if,
                                                      mtu=mtu_size)
                    if check == 'bridge_net':
                        params['con_name'], br = create_bridge()
                        test_net = create_network_xml(bridge_name,
                                                      'bridge',
                                                      mtu=mtu_size,
                                                      bridge_name=br)
                    if check == 'ovswitch_net':
                        test_net = create_network_xml(bridge_name,
                                                      'openvswitch',
                                                      mtu=mtu_size,
                                                      bridge_name=br)
                    if 'net_create' in params['id']:
                        result = virsh.net_create(test_net, debug=True)
                    if 'net_define' in params['id']:
                        result = virsh.net_define(test_net, debug=True)

                # Test mtu with or without a binding network
                elif mtu_type == 'interface':
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    if with_net:
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if)
                        virsh.net_create(test_net, debug=True)
                        iface = create_iface('network',
                                             source_net=macv_name,
                                             mtu=mtu_size,
                                             model_net=model)
                        if hotplug:
                            result = virsh.attach_device(vm_name,
                                                         iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            vmxml.sync()
                            result = virsh.start(vm_name)
                    else:
                        iface = create_iface('direct',
                                             base_if=base_if,
                                             mtu=mtu_size,
                                             model_net=model)
                        if hotplug:
                            result = virsh.attach_device(vm_name,
                                                         iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            result = virsh.define(vmxml.xml, debug=True)
            if check == 'invalid_val':
                iface = create_iface('network',
                                     source_net='default',
                                     mtu=mtu_size,
                                     model_net=model)
                result = virsh.attach_device(vm_name, iface.xml, debug=True)

            # Check result
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])

    finally:
        bk_xml.sync()
        bk_netxml.sync()
        if 'test_net' in locals():
            virsh.net_destroy(bridge_name, debug=True)
        if params.get('con_name'):
            process.run('nmcli con del %s' % params['con_name'], verbose=True)
        if add_pkg:
            process.run("ovs-vsctl del-br %s" % br, verbose=True)
            utils_package.package_remove(add_pkg)
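
A minimal sketch of the round trip the 'managedsave' check above exercises: set an MTU on a libvirt network, put the guest through managedsave/start and confirm the tap device still carries the MTU. The helper name is hypothetical and the module paths are assumed to match the avocado-vt layout used by these examples.

from avocado.utils import process
from virttest import virsh
from virttest.libvirt_xml import vm_xml
from virttest.libvirt_xml.network_xml import NetworkXML


def check_mtu_after_managedsave(test, vm, net='default', mtu_size='9000'):
    """
    Hypothetical helper: set an mtu on a network, round-trip the vm
    through managedsave and check the tap device keeps the mtu.
    """
    net_xml = NetworkXML.new_from_net_dumpxml(net)
    net_xml.mtu = mtu_size
    net_xml.sync()

    if vm.is_dead():
        vm.start()
    vm.wait_for_login().close()

    # Round trip through managedsave, as the 'managedsave' check above does
    virsh.managedsave(vm.name, debug=True, ignore_status=False)
    virsh.start(vm.name, debug=True, ignore_status=False)
    vm.wait_for_login().close()

    # Read the tap device name from the live xml and verify its mtu
    iface = vm_xml.VMXML.new_from_dumpxml(vm.name).get_devices('interface')[0]
    dev = iface.target['dev']
    link_info = process.run('ip link show %s' % dev, verbose=True).stdout_text
    if 'mtu %s' % mtu_size not in link_info:
        test.fail('MTU %s missing on %s after managedsave' % (mtu_size, dev))
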
Example #35
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to a xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment (libvirtd service, VM).
    6.Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current" +
                                    " libvirt version.")

    # Back up the xml file. A Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(test.tmpdir, snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name,
                                               snp_item,
                                               to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    raise error.TestNAError("This domain has snapshot(s), "
                                            "cannot be undefined!")
        if option.count("remove-all-storage"):
            for disk in vm_xml.VMXML.get_disk_source(vm_name):
                try:
                    disk_source = disk.find('source')
                    disk_path = disk_source.get('file') or disk_source.get(
                        'dev')
                except AttributeError:
                    continue
                if disk_path in utlv.get_all_vol_paths():
                    raise error.TestNAError("This case will remove '%s',"
                                            " it's dangerous that skip"
                                            " this case" % disk_path)
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                            emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                raise error.TestFail("Create volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            virsh.attach_disk(vm_name, volume, disk_target, "--config")

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref,
                                       option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True,
                                       debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            try:
                uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (error.CmdError, remote.LoginError, aexpect.ShellError), de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except error.CmdError, detail:
                logging.error("Detail: %s", detail)
Example #36
def run(test, params, env):
    """
    Test command: virsh start.

    1) Get the params from params.
    2) Prepare libvirtd's status.
    3) Do the start operation.
    4) Result check.
    5) clean up.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm_ref = params.get("vm_ref", "vm1")
    opt = params.get("vs_opt", "")

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    backup_name = vm_ref
    vm = None
    if vm_ref != "":
        vm = env.get_vm(vm_ref)
    vmxml = libvirt_xml.VMXML()

    libvirtd_state = params.get("libvirtd", "on")
    pre_operation = params.get("vs_pre_operation", "")
    status_error = params.get("status_error", "no")

    try:
        # prepare before start vm
        if libvirtd_state == "on":
            utils_libvirtd.libvirtd_start()
        elif libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        if pre_operation == "rename":
            new_vm_name = params.get("vs_new_vm_name", "virsh_start_vm1")
            vm = libvirt_xml.VMXML.vm_rename(vm, new_vm_name)
            vm_ref = new_vm_name
        elif pre_operation == "undefine":
            vmxml = vmxml.new_from_dumpxml(vm_ref)
            vmxml.undefine()

        # do the start operation
        try:
            if pre_operation == "remote":
                # get the params for remote test
                remote_ip = params.get("remote_ip", "ENTER.YOUR.REMOTE.IP")
                remote_user = params.get("remote_user", "root")
                remote_pwd = params.get("remote_pwd", "ENTER.YOUR.REMOTE.PASSWORD")
                if pre_operation == "remote" and remote_ip.count("ENTER.YOUR."):
                    test.cancel("Remote test parameters not configured")

                ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)
                remote_uri = "qemu+ssh://%s/system" % remote_ip
                cmd_result = virsh.start(vm_ref, ignore_status=True, debug=True,
                                         uri=remote_uri)
                if cmd_result.exit_status:
                    test.fail("Start vm failed.\n Detail: %s" % cmd_result)
            elif opt.count("console"):
                # With --console, the start command prints the guest's
                # boot messages and then drops into the login prompt. In
                # this case, we start the domain with --console and log
                # into the vm's console via remote.handle_prompts().
                cmd = "start %s --console" % vm_ref
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC, auto_close=True)
                virsh_session.sendline(cmd)
                remote.handle_prompts(virsh_session, params.get("username", ""),
                                      params.get("password", ""), r"[\#\$]\s*$",
                                      timeout=60, debug=True)
            elif opt.count("autodestroy"):
                # With --autodestroy, the vm will be destroyed when the
                # virsh session that started it is closed. So we run the
                # start command with --autodestroy inside a virsh
                # session, close that session, and then check whether
                # the vm has been destroyed.
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC, auto_close=True)
                cmd = "start %s --autodestroy" % vm_ref
                status = virsh_session.cmd_status(cmd)
                if status:
                    test.fail("Failed to start vm with --autodestroy.")
                # Close the session, then the vm should be destroyed.
                virsh_session.close()
            elif opt.count("force-boot"):
                # With --force-boot, the VM will be started from a fresh
                # boot even if we have saved it with virsh managedsave.
                # In this case, we start the vm, execute 'sleep 1000 &',
                # and save it with virsh managedsave. Finally, we start
                # the vm with --force-boot. To verify the result, we
                # check the sleep process: if it still exists, force-boot
                # failed; otherwise the case passes.
                vm.start()
                session = vm.wait_for_login()
                status = session.cmd_status("sleep 1000&")
                if status:
                    test.error("Can not execute command in guest.")
                sleep_pid = session.cmd_output("echo $!").strip()
                virsh.managedsave(vm_ref)
                virsh.start(vm_ref, options=opt)
            else:
                cmd_result = virsh.start(vm_ref, options=opt)
                if cmd_result.exit_status:
                    if status_error == "no":
                        test.fail("Start vm failed.\n Detail: %s"
                                  % cmd_result)
                else:
                    # start vm successfully
                    if status_error == "yes":
                        if libvirtd_state == "off" and libvirt_version.version_compare(5, 6, 0):
                            logging.info("From libvirt version 5.6.0 libvirtd is restarted,"
                                         " command should succeed.")
                        else:
                            test.fail("Run successfully with wrong "
                                      "command!\n Detail:%s"
                                      % cmd_result)

            if opt.count("paused"):
                if not (vm.state() == "paused"):
                    test.fail("VM is not paused when started with "
                              "--paused.")
            elif opt.count("autodestroy"):
                if vm.is_alive():
                    test.fail("VM was started with --autodestroy,"
                              "but not destroyed when virsh session "
                              "closed.")
            elif opt.count("force-boot"):
                session = vm.wait_for_login()
                status = session.cmd_status("ps %s |grep '[s]leep 1000'"
                                            % sleep_pid)
                if not status:
                    test.fail("VM was started with --force-boot,"
                              "but it is restored from a"
                              " managedsave.")
            else:
                if status_error == "no" and not vm.is_alive() and pre_operation != "remote":
                    test.fail("VM was started but it is not alive.")

        except remote.LoginError as detail:
            test.fail("Failed to login guest.")
    finally:
        # clean up
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        elif pre_operation == "rename":
            libvirt_xml.VMXML.vm_rename(vm, backup_name)
        elif pre_operation == "remote":
            virsh.destroy(vm_ref, ignore_status=False, debug=True, uri=remote_uri)

        if vm and vm.is_paused():
            vm.resume()

        # Restore VM
        vmxml_backup.sync()
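
A condensed, hypothetical version of the --force-boot branch above: leave a marker process in the guest, managedsave the domain, start it with --force-boot and make sure the marker did not survive, i.e. the guest booted fresh instead of being restored.

from virttest import virsh


def check_force_boot(test, vm):
    """
    Hypothetical helper mirroring the --force-boot branch above.
    """
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    if session.cmd_status("sleep 1000&"):
        test.error("Could not start the marker process in the guest")
    sleep_pid = session.cmd_output("echo $!").strip()
    session.close()

    virsh.managedsave(vm.name, ignore_status=False, debug=True)
    virsh.start(vm.name, options="--force-boot", ignore_status=False,
                debug=True)

    # If the sleep process survived, the guest was restored, not rebooted
    session = vm.wait_for_login()
    restored = session.cmd_status("ps %s | grep '[s]leep 1000'"
                                  % sleep_pid) == 0
    session.close()
    if restored:
        test.fail("Guest was restored from managedsave despite --force-boot")
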
Example #37
def run(test, params, env):
    """
    Test command: virsh list.

    1) Filter parameters according to libvirtd's version.
    2) Prepare the domain's existence state: transient, managed-save.
    3) Prepare libvirt's status.
    4) Execute list command.
    5) Result check.
    """
    def list_local_domains_on_remote(options_ref, remote_ip, remote_passwd,
                                     local_ip, remote_user, local_user,
                                     local_pwd):
        """
        Create a virsh list command and execute it on remote host.
        It will list local domains on remote host.

        :param options_ref: options for the virsh list command.
        :param remote_ip: remote host's ip.
        :param remote_passwd: remote host's password.
        :param local_ip: local ip, to create the uri in virsh list.
        :return: status and output of the virsh list command.
        """
        complete_uri = libvirt_vm.complete_uri(local_ip)
        command_on_remote = ("virsh -c %s list %s"
                             % (complete_uri, options_ref))
        try:
            # setup autologin for ssh from remote machine to execute commands
            # remotely
            config_opt = ["StrictHostKeyChecking=no"]
            ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                         remote_passwd, hostname2=local_ip,
                                         user2=local_user,
                                         password2=local_pwd,
                                         config_options=config_opt)
            session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                          remote_passwd, "#")
            time.sleep(5)
            status, output = session.cmd_status_output(
                command_on_remote, internal_timeout=30)
        except Exception as info:
            logging.error("Shell failed to execute command from"
                          " remote")
            return 1, info
        time.sleep(5)
        session.close()
        return int(status), output

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    options_ref = params.get("list_options_ref", "")
    list_ref = params.get("list_type_ref", "")
    vm_ref = params.get("vm_ref", "")
    status_error = params.get("status_error", "no")
    addition_status_error = params.get("addition_status_error", "no")
    desc = params.get("list_desc", "")
    libvirtd = params.get("libvirtd", "on")
    remote_ref = params.get("remote_ref", "")
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip")
    remote_user = params.get("remote_user", "root")
    local_user = params.get("username", "root")
    local_pwd = params.get("local_pwd", None)

    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    domid = vm.get_id()

    # Some parameters are not supported on old libvirt, skip them.
    help_info = virsh.help("list").stdout.strip()
    if vm_ref and not re.search(vm_ref, help_info):
        raise exceptions.TestSkipError("This version do not support vm type:%s"
                                       % vm_ref)
    if list_ref and not re.search(list_ref, help_info):
        raise exceptions.TestSkipError("This version do not support list"
                                       " type:%s" % list_ref)

    # If a transient domain is destroyed, it will disappear.
    if vm_ref == "transient" and options_ref == "inactive":
        logging.info("Set addition_status_error to yes")
        logging.info(
            "because transient domain will disappear after destroyed.")
        addition_status_error = "yes"

    if vm_ref == "transient":
        vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vm.undefine()
    elif vm_ref == "managed-save":
        virsh.managedsave(vm_name, ignore_status=True, print_info=True)

    try:
        # run test case
        if list_ref == "--uuid":
            result_expected = domuuid
            logging.info("%s's uuid is: %s", vm_name, domuuid)
        elif list_ref == "--title":
            vm_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if options_ref == "inactive":
                virsh.desc(vm_name, "--config --title", desc)
            else:
                virsh.desc(vm_name, "--live --title", desc)
            result_expected = desc
            logging.info("%s's title is: %s", vm_name, desc)
        else:
            result_expected = vm_name
            logging.info("domain's name is: %s", vm_name)

        if options_ref == "vm_id":
            logging.info("%s's running-id is: %s", vm_name, domid)
            options_ref = "%s %s" % (domid, list_ref)
        elif options_ref == "vm_uuid":
            logging.info("%s's uuid is: %s", vm_name, domuuid)
            options_ref = "%s %s" % (domuuid, list_ref)
        elif options_ref == "inactive":
            vm.destroy()
            options_ref = "--inactive %s" % list_ref
        elif options_ref == "vm_name":
            options_ref = "%s %s" % (vm_name, list_ref)
        elif options_ref == "all":
            options_ref = "--all %s" % list_ref
        elif options_ref == "":
            options_ref = "%s" % list_ref

        # Prepare libvirtd status
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        if remote_ref == "remote":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise exceptions.TestSkipError(
                    "Remote test parameters unchanged from default")
            logging.info("Execute virsh command on remote host %s.", remote_ip)
            status, output = list_local_domains_on_remote(options_ref,
                                                          remote_ip,
                                                          remote_pwd,
                                                          local_ip,
                                                          remote_user,
                                                          local_user,
                                                          local_pwd)
            logging.info("Status:%s", status)
            logging.info("Output:\n%s", output)
        else:
            if vm_ref:
                options_ref = "%s --%s" % (options_ref, vm_ref)
            result = virsh.dom_list(
                options_ref, ignore_status=True, print_info=True)
            status = result.exit_status
            output = result.stdout.strip()

    except Exception as output:
        status = True
        logging.error("Exception: %s" % output)

    finally:
        # Recover libvirtd service status
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # Recover of domain
        if vm_ref == "transient" or list_ref == "--title":
            vm_backup.sync()
        elif vm_ref == "managed-save":
            # Recover saved guest.
            virsh.managedsave_remove(vm_name, ignore_status=True,
                                     print_info=True)

        # Check result
        status_error = (status_error == "no") and \
                       (addition_status_error == "no")
        if vm_ref == "managed-save":
            saved_output = re.search(vm_name + r"\s+saved", output)
            if saved_output:
                output = saved_output.group(0)
            else:
                output = ""

        if not status_error:
            if not status and re.search(result_expected, output):
                raise exceptions.TestFail("Run successful with wrong command!")
        else:
            if status:
                raise exceptions.TestFail("Run failed with right command.")
            if not re.search(result_expected, output):
                raise exceptions.TestFail("Run successful but result is not"
                                          " expected.")
Example #38
def run(test, params, env):
    """
    Test memory device (dimm) hotplug and unplug.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare the memory device xml.
    3.Edit the domain xml and start the domain.
    4.Perform test operations (attach/detach, save/restore, managedsave).
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def get_vm_memtotal(session):
        """
        Get guest total memory
        """
        proc_meminfo = session.cmd_output("cat /proc/meminfo")
        # verify format and units are expected
        return int(re.search(r'MemTotal:\s+(\d+)\s+[kK]B',
                             proc_meminfo).group(1))

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write to a file on it,
        # it is the memory operation
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'"
                    % (max_mem_slots, max_mem_rt))
        if tg_size:
            size = int(tg_size) * 1024
            cmd += (" | grep 'memory-backend-ram,id=memdimm0,size=%s"
                    % size)
            if pg_size:
                cmd += ",host-nodes=%s" % node_mask
                if numa_memnode:
                    for node in numa_memnode:
                        if ('nodeset' in node and
                                node['nodeset'] in node_mask):
                            cmd += ",policy=%s" % node['mode']
                cmd += ".*pc-dimm,node=%s" % tg_node
            if mem_addr:
                cmd += (".*slot=%s,addr=%s" %
                        (mem_addr['slot'], int(mem_addr['base'], 16)))
            cmd += "'"
        # Run the command
        utils.run(cmd)

    def check_guest_meminfo(old_mem):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s"
               % (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: get_vm_memtotal(session) != int(old_mem), 5)
        new_mem = get_vm_memtotal(session)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        if new_mem != int(old_mem) + int(tg_size):
            raise error.TestFail("Total memory on guest couldn't"
                                 " changed after attach memory "
                                 "device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            if at_mem:
                assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                #assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                if len(mem_dev) != 1:
                    raise error.TestFail("Found wrong number of"
                                         " memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                assert int(new_max_mem) - int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                #assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils.log_last_traceback()
            raise error.TestFail("Found unmatched memory setting"
                                 " from domain xml")

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(test.tmpdir,
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def create_mem_xml():
        """
        Create memory device xml.
        """
        mem_xml = memory.Memory()
        mem_model = params.get("mem_model", "dimm")
        mem_xml.mem_model = mem_model
        if tg_size:
            tg_xml = memory.Memory.Target()
            tg_xml.size = int(tg_size)
            tg_xml.size_unit = tg_sizeunit
            tg_xml.node = int(tg_node)
            mem_xml.target = tg_xml
        if pg_size:
            src_xml = memory.Memory.Source()
            src_xml.pagesize = int(pg_size)
            src_xml.pagesize_unit = pg_unit
            src_xml.nodemask = node_mask
            mem_xml.source = src_xml
        if mem_addr:
            mem_xml.address = mem_xml.new_mem_address(
                **{"attrs": mem_addr})

        logging.debug("Memory device xml: %s", mem_xml)
        return mem_xml.copy()

    def add_device(dev_xml, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach_device:
            ret = virsh.attach_device(vm_name, dev_xml.xml,
                                      flagstr=attach_option)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except:
                # Not exists
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cells
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages:
            membacking = vm_xml.VMMemBackingXML()
            hugepages = vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(huge_pages)):
                pagexml = hugepages.PageXML()
                pagexml.update(huge_pages[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    #cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")

    # params for attached device
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [ast.literal_eval(x)
                  for x in params.get("huge_pages", "").split()]
    numa_memnode = [ast.literal_eval(x)
                    for x in params.get("numa_memnode", "").split()]

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Drop caches first so the host has enough free memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()

        # Start the domain anyway if attaching a memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = get_vm_memtotal(session)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        dev_xml = None

        # To attach the memory device.
        if add_mem_device:
            at_times = int(params.get("attach_times", 1))
            dev_xml = create_mem_xml()
            for x in xrange(at_times):
                # If an error is expected, the command's error status
                # should be checked on the last iteration
                if x == at_times - 1:
                    add_device(dev_xml, attach_error)
                else:
                    add_device(dev_xml)

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml,
                            **virsh_dargs).exit_status:
                vmxml_backup.define()
                raise error.TestFail("Cann't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError:
                if start_error:
                    pass
                else:
                    raise error.TestFail("VM Failed to start"
                                         " for some reason!")

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem,
                                  flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config") and
                not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total)

        # Consume memory in the guest
        # to verify the memory changes via numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            consume_vm_mem()
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                raise error.TestFail("Numa memory can't be consumed"
                                     " on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            vm.start()
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        if detach_device:
            if not dev_xml:
                dev_xml = create_mem_xml()
            ret = virsh.detach_device(vm_name, dev_xml.xml,
                                      flagstr=attach_option)
            libvirt.check_exit_status(ret, detach_error)
            if test_dom_xml:
                check_dom_xml(dt_mem=detach_device)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
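
A condensed, hypothetical helper for the test_managedsave branch above: round-trip the domain through managedsave/start and confirm a hot-plugged dimm with the expected target size is still present in the live xml.

from virttest import virsh
from virttest.libvirt_xml import vm_xml
from virttest.utils_test import libvirt


def check_mem_device_after_managedsave(test, vm, tg_size):
    """
    Hypothetical helper condensing the test_managedsave branch above.
    """
    ret = virsh.managedsave(vm.name, ignore_status=True, debug=True)
    libvirt.check_exit_status(ret)
    vm.start()
    vm.wait_for_login().close()

    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm.name)
    mem_devs = dom_xml.get_devices("memory")
    if len(mem_devs) != 1 or int(mem_devs[0].target.size) != int(tg_size):
        test.fail("Memory device lost or resized after managedsave")
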
Example #39
def run(test, params, env):
    """
    Test command: virsh start.

    1) Get the params from params.
    2) Prepare libvirtd's status.
    3) Do the start operation.
    4) Result check.
    5) clean up.
    """
    # get the params from params
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm_ref = params.get("vm_ref", "vm1")
    opt = params.get("vs_opt", "")

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    backup_name = vm_ref
    vm = None
    if vm_ref != "":
        vm = env.get_vm(vm_ref)
    vmxml = libvirt_xml.VMXML()

    libvirtd_state = params.get("libvirtd", "on")
    pre_operation = params.get("vs_pre_operation", "")
    status_error = params.get("status_error", "no")

    # get the params for remote test
    remote_ip = params.get("remote_ip", "ENTER.YOUR.REMOTE.IP")
    remote_pwd = params.get("remote_pwd", "ENTER.YOUR.REMOTE.PASSWORD")
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    if pre_operation == "remote" and (remote_ip.count("ENTER.YOUR.") or
                                      local_ip.count("ENTER.YOUR.")):
        raise error.TestNAError("Remote test parameters not configured")

    try:
        # prepare before start vm
        if libvirtd_state == "on":
            utils_libvirtd.libvirtd_start()
        elif libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        if pre_operation == "rename":
            new_vm_name = params.get("vs_new_vm_name", "virsh_start_vm1")
            vm = libvirt_xml.VMXML.vm_rename(vm, new_vm_name)
            vm_ref = new_vm_name
        elif pre_operation == "undefine":
            vmxml = vmxml.new_from_dumpxml(vm_ref)
            vmxml.undefine()

        # do the start operation
        try:
            if pre_operation == "remote":
                # get remote session
                session = remote.wait_for_login("ssh", remote_ip, "22", "root",
                                                remote_pwd, "#")
                # get uri of local
                uri = libvirt_vm.complete_uri(local_ip)

                cmd = "virsh -c %s start %s" % (uri, vm_ref)
                status, output = session.cmd_status_output(cmd)
                if status:
                    raise error.TestError(vm_ref, output)
            elif opt.count("console"):
                # With --console, the start command prints the guest's
                # boot messages and then drops into the login prompt. In
                # this case, we start the domain with --console and log
                # into the vm's console via remote.handle_prompts().
                cmd = "start %s --console" % vm_ref
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC, auto_close=True)
                virsh_session.sendline(cmd)
                remote.handle_prompts(virsh_session, params.get("username", ""),
                                      params.get("password", ""), r"[\#\$]\s*$",
                                      timeout=60, debug=True)
            elif opt.count("autodestroy"):
                # With --autodestroy, the vm will be destroyed when the
                # virsh session that started it is closed. So we run the
                # start command with --autodestroy inside a virsh
                # session, close that session, and then check whether
                # the vm has been destroyed.
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC, auto_close=True)
                cmd = "start %s --autodestroy" % vm_ref
                status = virsh_session.cmd_status(cmd)
                if status:
                    raise error.TestFail("Failed to start vm with --autodestroy.")
                # Close the session, then the vm should be destroyed.
                virsh_session.close()
            elif opt.count("force-boot"):
                # With --force-boot, the VM will be started from a fresh
                # boot even if we have saved it with virsh managedsave.
                # In this case, we start the vm, execute 'sleep 1000 &',
                # and save it with virsh managedsave. Finally, we start
                # the vm with --force-boot. To verify the result, we
                # check the sleep process: if it still exists, force-boot
                # failed; otherwise the case passes.
                vm.start()
                session = vm.wait_for_login()
                status = session.cmd_status("sleep 1000&")
                if status:
                    raise error.TestError("Can not execute command in guest.")
                sleep_pid = session.cmd_output("echo $!").strip()
                virsh.managedsave(vm_ref)
                virsh.start(vm_ref, options=opt)
            else:
                cmd_result = virsh.start(vm_ref, options=opt)
                if cmd_result.exit_status:
                    if status_error == "no":
                        raise error.TestFail("Start vm failed.\n Detail: %s"
                                             % cmd_result)
                else:
                    # start vm successfully
                    if status_error == "yes":
                        raise error.TestFail("Run successfully with wrong "
                                             "command!\n Detail:%s"
                                             % cmd_result)

            if opt.count("paused"):
                if not (vm.state() == "paused"):
                    raise error.TestFail("VM is not paused when started with "
                                         "--paused.")
            elif opt.count("autodestroy"):
                if vm.is_alive():
                    raise error.TestFail("VM was started with --autodestroy,"
                                         "but not destroyed when virsh session "
                                         "closed.")
            elif opt.count("force-boot"):
                session = vm.wait_for_login()
                status = session.cmd_status("ps %s |grep '[s]leep 1000'"
                                            % sleep_pid)
                if not status:
                    raise error.TestFail("VM was started with --force-boot,"
                                         "but it is restored from a"
                                         " managedsave.")
            else:
                if status_error == "no" and not vm.is_alive():
                    raise error.TestFail("VM was started but it is not alive.")

        except remote.LoginError, detail:
            raise error.TestFail("Failed to login guest.")
    finally:
        # clean up
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        elif pre_operation == "rename":
            libvirt_xml.VMXML.vm_rename(vm, backup_name)

        if vm and vm.is_paused():
            vm.resume()

        # Restore VM
        vmxml_backup.sync()
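
A compact, hypothetical helper for the --autodestroy branch above: start the domain with --autodestroy inside a dedicated virsh session, close the session and confirm the domain is torn down with it.

from virttest import virsh


def check_autodestroy(test, vm):
    """
    Hypothetical helper mirroring the --autodestroy branch above.
    """
    virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                       auto_close=True)
    if virsh_session.cmd_status("start %s --autodestroy" % vm.name):
        test.fail("Failed to start vm with --autodestroy.")
    # Closing the session should destroy the autodestroy-started domain
    virsh_session.close()
    if vm.is_alive():
        test.fail("VM survived closing the virsh session that started it"
                  " with --autodestroy")
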
Example #40
                                  "0", flags), flags)
            # Wait for VM in running state
            wait_for_state("running")
        elif test_libvirt_guests:
            logging.debug("libvirt-guests status: %s", libvirt_guests.status())
            if multi_guests:
                check_multi_guests(multi_guests,
                                   start_delay, libvirt_guests)

            if check_flags:
                check_guest_flags(bash_cmd, flags)

        else:
            # Ensure VM is running
            utils_misc.wait_for(lambda: vm.state() == "running", 10)
            ret = virsh.managedsave(vm_ref, options=option, ignore_status=True)
            status = ret.exit_status
            # The progress information is output in the error message
            error_msg = ret.stderr.strip()
            if move_saved_file:
                cmd = "echo > %s" % managed_save_file
                utils.run(cmd)

            # Recover the libvirtd service
            if libvirtd_state == "off":
                libvirtd.start()

            if status_error:
                if not status:
                    raise error.TestFail("Run successfully with wrong command!")
            else:
Example #41
    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set domain to given state or reset it.
        """
        bt = None
        if not reset:
            if condn == "avocadotest":
                bt = utils_test.run_avocado_bg(vm, params, test)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                            'cur_config': current_vcpu, 'cur_live': max_vcpu,
                            'guest_live': max_vcpu}
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option="--live")
            elif condn == "host_smt":
                if cpu.get_cpu_arch() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel("Host SMT changes not allowed during guest live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
                result = virsh.save(vm_name, save_file,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocadotest":
                guestbt.join(ignore_status=True)
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms", params=params, vms=[vm])
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': current_vcpu,
                            'cur_config': current_vcpu, 'cur_live': current_vcpu,
                            'guest_live': current_vcpu}
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Work around due to known cgroup issue after cpu hot(un)plug
                # sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(os.path.join(root_cpuset_path,
                                              "machine.slice")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # check if file content differs
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt
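
A typical caller pairs set_condition() with its reset phase around the operation under test, so the guest is always returned to its original state. A minimal sketch of that calling pattern, assuming vm_name, condn and the managedsave/save step come from the enclosing test as in the example above:

# Sketch of how set_condition() is typically driven (names assumed from the example).
def run_condition_cycle(vm_name, condn):
    bt = set_condition(vm_name, condn)      # enter the condition (stress, suspend, hotplug, ...)
    try:
        pass                                # ... the managedsave/save operation under test ...
    finally:
        set_condition(vm_name, condn, reset=True, guestbt=bt)   # always undo the condition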
Example #42
0
def run_virsh_managedsave(test, params, env):
    """
    Test command: virsh managedsave.

    This command can save and destroy a
    running domain, so it can be restarted
    from the same state at a later time.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])

    # define helper function
    def vm_recover_check(guest_name):
        """
        Check if the vm can be recovered correctly.

        @param: guest_name : Checked vm's name.
        """
        ret = virsh.dom_list()
        # This time vm should not be in the list
        if re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")
        virsh.start(guest_name)
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name)
        # This time vm should be in the list
        ret = virsh.dom_list()
        if not re.search(guest_name, ret.stdout):
            raise error.TestFail("virsh list output invalid")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    libvirtd = params.get("managedsave_libvirtd","on")

    #run test case
    vm_ref = params.get("managedsave_vm_ref")
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "managedsave_invalid_id" or\
         vm_ref == "managedsave_invalid_uuid":
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name" or vm_ref == "extra_parame":
        vm_ref = "%s %s" % (vm_name, params.get("managedsave_extra_parame"))

    # stop the libvirtd service
    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()

    # Ignore exception with "ignore_status=True"
    ret = virsh.managedsave(vm_ref, ignore_status=True)
    status = ret.exit_status

    # start the libvirtd service again
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # check status_error
    status_error = params.get("status_error")
    if status_error == "yes":
        if status == 0:
            if not virsh.has_command_help_match('managedsave', r'\s+--running\s+'):
                # Older libvirt does not have --running parameter
                raise error.TestNAError("Older libvirt does not handle arguments consistently")
            else:
                raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        vm_recover_check(vm_name)
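
Outside the test framework, the save/restart cycle that vm_recover_check() verifies can be reproduced with the plain virsh CLI. A rough sketch (the domain name is a placeholder and the commands need root privileges):

# Rough CLI equivalent of the managedsave round trip checked above.
import subprocess


def cli_managedsave_cycle(dom="avocado-vt-vm1"):
    subprocess.run(["virsh", "managedsave", dom], check=True)
    # The domain is now shut off but still defined, with a managed save image on disk.
    listing = subprocess.run(["virsh", "list", "--all"], check=True,
                             capture_output=True, text=True).stdout
    assert dom in listing
    # Starting the domain restores it from the managed save image.
    subprocess.run(["virsh", "start", dom], check=True)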
Example #43
0
def managedsave_restore():
    result = virsh.managedsave(vm_name, debug=True)
    utils_test.libvirt.check_exit_status(result, expect_error=False)
    result = virsh.start(vm_name)
    utils_test.libvirt.check_exit_status(result, expect_error=False)
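
managedsave_restore() only checks exit codes; the managed save image itself can also be verified on disk. A small sketch under the same assumptions (virttest wrappers; the save path is assumed to be libvirt's default for the qemu driver, as used in the undefine example below):

# Sketch: the managed save image should appear after managedsave and be consumed by start.
import os

from virttest import virsh


def managedsave_file_lifecycle(vm_name):
    save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name  # assumed default path
    virsh.managedsave(vm_name, ignore_status=False, debug=True)
    if not os.path.exists(save_file):
        raise RuntimeError("managed save image was not created: %s" % save_file)
    virsh.start(vm_name, ignore_status=False, debug=True)
    if os.path.exists(save_file):
        raise RuntimeError("managed save image should be consumed by start")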
Example #44
0
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to a xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment.(libvirts service,VM)
    6.Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "password")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"
    nvram_o = None
    if platform.machine() == 'aarch64':
        nvram_o = " --nvram"
        option += nvram_o

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Back up the xml file. A Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    elems = backup_xml.xmltreefile.findall('/devices/disk/source')
    existing_images = [elem.get('file') for elem in elems]

    # Backup images since remove-all-storage could remove existing libvirt
    # managed guest images
    if existing_images and option.count("remove-all-storage"):
        for img in existing_images:
            backup_img = img + '.bak'
            logging.info('Backup %s to %s', img, backup_img)
            shutil.copyfile(img, backup_img)

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(data_dir.get_tmp_dir(), snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    test.cancel("This domain has snapshot(s), "
                                "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                test.fail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            virsh.attach_disk(vm_name, volume, disk_target, "--config")

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref, option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True, debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref, nvram_o,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            try:
                local_user = params.get("username", "root")
                uri = libvirt_vm.complete_uri(local_ip)
                # setup ssh auto login from remote machine to test machine
                # for the command to execute remotely
                ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                             remote_pwd, hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (process.CmdError, remote.LoginError, aexpect.ShellError) as de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except process.CmdError as detail:
                logging.error("Detail: %s", detail)

        # After vm.destroy, virsh.domain_exists may still return True due to
        # a timing issue and make the test fail, so wait briefly.
        time.sleep(2)
        # Check if VM exists.
        vm_exist = virsh.domain_exists(vm_name)

        # Check if xml file exists.
        xml_exist = False
        if vm.is_qemu() and os.path.exists("/etc/libvirt/qemu/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_lxc() and os.path.exists("/etc/libvirt/lxc/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_xen() and os.path.exists("/etc/xen/%s" % vm_name):
            xml_exist = True

        # Check if the save file exists when --managed-save is used
        save_exist = os.path.exists(save_file)

        # Check if the volume still exists when --remove-all-storage is used
        volume_exist = volume and os.path.exists(volume)

        # Test define with acl control and recover domain.
        if params.get('setup_libvirt_polkit') == 'yes':
            if virsh.domain_exists(vm.name):
                virsh.undefine(vm_ref, nvram_o, ignore_status=True)
            cmd = "chmod 666 %s" % backup_xml.xml
            process.run(cmd, ignore_status=False, shell=True)
            s_define = virsh.define(backup_xml.xml,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, ignore_status=True, debug=True)
            status3 = s_define.exit_status

    finally:
        # Recover main VM.
        try:
            backup_xml.sync()
        except LibvirtXMLError:
            # sync() tries to undefine and re-define the xml, but the
            # undefine test may already have undefined the domain,
            # which would make sync() error out.
            backup_xml.define()

        # Recover existing guest images
        if existing_images and option.count("remove-all-storage"):
            for img in existing_images:
                backup_img = img + '.bak'
                logging.info('Recover image %s to %s', backup_img, img)
                shutil.move(backup_img, img)

        # Clean up pool
        if pvtest:
            pvtest.cleanup_pool(pool_name, pool_type,
                                pool_target, emulated_img)
        # Recover VM snapshots.
        if option.count("snapshot") and (not vm.is_lxc()):
            logging.debug("Recover snapshots for domain!")
            for file_item in snp_file_list:
                virsh.snapshot_create(vm_name, file_item)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh undefine return unexpected result.")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3 == 0:
                test.fail("virsh define with false acl permission" +
                          " should failed.")
    else:
        if status:
            test.fail("virsh undefine failed.")
        if undefine_twice:
            if not status2:
                test.fail("Undefine the same VM twice succeeded.")
        if vm_exist:
            test.fail("VM still exists after undefine.")
        if xml_exist:
            test.fail("Xml file still exists after undefine.")
        if option.count("managedsave") and save_exist:
            test.fail("Save file still exists after undefine.")
        if option.count("remove-all-storage") and volume_exist:
            test.fail("Volume file '%s' still exists after"
                      " undefine." % volume)
        if wipe_data and option.count("remove-all-storage"):
            if not output.count("Wiping volume '%s'" % disk_target):
                test.fail("Command didn't wipe volume storage!")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3:
                test.fail("virsh define with right acl permission" +
                          " should succeeded")
Example #45
0
def run(test, params, env):
    """
    Test memory management of nvdimm
    """
    vm_name = params.get('main_vm')

    nvdimm_file = params.get('nvdimm_file')
    check = params.get('check', '')
    status_error = "yes" == params.get('status_error', 'no')
    error_msg = params.get('error_msg', '')
    qemu_checks = params.get('qemu_checks', '').split('`')
    wait_sec = int(params.get('wait_sec', 5))
    test_str = 'This is a test'

    def check_boot_config(session):
        """
        Check /boot/config-$KVER file
        """
        check_list = [
            'CONFIG_LIBNVDIMM=m', 'CONFIG_BLK_DEV_PMEM=m', 'CONFIG_ACPI_NFIT=m'
        ]
        current_boot = session.cmd('uname -r').strip()
        content = session.cmd('cat /boot/config-%s' % current_boot).strip()
        for item in check_list:
            if item in content:
                logging.info(item)
            else:
                logging.error(item)
                test.fail('/boot/config content not correct')

    def check_file_in_vm(session, path, expect=True):
        """
        Check whether the existence of file meets expectation
        """
        exist = session.cmd_status('ls %s' % path)
        logging.debug(exist)
        exist = (exist == 0)
        status = '' if exist else 'NOT'
        logging.info('File %s does %s exist', path, status)
        if exist != expect:
            err_msg = 'Existence does not meet expectation: %s ' % path
            if expect:
                err_msg += 'should exist.'
            else:
                err_msg += 'should not exist.'
            test.fail(err_msg)

    def create_cpuxml():
        """
        Create cpu xml for test
        """
        cpu_params = {
            k: v
            for k, v in params.items() if k.startswith('cpuxml_')
        }
        logging.debug(cpu_params)
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.xml = "<cpu><numa/></cpu>"
        if 'cpuxml_numa_cell' in cpu_params:
            cpu_params['cpuxml_numa_cell'] = cpu_xml.dicts_to_cells(
                eval(cpu_params['cpuxml_numa_cell']))
        for attr_key in cpu_params:
            val = cpu_params[attr_key]
            logging.debug('Set cpu params')
            setattr(cpu_xml, attr_key.replace('cpuxml_', ''),
                    eval(val) if ':' in val else val)
        logging.debug(cpu_xml)
        return cpu_xml.copy()

    def create_nvdimm_xml(**mem_param):
        """
        Create xml of nvdimm memory device
        """
        mem_xml = utils_hotplug.create_mem_xml(
            tg_size=mem_param['target_size'],
            mem_addr={'slot': mem_param['address_slot']},
            tg_sizeunit=mem_param['target_size_unit'],
            tg_node=mem_param['target_node'],
            mem_discard=mem_param.get('discard'),
            mem_model="nvdimm",
            lb_size=mem_param.get('label_size'),
            lb_sizeunit=mem_param.get('label_size_unit'),
            mem_access=mem_param['mem_access'],
            uuid=mem_param.get('uuid'))

        source_xml = memory.Memory.Source()
        source_xml.path = mem_param['source_path']
        mem_xml.source = source_xml
        logging.debug(mem_xml)

        return mem_xml.copy()

    def check_nvdimm_file(file_name):
        """
        check if the file exists in nvdimm memory device

        :param file_name: the file name in nvdimm device
        """
        vm_session = vm.wait_for_login()
        if test_str not in vm_session.cmd('cat /mnt/%s ' % file_name):
            test.fail('"%s" should be in output' % test_str)

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    IS_PPC_TEST = 'ppc64le' in platform.machine().lower()
    if IS_PPC_TEST:
        if not libvirt_version.version_compare(6, 2, 0):
            test.cancel('Libvirt version should be > 6.2.0'
                        ' to support nvdimm on pseries')

    try:
        vm = env.get_vm(vm_name)
        # Create nvdimm file on the host
        process.run('truncate -s 512M %s' % nvdimm_file, verbose=True)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Set cpu according to params
        cpu_xml = create_cpuxml()
        vmxml.cpu = cpu_xml

        # Update other vcpu, memory info according to params
        update_vm_args = {
            k: params[k]
            for k in params if k.startswith('setvm_')
        }
        logging.debug(update_vm_args)
        for key, value in list(update_vm_args.items()):
            attr = key.replace('setvm_', '')
            logging.debug('Set %s = %s', attr, value)
            setattr(vmxml, attr, int(value) if value.isdigit() else value)
        logging.debug(virsh.dumpxml(vm_name).stdout_text)

        # Add an nvdimm mem device to vm xml
        nvdimm_params = {
            k.replace('nvdimmxml_', ''): v
            for k, v in params.items() if k.startswith('nvdimmxml_')
        }
        nvdimm_xml = create_nvdimm_xml(**nvdimm_params)
        vmxml.add_device(nvdimm_xml)
        check_define_list = ['ppc_no_label', 'discard']
        if libvirt_version.version_compare(7, 0, 0):
            check_define_list.append('less_than_256')
        if check in check_define_list:
            result = virsh.define(vmxml.xml, debug=True)
            libvirt.check_result(result, expected_fails=[error_msg])
            return
        vmxml.sync()
        logging.debug(virsh.dumpxml(vm_name).stdout_text)

        if IS_PPC_TEST:
            # Check whether uuid is automatically created
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if new_xml.xmltreefile.find('/devices/memory/uuid') is None:
                test.fail('uuid should be generated automatically.')
            vm_nvdimm_xml = new_xml.get_devices('memory')[0]
            qemu_checks.append('uuid=%s' % vm_nvdimm_xml.uuid)

            # Check memory target size
            target_size = vm_nvdimm_xml.target.size
            logging.debug('Target size: %s', target_size)

            if check == 'less_than_256':
                if not libvirt_version.version_compare(7, 0, 0):
                    result = virsh.start(vm_name, debug=True)
                    libvirt.check_exit_status(result, status_error)
                    libvirt.check_result(result, error_msg)
                    return

        virsh.start(vm_name, debug=True, ignore_status=False)

        # Check qemu command line one by one
        if IS_PPC_TEST:
            list(map(libvirt.check_qemu_cmd_line, qemu_checks))

        alive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        # Check if the guest support NVDIMM:
        # check /boot/config-$KVER file
        vm_session = vm.wait_for_login()
        if not IS_PPC_TEST:
            check_boot_config(vm_session)

        # ppc test requires ndctl
        if IS_PPC_TEST:
            if not utils_package.package_install('ndctl', session=vm_session):
                test.error('Cannot install ndctl to vm')
            logging.debug(
                vm_session.cmd_output(
                    'ndctl create-namespace --mode=fsdax --region=region0'))

        # check /dev/pmem0 existed inside guest
        check_file_in_vm(vm_session, '/dev/pmem0')

        if check == 'back_file':
            # Create a file system on /dev/pmem0
            if any(ver in platform.platform() for ver in ('el8', 'el9')):
                vm_session.cmd('mkfs.xfs -f /dev/pmem0 -m reflink=0')
            else:
                vm_session.cmd('mkfs.xfs -f /dev/pmem0')

            vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
            vm_session.cmd('echo \"%s\" >/mnt/foo' % test_str)
            vm_session.cmd('umount /mnt')
            vm_session.close()

            # Shutdown the guest, then start it, remount /dev/pmem0,
            # check if the test file is still on the file system
            vm.destroy()
            vm.start()
            vm_session = vm.wait_for_login()

            vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
            if test_str not in vm_session.cmd('cat /mnt/foo'):
                test.fail('\"%s\" should be in /mnt/foo' % test_str)

            # From the host, check the file has changed:
            host_output = process.run('hexdump -C /tmp/nvdimm',
                                      shell=True,
                                      verbose=True).stdout_text
            if test_str not in host_output:
                test.fail('\"%s\" should be in output' % test_str)

            # Shutdown the guest, and edit the xml,
            # include: access='private'
            vm_session.close()
            vm.destroy()
            vm_devices = vmxml.devices
            nvdimm_device = vm_devices.by_device_tag('memory')[0]
            nvdimm_index = vm_devices.index(nvdimm_device)
            vm_devices[nvdimm_index].mem_access = 'private'
            vmxml.devices = vm_devices
            vmxml.sync()

            # Login to the guest, mount the /dev/pmem0 and .
            # create a file: foo-private
            vm.start()
            vm_session = vm.wait_for_login()

            if IS_PPC_TEST:
                libvirt.check_qemu_cmd_line('mem-path=/tmp/nvdimm,share=no')

            private_str = 'This is a test for foo-private'
            vm_session.cmd('mount -o dax /dev/pmem0 /mnt/')

            file_private = 'foo-private'
            vm_session.cmd("echo '%s' >/mnt/%s" % (private_str, file_private))
            if private_str not in vm_session.cmd('cat /mnt/%s' % file_private):
                test.fail('"%s" should be in output' % private_str)

            # Shutdown the guest, then start it,
            # check the file: foo-private is no longer existed
            vm_session.close()
            vm.destroy()

            vm.start()
            vm_session = vm.wait_for_login()
            vm_session.cmd('mount -o dax /dev/pmem0 /mnt/')
            if file_private in vm_session.cmd('ls /mnt/'):
                test.fail('%s should not exist, for it was '
                          'created when access=private' % file_private)

        if check == 'label_back_file':
            # Create an xfs file system on /dev/pmem0
            if any(ver in platform.platform() for ver in ('el8', 'el9')):
                vm_session.cmd(
                    'mkfs.xfs -f -b size=4096 /dev/pmem0 -m reflink=0')
            else:
                vm_session.cmd('mkfs.xfs -f -b size=4096 /dev/pmem0')

            # Mount the file system with DAX enabled for page cache bypass
            output = vm_session.cmd_output('mount -o dax /dev/pmem0 /mnt/')
            logging.info(output)

            # Create a file on the nvdimm device.
            test_str = 'This is a test with label'
            vm_session.cmd('echo "%s" >/mnt/foo-label' % test_str)
            if test_str not in vm_session.cmd('cat /mnt/foo-label '):
                test.fail('"%s" should be in the output of cat cmd' % test_str)

            vm_session.cmd('umount /mnt')
            # Reboot the guest, and remount the nvdimm device in the guest.
            # Check that the file foo-label still exists
            vm_session.close()
            virsh.reboot(vm_name, debug=True)
            vm_session = vm.wait_for_login()

            vm_session.cmd('mount -o dax /dev/pmem0  /mnt')
            if test_str not in vm_session.cmd('cat /mnt/foo-label '):
                test.fail('"%s" should be in output' % test_str)

            if params.get('check_life_cycle', 'no') == 'yes':
                virsh.managedsave(vm_name, ignore_status=False, debug=True)
                vm.start()
                check_nvdimm_file('foo-label')

                vm_s1 = vm_name + ".s1"
                virsh.save(vm_name, vm_s1, ignore_status=False, debug=True)
                virsh.restore(vm_s1, ignore_status=False, debug=True)
                check_nvdimm_file('foo-label')

                virsh.snapshot_create_as(vm_name,
                                         vm_s1,
                                         ignore_status=False,
                                         debug=True)
                virsh.snapshot_revert(vm_name,
                                      vm_s1,
                                      ignore_status=False,
                                      debug=True)
                virsh.snapshot_delete(vm_name,
                                      vm_s1,
                                      ignore_status=False,
                                      debug=True)

        if check == 'hot_plug':
            # Create file for 2nd nvdimm device
            nvdimm_file_2 = params.get('nvdimm_file_2')
            process.run('truncate -s 512M %s' % nvdimm_file_2)

            # Add 2nd nvdimm device to vm xml
            nvdimm2_params = {
                k.replace('nvdimmxml2_', ''): v
                for k, v in params.items() if k.startswith('nvdimmxml2_')
            }
            nvdimm2_xml = create_nvdimm_xml(**nvdimm2_params)

            ori_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                'memory')
            logging.debug('Starts with %d memory devices', len(ori_devices))

            result = virsh.attach_device(vm_name, nvdimm2_xml.xml, debug=True)
            libvirt.check_exit_status(result)

            # After attach, there should be an extra memory device
            devices_after_attach = vm_xml.VMXML.new_from_dumpxml(
                vm_name).get_devices('memory')
            logging.debug('After attach, vm has %d memory devices',
                          len(devices_after_attach))
            if len(ori_devices) != len(devices_after_attach) - 1:
                test.fail(
                    'Number of memory devices after attach is %d, should be %d'
                    % (len(devices_after_attach), len(ori_devices) + 1))

            # Create namespace for ppc tests
            if IS_PPC_TEST:
                logging.debug(
                    vm_session.cmd_output(
                        'ndctl create-namespace --mode=fsdax --region=region1')
                )

            time.sleep(wait_sec)
            check_file_in_vm(vm_session, '/dev/pmem1')

            nvdimm_detach = alive_vmxml.get_devices('memory')[-1]
            logging.debug(nvdimm_detach)

            # Hot-unplug nvdimm device
            result = virsh.detach_device(vm_name,
                                         nvdimm_detach.xml,
                                         debug=True)
            libvirt.check_exit_status(result)

            vm_session.close()
            vm_session = vm.wait_for_login()

            logging.debug(virsh.dumpxml(vm_name).stdout_text)

            left_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                'memory')
            logging.debug(left_devices)

            if len(left_devices) != len(ori_devices):
                test.fail(
                    'Number of memory devices after detach is %d, should be %d'
                    % (len(left_devices), len(ori_devices)))

            time.sleep(5)
            check_file_in_vm(vm_session, '/dev/pmem1', expect=False)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        bkxml.sync()
        os.remove(nvdimm_file)
        if 'nvdimm_file_2' in locals():
            os.remove(nvdimm_file_2)
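
Host-side preparation for the emulated nvdimm is just a sparse backing file, and the same file can be inspected later from the host to confirm that guest writes reached the backing store. A small sketch of those two host-side steps, assuming avocado's process utility and the /tmp/nvdimm path used in the example:

# Sketch: create the nvdimm backing file and check its content from the host (path assumed).
from avocado.utils import process


def prepare_and_check_backing(nvdimm_file="/tmp/nvdimm", marker="This is a test"):
    process.run("truncate -s 512M %s" % nvdimm_file, verbose=True)  # sparse backing file
    # ... the guest writes `marker` onto its DAX-mounted /dev/pmem0 in between ...
    out = process.run("hexdump -C %s" % nvdimm_file, verbose=True).stdout_text
    return marker in out  # True once the guest's write has reached the backing file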
Example #46
0
    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :param vm_name: Name of the VM domain
        :param vm_operation: Operation to be performed on the VM domain,
                             e.g. save, managedsave, suspend
        :param recover: flag indicating whether to set or reset
                        vm_operation
        """
        global vm_uptime_init
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name,
                                    save_file,
                                    save_option,
                                    ignore_status=True,
                                    debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name,
                                           managedsave_option,
                                           ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name,
                                            suspend_target,
                                            ignore_status=True,
                                            debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name,
                                            suspend_target,
                                            ignore_status=True,
                                            debug=True)
                libvirt.check_exit_status(result)
                # Wait domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
                vm_uptime_init = vm.uptime()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file,
                                           ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name,
                                           ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")
Example #47
0
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_suspend_resume = "yes" == params.get("test_suspend_resume", "no")
    pmsuspend_error = 'yes' == params.get("pmsuspend_error", 'no')

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # A backup of original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible fail patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs_copy = virsh_dargs.copy()
        virsh_dargs_copy['uri'] = uri
        virsh_dargs_copy['unprivileged_user'] = unprivileged_user
        if pmsuspend_error:
            fail_pat.append('access denied')

    # Setup possible failure patterns
    if pm_enabled == 'not_set':
        fail_pat.append('not supported')
    if pm_enabled == 'no':
        fail_pat.append('disabled')

    if vm_state == 'paused':
        fail_pat.append('not responding')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if pm_enabled == 'not_set':
            try:
                if vmxml.pm:
                    del vmxml.pm
            except xcepts.LibvirtXMLNotFoundError:
                pass
        else:
            pm_xml = vm_xml.VMPMXML()
            if suspend_target == 'mem':
                pm_xml.mem_enabled = pm_enabled
            elif suspend_target == 'disk':
                pm_xml.disk_enabled = pm_enabled
            elif suspend_target == 'hybrid':
                pm_xml.mem_enabled = pm_enabled
                pm_xml.disk_enabled = pm_enabled
            vmxml.pm = pm_xml
        vmxml.sync()

        vm.prepare_guest_agent()

        # Create a swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(test.tmpdir, "%s.save" % vm_name)
            session = vm.wait_for_login()
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, debug=True,
                                        uri=uri,
                                        unprivileged_user=unprivileged_user)
            if result.exit_status == 0:
                if fail_pat:
                    raise error.TestFail("Expected failed with %s, but run succeed"
                                         ":\n%s" % (fail_pat, result))
            else:
                if not fail_pat:
                    raise error.TestFail("Expected success, but run failed:\n%s"
                                         % result)
                #if not any_pattern_match(fail_pat, result.stderr):
                if not any(p in result.stderr for p in fail_pat):
                    raise error.TestFail("Expected failed with one of %s, but "
                                         "failed with:\n%s" % (fail_pat, result))
            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # dompmwakeup is expected to fail here (domain is not pmsuspended)
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail("Vm status is not paused before pm wakeup")
                if params.get('setup_libvirt_polkit') == 'yes':
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs_copy)
                else:
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail("Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    raise error.TestFail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait for the vm to start
                vm.wait_for_login()
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait for the vm to start
                vm.wait_for_login()
                # run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    raise error.TestFail("libvirtd crashed")
            if test_suspend_resume:
                ret = virsh.suspend(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    raise error.TestFail("VM state should be pmsuspended")
                ret = virsh.resume(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    raise error.TestFail("VM state should be pmsuspended")
        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            # Cleanup
            session.close()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
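
Stripped of the error-pattern handling, the core S3 round trip tested above is short: pmsuspend the guest to memory, confirm the pmsuspended state, wake it up and log back in. A minimal sketch with the same wrappers, assuming the guest agent and the <pm> element are already enabled as in the test:

# Sketch: basic S3 suspend/wakeup round trip.
from virttest import virsh


def pmsuspend_roundtrip(vm, vm_name):
    virsh.dompmsuspend(vm_name, "mem", ignore_status=False, debug=True)
    if vm.state() != "pmsuspended":
        raise RuntimeError("guest did not enter the pmsuspended state")
    virsh.dompmwakeup(vm_name, ignore_status=False, debug=True)
    vm.wait_for_login().close()  # the guest should come back reachable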
Example #48
0
def run(test, params, env):
    """
    Test the tpm virtual devices
    1. prepare a guest with different tpm devices
    2. check whether the guest can be started
    3. check the xml and qemu cmd line, even swtpm for vtpm
    4. check tpm usage in guest os
    """
    # Tpm passthrough supported since libvirt 1.0.5.
    if not libvirt_version.version_compare(1, 0, 5):
        test.cancel("Tpm device is not supported "
                    "on current libvirt version.")
    # Tpm passthrough supported since qemu 2.12.0-49.
    if not utils_misc.compare_qemu_version(2, 9, 0, is_rhev=False):
        test.cancel("Tpm device is not supported " "on current qemu version.")

    tpm_model = params.get("tpm_model")
    backend_type = params.get("backend_type")
    backend_version = params.get("backend_version")
    device_path = params.get("device_path")
    tpm_num = int(params.get("tpm_num", 1))
    # After first start of vm with vtpm, do operations, check it still works
    vm_operate = params.get("vm_operate")
    # Sub-operation(e.g.domrename) under vm_operate(e.g.restart)
    vm_oprt = params.get("vm_oprt")
    secret_uuid = params.get("secret_uuid")
    secret_value = params.get("secret_value")
    # Change encryption state: from plain to encrypted, or reverse.
    encrypt_change = params.get("encrypt_change")
    secret_uuid = params.get("secret_uuid")
    prepare_secret = ("yes" == params.get("prepare_secret", "no"))
    remove_dev = ("yes" == params.get("remove_dev", "no"))
    multi_vms = ("yes" == params.get("multi_vms", "no"))
    # Remove swtpm state file
    rm_statefile = ("yes" == params.get("rm_statefile", "no"))
    test_suite = ("yes" == params.get("test_suite", "no"))
    restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no"))
    no_backend = ("yes" == params.get("no_backend", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    err_msg = params.get("xml_errmsg", "")
    loader = params.get("loader", "")
    nvram = params.get("nvram", "")
    uefi_disk_url = params.get("uefi_disk_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(),
                                      "uefi_disk.qcow2")

    # Check tpm chip on host for passthrough testing
    if backend_type == "passthrough":
        dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True)
        logging.debug("dmesg info about tpm:\n %s", dmesg_info)
        dmesg_error = re.search("No TPM chip found|TPM is disabled",
                                dmesg_info)
        if dmesg_error:
            test.cancel(dmesg_error.group())
        else:
            # Try to check host tpm chip version
            tpm_v = None
            if re.search("2.0 TPM", dmesg_info):
                tpm_v = "2.0"
                if not utils_package.package_install("tpm2-tools"):
                    # package_install() return 'True' if succeed
                    test.error("Failed to install tpm2-tools on host")
            else:
                if re.search("1.2 TPM", dmesg_info):
                    tpm_v = "1.2"
                # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first
                if not utils_package.package_install("tpm-tools"):
                    test.error("Failed to install tpm-tools on host")
    # Check host env for vtpm testing
    elif backend_type == "emulator":
        if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False):
            test.cancel("vtpm(emulator backend) is not supported "
                        "on current qemu version.")
        # Install swtpm pkgs on host for vtpm emulation
        if not utils_package.package_install("swtpm*"):
            test.error("Failed to install swtpm swtpm-tools on host")

    def replace_os_disk(vm_xml, vm_name, nvram):
        """
        Replace os(nvram) and disk(uefi) for x86 vtpm test

        :param vm_xml: current vm's xml
        :param vm_name: current vm name
        :param nvram: nvram file path of vm
        """
        # Add loader, nvram in <os>
        nvram = nvram.replace("<VM_NAME>", vm_name)
        dict_os_attrs = {
            "loader_readonly": "yes",
            "secure": "yes",
            "loader_type": "pflash",
            "loader": loader,
            "nvram": nvram
        }
        vm_xml.set_os_attrs(**dict_os_attrs)
        logging.debug("Set smm=on in VMFeaturesXML")
        # Add smm in <features>
        features_xml = vm_xml.features
        features_xml.smm = "on"
        vm_xml.features = features_xml
        vm_xml.sync()
        # Replace disk with an uefi image
        if not utils_package.package_install("wget"):
            test.error("Failed to install wget on host")
        if uefi_disk_url.count("EXAMPLE"):
            test.error("Please provide the URL %s" % uefi_disk_url)
        else:
            download_cmd = ("wget %s -O %s" %
                            (uefi_disk_url, download_file_path))
            process.system(download_cmd, verbose=False, shell=True)
        vm = env.get_vm(vm_name)
        uefi_disk = {'disk_source_name': download_file_path}
        libvirt.set_vm_disk(vm, uefi_disk)

    vm_names = params.get("vms").split()
    vm_name = vm_names[0]
    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    os_xml = getattr(vm_xml, "os")
    host_arch = platform.machine()
    if backend_type == "emulator" and host_arch == 'x86_64':
        if not utils_package.package_install("OVMF"):
            test.error("Failed to install OVMF or edk2-ovmf pkgs on host")
        if os_xml.xmltreefile.find('nvram') is None:
            replace_os_disk(vm_xml, vm_name, nvram)
            vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    if vm.is_alive():
        vm.destroy()

    vm2 = None
    if multi_vms:
        if len(vm_names) > 1:
            vm2_name = vm_names[1]
            vm2 = env.get_vm(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
            vm2_xml_backup = vm2_xml.copy()
        else:
            # Clone additional vms if needed
            try:
                utils_path.find_command("virt-clone")
            except utils_path.CmdNotFoundError:
                if not utils_package.package_install(["virt-install"]):
                    test.cancel("Failed to install virt-install on host")
            vm2_name = "vm2_" + utils_misc.generate_random_string(5)
            ret_clone = utils_libguestfs.virt_clone_cmd(vm_name,
                                                        vm2_name,
                                                        True,
                                                        timeout=360,
                                                        debug=True)
            if ret_clone.exit_status:
                test.error(
                    "Need more than one domains, but error occured when virt-clone."
                )
            vm2 = vm.clone(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
        if vm2.is_alive():
            vm2.destroy()

    service_mgr = service.ServiceManager()

    def check_dumpxml(vm_name):
        """
        Check whether the added devices are shown in the guest xml

        :param vm_name: current vm name
        """
        logging.info("------Checking guest dumpxml------")
        if tpm_model:
            pattern = '<tpm model="%s">' % tpm_model
        else:
            # The default tpm model is "tpm-tis"
            pattern = '<tpm model="tpm-tis">'
        # Check tpm model
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        logging.debug("xml after add tpm dev is %s", xml_after_adding_device)
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s tpm device xml "
                      "in the guest xml file." % tpm_model)
        # Check backend type
        pattern = '<backend type="%s"' % backend_type
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s backend type xml for tpm dev "
                      "in the guest xml file." % backend_type)
        # Check backend version
        if backend_version:
            pattern = '"emulator" version="%s"' % backend_version
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail(
                    "Can not find the %s backend version xml for tpm dev "
                    "in the guest xml file." % backend_version)
        # Check device path
        if backend_type == "passthrough":
            pattern = '<device path="/dev/tpm0"'
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s device path xml for tpm dev "
                          "in the guest xml file." % device_path)
        # Check encryption secret
        if prepare_secret:
            pattern = '<encryption secret="%s" />' % encryption_uuid
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s secret uuid xml for tpm dev "
                          "in the guest xml file." % encryption_uuid)
        logging.info('------PASS on guest dumpxml check------')

    def check_qemu_cmd_line(vm, vm_name, domid):
        """
        Check whether the added devices are shown in the qemu cmd line

        :param vm: current vm
        :param vm_name: current vm name
        :param domid: domain id for checking vtpm socket file
        """
        logging.info("------Checking qemu cmd line------")
        if not vm.get_pid():
            test.fail('VM pid file missing.')
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Qemu cmd line info:\n %s", cmdline)
        # Check tpm model
        pattern_list = ["-device.%s" % tpm_model]
        # Check backend type
        if backend_type == "passthrough":
            dev_num = re.search(r"\d+", device_path).group()
            backend_segment = "id=tpm-tpm%s" % dev_num
        else:
            # emulator backend
            backend_segment = "id=tpm-tpm0,chardev=chrtpm"
        pattern_list.append("-tpmdev.%s,%s" % (backend_type, backend_segment))
        # Check chardev socket for vtpm
        if backend_type == "emulator":
            pattern_list.append(
                "-chardev.socket,id=chrtpm,"
                "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" %
                (domid, vm_name))
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                if not remove_dev:
                    test.fail("Can not find the %s for tpm device "
                              "in qemu cmd line." % pattern)
            elif remove_dev:
                test.fail("%s still exists after remove vtpm and restart" %
                          pattern)
        logging.info("------PASS on qemu cmd line check------")

    def check_swtpm(domid, domuuid, vm_name):
        """
        Check swtpm cmdline and files for vtpm.

        :param domid: domain id for checking vtpm files
        :param domuuid: domain uuid for checking vtpm state file
        :param vm_name: current vm name
        """
        logging.info("------Checking swtpm cmdline and files------")
        # Check swtpm cmdline
        swtpm_pid = utils_misc.get_pid("%s-swtpm.pid" % vm_name)
        if not swtpm_pid:
            if not remove_dev:
                test.fail('swtpm pid file missing.')
            else:
                return
        elif remove_dev:
            test.fail(
                'swtpm pid file still exists after remove vtpm and restart')
        with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Swtpm cmd line info:\n %s", cmdline)
        pattern_list = [
            "--daemon", "--ctrl", "--tpmstate", "--log", "--tpm2", "--pid"
        ]
        if prepare_secret:
            pattern_list.extend(["--key", "--migration-key"])
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                test.fail("Can not find the %s for tpm device "
                          "in swtpm cmd line." % pattern)
        # Check swtpm files
        file_list = [
            "/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)
        ]
        file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid)
        file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name)
        file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" %
                         (domid, vm_name))
        for swtpm_file in file_list:
            if not os.path.exists(swtpm_file):
                test.fail("Swtpm file: %s does not exist" % swtpm_file)
        logging.info("------PASS on Swtpm cmdline and files check------")

    def get_tpm2_tools_cmd(session=None):
        """
        Get tpm2-tools pkg version and return corresponding getrandom cmd

        :param session: guest console session
        :return: tpm2_getrandom cmd usage
        """
        cmd = 'rpm -q tpm2-tools'
        get_v_tools = session.cmd(cmd) if session else process.run(
            cmd).stdout_text
        v_tools_list = get_v_tools.strip().split('-')
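        # 'rpm -q tpm2-tools' prints e.g. 'tpm2-tools-4.1.1-1.el8.x86_64', so
        # index 2 holds the version; tpm2-tools >= 4 changed the CLI syntax.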
        if session:
            logging.debug("The tpm2-tools version is %s", v_tools_list[2])
        v_tools = int(v_tools_list[2].split('.')[0])
        return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex"

    def get_host_tpm_bef(tpm_v):
        """
        Test the host tpm function and identify its real version before passthrough.
        Since the dmesg info sometimes doesn't include a tpm message, use tpm-tools
        or tpm2-tools to probe the device.

        :param tpm_v: host tpm version get from dmesg info
        :return: host tpm version
        """
        logging.info("------Checking host tpm device before passthrough------")
        # Try tcsd tool for suspected tpm1.2 chip on host
        tpm_real_v = tpm_v
        if tpm_v != "2.0":
            if not service_mgr.start('tcsd'):
                # service_mgr.start() returns True on success
                if tpm_v == "1.2":
                    test.fail("Host tcsd.service start failed")
                else:
                    # Means tpm_v got nothing from dmesg; log the failure here
                    # and go to the next 'if' to try tpm2.0 tools.
                    logging.info("Host tcsd.service start failed")
            else:
                tpm_real_v = "1.2"
                logging.info("Host tpm version info:")
                result = process.run("tpm_version", ignore_status=False)
                logging.debug("[host]# tpm_version\n %s", result.stdout)
                time.sleep(2)
                service_mgr.stop('tcsd')
        if tpm_v != "1.2":
            # Try tpm2.0 tools
            if not utils_package.package_install("tpm2-tools"):
                test.error("Failed to install tpm2-tools on host")
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.cancel("Both tcsd and tpm2-tools can not work, "
                            "pls check your host tpm version and test env.")
            else:
                tpm_real_v = "2.0"
        logging.info("------PASS on host tpm device check------")
        return tpm_real_v

    def test_host_tpm_aft(tpm_real_v):
        """
        Test host tpm function after passthrough

        :param tpm_real_v: host tpm real version identified from testing
        """
        logging.info("------Checking host tpm device after passthrough------")
        if tpm_real_v == "1.2":
            if service_mgr.start('tcsd'):
                time.sleep(2)
                service_mgr.stop('tcsd')
                test.fail(
                    "Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        elif tpm_real_v == "2.0":
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if not process.run(tpm2_getrandom_cmd,
                               ignore_status=True).exit_status:
                test.fail(
                    "Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        logging.info("------PASS on host tpm device check------")

    def test_guest_tpm(expect_version, session, expect_fail):
        """
        Test tpm function in guest

        :param expect_version: expected guest tpm version (the host version for
                               passthrough, or the version set for the emulator)
        :param session: Guest session to be tested
        :param expect_fail: whether the guest tpm is expected to fail to work
        """
        logging.info("------Checking guest tpm device work------")
        if expect_version == "1.2":
            # Install tpm-tools and test by tcsd method
            if not utils_package.package_install(["tpm-tools"], session, 360):
                test.error("Failed to install tpm-tools package in guest")
            else:
                status, output = session.cmd_status_output(
                    "systemctl start tcsd")
                logging.debug("Command output: %s", output)
                if status:
                    if expect_fail:
                        test.cancel(
                            "tpm-crb passthrough only works with host tpm2.0, "
                            "but your host tpm version is 1.2")
                    else:
                        test.fail("Failed to start tcsd.service in guest")
                else:
                    dev_output = session.cmd_output("ls /dev/|grep tpm")
                    logging.debug("Command output: %s", dev_output)
                    status, output = session.cmd_status_output("tpm_version")
                    logging.debug("Command output: %s", output)
                    if status:
                        test.fail("Guest tpm can not work")
        else:
            # If expect_version is tpm2.0, install and test by tpm2-tools
            if not utils_package.package_install(["tpm2-tools"], session, 360):
                test.error("Failed to install tpm2-tools package in guest")
            else:
                tpm2_getrandom_cmd = get_tpm2_tools_cmd(session)
                status1, output1 = session.cmd_status_output(
                    "ls /dev/|grep tpm")
                logging.debug("Command output: %s", output1)
                status2, output2 = session.cmd_status_output(
                    tpm2_getrandom_cmd)
                logging.debug("Command output: %s", output2)
                if status1 or status2:
                    if not expect_fail:
                        test.fail("Guest tpm can not work")
                    else:
                        d_status, d_output = session.cmd_status_output("date")
                        if d_status:
                            test.fail("Guest OS doesn't work well")
                        logging.debug("Command output: %s", d_output)
                elif expect_fail:
                    test.fail("Expect fail but guest tpm still works")
        logging.info("------PASS on guest tpm device work check------")

    def run_test_suite_in_guest(session):
        """
        Run kernel test suite for guest tpm.

        :param session: Guest session to be tested
        """
        logging.info("------Checking kernel test suite for guest tpm------")
        boot_info = session.cmd('uname -r').strip().split('.')
        kernel_version = '.'.join(boot_info[:2])
        # Download test suite per current guest kernel version
        parent_path = "https://cdn.kernel.org/pub/linux/kernel"
        if float(kernel_version) < 5.3:
            major_version = "5"
            file_version = "5.3"
        else:
            major_version = boot_info[0]
            file_version = kernel_version
        src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version,
                                                file_version)
        download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz")
        output = session.cmd_output(download_cmd, timeout=480)
        logging.debug("Command output: %s", output)
        # Install necessary pkgs to build the test suite
        if not utils_package.package_install(
            ["tar", "make", "gcc", "rsync", "python2"], session, 360):
            test.fail("Failed to install specified pkgs in guest OS.")
        # Unpack the downloaded test suite
        status, output = session.cmd_status_output(
            "tar xvJf /root/linux.tar.xz -C /root")
        if status:
            test.fail("Unpacking failed: %s" % output)
        # Explicitly run the test suite with python2, as the scripts invoke plain 'python'
        test_path = "/root/linux-%s/tools/testing/selftests" % file_version
        sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path
        output = session.cmd_output(sed_cmd)
        logging.debug("Command output: %s", output)
        # Build and run the .sh files of the test suite
        status, output = session.cmd_status_output(
            "make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360)
        logging.debug("Command output: %s", output)
        if status:
            test.fail("Failed to run test suite in guest OS.")
        for test_sh in ["test_smoke.sh", "test_space.sh"]:
            pattern = "ok .* selftests: tpm2: %s" % test_sh
            if not re.search(pattern, output) or ("not ok" in output):
                test.fail("test suite check failed.")
        logging.info("------PASS on kernel test suite check------")

    def reuse_by_vm2(tpm_dev):
        """
        Try to add the same tpm to a second guest while it is being used by the first guest.

        :param tpm_dev: tpm device to be added into guest xml
        """
        logging.info("------Trying to add same tpm to a second domain------")
        vm2_xml.remove_all_device_by_type('tpm')
        vm2_xml.add_device(tpm_dev)
        vm2_xml.sync()
        ret = virsh.start(vm2_name, ignore_status=True, debug=True)
        if backend_type == "passthrough":
            if ret.exit_status:
                logging.info("Expected failure when trying to pass through a "
                             "tpm that is being used by another guest")
                return
            test.fail("Reusing a passed-through tpm should not succeed.")
        elif ret.exit_status:
            # emulator backend
            test.fail(
                "Vtpm for each guest should not interfere with each other")

    try:
        tpm_real_v = None
        sec_uuids = []
        new_name = ""
        virsh_dargs = {"debug": True, "ignore_status": False}
        vm_xml.remove_all_device_by_type('tpm')
        tpm_dev = Tpm()
        if tpm_model:
            tpm_dev.tpm_model = tpm_model
        if not no_backend:
            backend = tpm_dev.Backend()
            if backend_type != 'none':
                backend.backend_type = backend_type
                if backend_type == "passthrough":
                    tpm_real_v = get_host_tpm_bef(tpm_v)
                    logging.debug("The host tpm real version is %s",
                                  tpm_real_v)
                    if device_path:
                        backend.device_path = device_path
                if backend_type == "emulator":
                    if backend_version != 'none':
                        backend.backend_version = backend_version
                    if prepare_secret:
                        auth_sec_dict = {
                            "sec_ephemeral": "no",
                            "sec_private": "yes",
                            "sec_desc": "sample vTPM secret",
                            "sec_usage": "vtpm",
                            "sec_name": "VTPM_example"
                        }
                        encryption_uuid = libvirt.create_secret(auth_sec_dict)
                        if secret_value != 'none':
                            virsh.secret_set_value(encryption_uuid,
                                                   "open sesame",
                                                   encode=True,
                                                   debug=True)
                        sec_uuids.append(encryption_uuid)
                        if encrypt_change != 'encrpt':
                            # plain_to_encrypt will not add encryption on first start
                            if secret_uuid == 'invalid':
                                encryption_uuid = encryption_uuid[:-1]
                            backend.encryption_secret = encryption_uuid
                        if secret_uuid == "change":
                            auth_sec_dict["sec_desc"] = "sample2 vTPM secret"
                            auth_sec_dict["sec_name"] = "VTPM_example2"
                            new_encryption_uuid = libvirt.create_secret(
                                auth_sec_dict)
                            virsh.secret_set_value(new_encryption_uuid,
                                                   "open sesame",
                                                   encode=True,
                                                   debug=True)
                            sec_uuids.append(new_encryption_uuid)
                    if secret_uuid == 'nonexist':
                        backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
            tpm_dev.backend = backend
        logging.debug("tpm dev xml to add is:\n %s", tpm_dev)
        for num in range(tpm_num):
            vm_xml.add_device(tpm_dev, True)
        ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True)
        expected_match = ""
        if not err_msg:
            expected_match = "Domain %s defined from %s" % (vm_name,
                                                            vm_xml.xml)
        libvirt.check_result(ret, err_msg, "", False, expected_match)
        if err_msg:
            # Stop test when get expected failure
            return
        if vm_operate != "restart":
            check_dumpxml(vm_name)
        # For default model, no need start guest to test
        if tpm_model:
            expect_fail = False
            try:
                vm.start()
            except VMStartError as detail:
                if secret_value == 'none' or secret_uuid == 'nonexist':
                    logging.debug("Expected failure: %s", detail)
                    return
                else:
                    test.fail(detail)
            domuuid = vm.get_uuid()
            if vm_operate or restart_libvirtd:
                # Make sure OS works before vm operate or restart libvirtd
                session = vm.wait_for_login()
                test_guest_tpm("2.0", session, False)
                session.close()
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()
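                # libvirt keeps the per-domain vTPM state under
                # /var/lib/libvirt/swtpm/<domain uuid>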
                swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid
                if vm_operate == "resume":
                    virsh.suspend(vm_name, **virsh_dargs)
                    time.sleep(3)
                    virsh.resume(vm_name, **virsh_dargs)
                elif vm_operate == "snapshot":
                    virsh.snapshot_create_as(
                        vm_name, "sp1 --memspec file=/tmp/testvm_sp1",
                        **virsh_dargs)
                elif vm_operate in ["restart", "create"]:
                    vm.destroy()
                    if vm_operate == "create":
                        virsh.undefine(vm_name,
                                       options="--nvram",
                                       **virsh_dargs)
                        if os.path.exists(swtpm_statedir):
                            test.fail(
                                "Swtpm state dir: %s still exist after vm undefine"
                                % swtpm_statedir)
                        virsh.create(vm_xml.xml, **virsh_dargs)
                    else:
                        if vm_oprt == "domrename":
                            new_name = "vm_" + utils_misc.generate_random_string(
                                5)
                            virsh.domrename(vm_name, new_name, **virsh_dargs)
                            new_vm = libvirt_vm.VM(new_name, vm.params,
                                                   vm.root_dir,
                                                   vm.address_cache)
                            vm = new_vm
                            vm_name = new_name
                        elif secret_value == 'change':
                            logging.info("Changing secret value...")
                            virsh.secret_set_value(encryption_uuid,
                                                   "new sesame",
                                                   encode=True,
                                                   debug=True)
                        elif not restart_libvirtd:
                            # remove_dev or do other vm operations during restart
                            vm_xml.remove_all_device_by_type('tpm')
                            if secret_uuid == "change" or encrypt_change:
                                # Change the secret uuid, or switch the encryption state: from plain to encrypted, or vice versa
                                if encrypt_change == 'plain':
                                    # Change from encrypted to plain: redefine a tpm dev without encryption
                                    tpm_dev = Tpm()
                                    tpm_dev.tpm_model = tpm_model
                                    backend = tpm_dev.Backend()
                                    backend.backend_type = backend_type
                                    backend.backend_version = backend_version
                                else:
                                    # Use a new secret's uuid
                                    if secret_uuid == "change":
                                        encryption_uuid = new_encryption_uuid
                                    backend.encryption_secret = encryption_uuid
                                tpm_dev.backend = backend
                                logging.debug(
                                    "The new tpm dev xml to add for restart vm is:\n %s",
                                    tpm_dev)
                                vm_xml.add_device(tpm_dev, True)
                            if encrypt_change in ['encrpt', 'plain']:
                                # Avoid sync() undefine removing the state file
                                vm_xml.define()
                            else:
                                vm_xml.sync()
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s",
                                          swtpm_statefile)
                            os.remove(swtpm_statefile)
                        ret = virsh.start(vm_name,
                                          ignore_status=True,
                                          debug=True)
                        libvirt.check_exit_status(ret, status_error)
                        if status_error and ret.exit_status != 0:
                            return
                    if not remove_dev:
                        check_dumpxml(vm_name)
                elif vm_operate == 'managedsave':
                    virsh.managedsave(vm_name, **virsh_dargs)
                    time.sleep(5)
                    if secret_value == 'change':
                        logging.info("Changing secret value...")
                        virsh.secret_set_value(encryption_uuid,
                                               "new sesame",
                                               encode=True,
                                               debug=True)
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s",
                                          swtpm_statefile)
                            os.remove(swtpm_statefile)
                    ret = virsh.start(vm_name, ignore_status=True, debug=True)
                    libvirt.check_exit_status(ret, status_error)
                    if status_error and ret.exit_status != 0:
                        return
            domid = vm.get_id()
            check_qemu_cmd_line(vm, vm_name, domid)
            if backend_type == "passthrough":
                if tpm_real_v == "1.2" and tpm_model == "tpm-crb":
                    expect_fail = True
                expect_version = tpm_real_v
                test_host_tpm_aft(tpm_real_v)
            else:
                # emulator backend
                if remove_dev:
                    expect_fail = True
                expect_version = backend_version
                check_swtpm(domid, domuuid, vm_name)
            session = vm.wait_for_login()
            if test_suite:
                run_test_suite_in_guest(session)
            else:
                test_guest_tpm(expect_version, session, expect_fail)
            session.close()
            if multi_vms:
                reuse_by_vm2(tpm_dev)
                if backend_type != "passthrough":
                    # emulator backend
                    check_dumpxml(vm2_name)
                    domid = vm2.get_id()
                    domuuid = vm2.get_uuid()
                    check_qemu_cmd_line(vm2, vm2_name, domid)
                    check_swtpm(domid, domuuid, vm2_name)
                    session = vm2.wait_for_login()
                    test_guest_tpm(backend_version, session, expect_fail)
                    session.close()

    finally:
        # Remove renamed domain if it exists
        if new_name:
            virsh.remove_domain(new_name, "--nvram", debug=True)
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" %
                          new_name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name)
        # Remove snapshot if exists
        if vm_operate == "snapshot":
            snapshot_lists = virsh.snapshot_list(vm_name)
            if len(snapshot_lists) > 0:
                libvirt.clean_up_snapshots(vm_name, snapshot_lists)
                for snap in snapshot_lists:
                    virsh.snapshot_delete(vm_name, snap, "--metadata")
                if os.path.exists("/tmp/testvm_sp1"):
                    os.remove("/tmp/testvm_sp1")
        # Clear guest os
        if test_suite:
            session = vm.wait_for_login()
            logging.info("Removing dir /root/linux-*")
            output = session.cmd_output("rm -rf /root/linux-*")
            logging.debug("Command output:\n %s", output)
            session.close()
        if vm_operate == "create":
            vm.define(vm_xml.xml)
        vm_xml_backup.sync(options="--nvram --managed-save")
        # Remove swtpm log file in case of impact on later runs
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" %
                          vm.name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name)
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True)
        if vm2:
            if len(vm_names) > 1:
                vm2_xml_backup.sync(options="--nvram")
            else:
                virsh.remove_domain(vm2_name,
                                    "--nvram --remove-all-storage",
                                    debug=True)
            if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" %
                              vm2.name):
                os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" %
                          vm2.name)
Example #49
0
def run(test, params, env):
    """
    Test command: virsh list.

    1) Filter parameters according to libvirtd's version.
    2) Prepare the domain's existing state: transient, managed-save.
    3) Prepare libvirtd's status.
    4) Execute the list command.
    5) Check the result.
    """
    def list_local_domains_on_remote(options_ref, remote_ip, remote_passwd, local_ip):
        """
        Create a virsh list command and execute it on remote host.
        It will list local domains on remote host.

        :param options_ref: options for the virsh list command.
        :param remote_ip: remote host's ip.
        :param remote_passwd: remote host's password.
        :param local_ip: local ip, used to create the uri for virsh list.
        :return: status and output of the virsh list command.
        """
        complete_uri = libvirt_vm.complete_uri(local_ip)
        command_on_remote = "virsh -c %s list %s" % (complete_uri, options_ref)
        session = remote.remote_login(
            "ssh", remote_ip, "22", "root", remote_passwd, "#")
        time.sleep(5)
        status, output = session.cmd_status_output(
            command_on_remote, internal_timeout=5)
        time.sleep(5)
        session.close()
        return int(status), output

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    options_ref = params.get("list_options_ref", "")
    list_ref = params.get("list_type_ref", "")
    vm_ref = params.get("vm_ref", "")

    # Some parameters are not supported on old libvirt, skip them.
    help_info = virsh.command("help list").stdout.strip()
    if vm_ref and not re.search(vm_ref, help_info):
        raise error.TestNAError("This version does not support vm type: %s"
                                % vm_ref)
    if list_ref and not re.search(list_ref, help_info):
        raise error.TestNAError("This version does not support list type: %s"
                                % list_ref)

    status_error = params.get("status_error", "no")
    addition_status_error = params.get("addition_status_error", "no")
    domuuid = vm.get_uuid().strip()
    # If a transient domain is destroyed, it will disappear.
    if vm_ref == "transient" and options_ref == "inactive":
        logging.info("Set addition_status_error to yes")
        logging.info(
            "because transient domain will disappear after destroyed.")
        addition_status_error = "yes"

    if vm_ref == "transient":
        tmp_xml = vm.backup_xml()
        vm.undefine()
    elif vm_ref == "managed-save":
        virsh.managedsave(vm_name, ignore_status=True, print_info=True)

    # Prepare libvirtd status
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # run test case
    if list_ref == "--uuid":
        result_expected = domuuid
        logging.info("%s's uuid is: %s", vm_name, domuuid)
    else:
        result_expected = vm_name
        logging.info("domain's name is: %s", vm_name)

    if options_ref == "vm_id":
        domid = vm.get_id().strip()
        logging.info("%s's running-id is: %s", vm_name, domid)
        options_ref = "%s %s" % (domid, list_ref)
    elif options_ref == "vm_uuid":
        logging.info("%s's uuid is: %s", vm_name, domuuid)
        options_ref = "%s %s" % (domuuid, list_ref)
    elif options_ref == "inactive":
        vm.destroy()
        options_ref = "--inactive %s" % list_ref
    elif options_ref == "vm_name":
        options_ref = "%s %s" % (vm_name, list_ref)
    elif options_ref == "all":
        options_ref = "--all %s" % list_ref
    elif options_ref == "":
        options_ref = "%s" % list_ref

    remote_ref = params.get("remote_ref", "local")
    if remote_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError(
                "Remote test parameters unchanged from default")
        logging.info("Execute virsh command on remote host %s.", remote_ip)
        status, output = list_local_domains_on_remote(
            options_ref, remote_ip, remote_pwd, local_ip)
        logging.info("Status:%s", status)
        logging.info("Output:\n%s", output)
    else:
        if vm_ref:
            options_ref = "%s --%s" % (options_ref, vm_ref)
        result = virsh.dom_list(
            options_ref, ignore_status=True, print_info=True)
        status = result.exit_status
        output = result.stdout.strip()

    # Recover libvirtd service status
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Recover of domain
    if vm_ref == "transient":
        vm.define(tmp_xml)
    elif vm_ref == "managed-save":
        # Recover saved guest.
        virsh.managedsave_remove(vm_name, ignore_status=True, print_info=True)

    # Check result
    status_error = (status_error == "no") and (addition_status_error == "no")
    if vm_ref == "managed-save":
        saved_output = re.search(vm_name + r"\s+saved", output)
        if saved_output:
            output = saved_output.group(0)
        else:
            output = ""

    if not status_error:
        if status == 0 and re.search(result_expected, output):
            raise error.TestFail("Run succeeded with a wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with a correct command.")
        if not re.search(result_expected, output):
            raise error.TestFail("Run succeeded but the result is not expected.")

def manipulate_domain(vm_name, vm_operation, recover=False):
    """
    Operate domain to given state or recover it.
    """
    save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
    if not recover:
        if vm_operation == "save":
            save_option = ""
            result = virsh.save(vm_name,
                                save_file,
                                save_option,
                                ignore_status=True,
                                debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "managedsave":
            managedsave_option = ""
            result = virsh.managedsave(vm_name,
                                       managedsave_option,
                                       ignore_status=True,
                                       debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "s3":
            suspend_target = "mem"
            result = virsh.dompmsuspend(vm_name,
                                        suspend_target,
                                        ignore_status=True,
                                        debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "s4":
            suspend_target = "disk"
            result = virsh.dompmsuspend(vm_name,
                                        suspend_target,
                                        ignore_status=True,
                                        debug=True)
            libvirt.check_exit_status(result)
            # Wait domain state change: 'in shutdown' -> 'shut off'
            utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
        elif vm_operation == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        else:
            logging.debug("No operation for the domain")

    else:
        if vm_operation == "save":
            if os.path.exists(save_file):
                result = virsh.restore(save_file,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                os.remove(save_file)
            else:
                raise error.TestError("No save file for domain restore")
        elif vm_operation in ["managedsave", "s4"]:
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "s3":
            suspend_target = "mem"
            result = virsh.dompmwakeup(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif vm_operation == "suspend":
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        else:
            logging.debug("No need recover the domain")