Example #1
def reset_domain(vm, vm_state, needs_agent=False):
    """
    Set domain vcpu number to 4 and current vcpu as 1

    :param vm: the vm object
    :param vm_state: the given vm state string "shut off" or "running"
    """
    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm.name, 4, 1)
    if needs_agent:
        logging.debug("Attempting to set guest agent channel")
        vm_xml.set_agent_channel(vm.name)
    if vm_state != "shut off":
        vm.start()
        session = vm.wait_for_login()
        if needs_agent:
            # Install qemu-guest-agent if it is not already present
            cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
            stat_install = session.cmd_status(cmd, 300)
            if stat_install != 0:
                raise error.TestFail("Fail to install qemu-guest-agent, make "
                                     "sure that you have usable repo in guest")

            # Check if qemu-ga already started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                session.cmd("qemu-ga -d")
                # Check if the qemu-ga really started
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if stat_ps != 0:
                    raise error.TestFail("Fail to run qemu-ga in guest")
Example #2
def check_numatune_xml(params):
    """
    Compare mode and nodeset value with guest XML configuration
    @params: the parameter dictionary
    """
    vm_name = params.get("vms")
    mode = params.get("numa_mode", "")
    nodeset = params.get("numa_nodeset", "")

    virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh)

    numa_params = virt_xml_obj.get_numa_params(vm_name)
    if not numa_params:
        logging.error("Could not get numa parameters for %s" % vm_name)
        return False

    mode_from_xml = numa_params['mode']
    nodeset_from_xml = numa_params['nodeset']

    if mode and mode != mode_from_xml:
        logging.error("To expect %s: %s", mode, mode_from_xml)
        return False

    # The nodeset value may be written differently in the guest XML, so
    # normalize both sides before comparing; for example, a nodeset set as
    # '0,1,2' appears as '0-2' in the guest XML
    nodeset = nodeset_parser(nodeset)
    nodeset_from_xml = nodeset_parser(nodeset_from_xml)

    if nodeset and nodeset != nodeset_from_xml:
        logging.error("To expect %s: %s", nodeset, nodeset_from_xml)
        return False

    return True
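
# nodeset_parser is referenced above but not shown; a minimal sketch of the
# normalization it implies, expanding both '0,1,2' and '0-2' forms into a
# sorted list of ints (the real helper may differ):
def nodeset_parser(nodeset):
    nodes = []
    for piece in nodeset.split(','):
        if '-' in piece:
            start, end = piece.split('-')
            nodes.extend(range(int(start), int(end) + 1))
        elif piece:
            nodes.append(int(piece))
    return sorted(nodes)

# e.g. nodeset_parser('0,1,2') == nodeset_parser('0-2') == [0, 1, 2]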
Example #3
    def do_rename(vm, new_name, uuid=None):
        if vm.is_alive():
            vm.destroy(gracefully=True)

        vmxml = libvirt_xml.VMXML()
        vmxml.new_from_dumpxml(vm.name)
        backup = vmxml.copy()
        # can't do in-place rename, must operate on XML
        try:
            vmxml.undefine()
            # All failures trip a single exception
        except libvirt_xml.LibvirtXMLError as detail:
            del vmxml  # clean up temporary files
            raise error.TestFail("Error reported while undefining VM: %s"
                                 % detail)
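
        # Hedged continuation sketch: assuming VMXML exposes vm_name/uuid
        # accessors and a define() method, as used elsewhere in these
        # examples, the rename could proceed roughly like this:
        try:
            vmxml.vm_name = new_name
            if uuid is None:
                del vmxml.uuid  # let libvirt generate a fresh UUID
            else:
                vmxml.uuid = uuid
            vmxml.define()
        except libvirt_xml.LibvirtXMLError as detail:
            backup.define()  # roll back to the original definition
            raise error.TestFail("Error reported while defining VM: %s"
                                 % detail)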
Example #4
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare the test environment: destroy or suspend a VM.
    2. Perform the virsh setvcpus operation.
    3. Recover the test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get current vcpu number.
        """
        vcpus_set = ""
        virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        vcpus_2 = root.getElementsByTagName("vcpu")
        for n in vcpus_2:
            vcpus_set += n.getAttribute("current")
            vcpus_set = int(vcpus_set)
        dom.unlink()
        return vcpus_set

    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()

    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()

    if domain == "remote_name":
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
        if virsh.has_command_help_match("setvcpus", "--live") is None:
            status_error = "yes"
        session = remote.remote_login("ssh", remote_ssh_addr, "22", "root",
                                      remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                status_error = "yes"
                break
        status = virsh.setvcpus(dom_option,
                                count_option,
                                options,
                                ignore_status=True).exit_status
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        if status_error == "no":
            if status == 0:
                if pre_vm_state == "shut off":
                    if options == "--config":
                        vcpus_set = len(vm.vcpuinfo())
                    elif options == "--current":
                        vcpus_set = get_current_vcpus()
                    elif options == "--maximum --config":
                        vcpus_set = ""
                        dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                        vcpus_set = dom.getElementsByTagName(
                            "vcpu")[0].firstChild.data
                        vcpus_set = int(vcpus_set)
                        dom.unlink()
                else:
                    vcpus_set = len(vm.vcpuinfo())
                if domain == "id":
                    cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                    output1 = commands.getoutput(cmd_chk)
                    logging.info("guest-info:\n%s" % output1)

    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search(
                            '<vcpu current=\'1\'>%s</vcpu>' % vcpus_new,
                            output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
Example #5
def run(test, params, env):
    """
    Test command: virsh start.

    1) Get the test params.
    2) Prepare libvirtd's status.
    3) Do the start operation.
    4) Check the result.
    5) Clean up.
    """
    # get the test params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm_ref = params.get("vm_ref", "vm1")
    opt = params.get("vs_opt", "")

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    backup_name = vm_ref
    vm = None
    if vm_ref != "":
        vm = env.get_vm(vm_ref)
    vmxml = libvirt_xml.VMXML()

    libvirtd_state = params.get("libvirtd", "on")
    pre_operation = params.get("vs_pre_operation", "")
    status_error = params.get("status_error", "no")

    try:
        # prepare before start vm
        if libvirtd_state == "on":
            utils_libvirtd.libvirtd_start()
        elif libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        if pre_operation == "rename":
            new_vm_name = params.get("vs_new_vm_name", "virsh_start_vm1")
            vm = libvirt_xml.VMXML.vm_rename(vm, new_vm_name)
            vm_ref = new_vm_name
        elif pre_operation == "undefine":
            vmxml = vmxml.new_from_dumpxml(vm_ref)
            vmxml.undefine()

        # do the start operation
        try:
            if pre_operation == "remote":
                # get the params for remote test
                remote_ip = params.get("remote_ip", "ENTER.YOUR.REMOTE.IP")
                remote_user = params.get("remote_user", "root")
                remote_pwd = params.get("remote_pwd",
                                        "ENTER.YOUR.REMOTE.PASSWORD")
                if pre_operation == "remote" and remote_ip.count(
                        "ENTER.YOUR."):
                    test.cancel("Remote test parameters not configured")

                ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)
                remote_uri = "qemu+ssh://%s/system" % remote_ip
                cmd_result = virsh.start(vm_ref,
                                         ignore_status=True,
                                         debug=True,
                                         uri=remote_uri)
                if cmd_result.exit_status:
                    test.fail("Start vm failed.\n Detail: %s" % cmd_result)
            elif opt.count("console"):
                # With --console, the start command prints the guest's
                # boot messages and then drops into the login prompt. In
                # this case, we start it with --console and log into the
                # vm on the console via remote.handle_prompts().
                cmd = "start %s --console" % vm_ref
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                                   auto_close=True)
                virsh_session.sendline(cmd)
                remote.handle_prompts(virsh_session,
                                      params.get("username", ""),
                                      params.get("password", ""),
                                      r"[\#\$]\s*$",
                                      timeout=60,
                                      debug=True)
            elif opt.count("autodestroy"):
                # With --autodestroy, the vm will be destroyed when the
                # virsh session closes. So we execute the start command
                # with --autodestroy inside a virsh session, close the
                # session, and check whether the vm got destroyed.
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                                   auto_close=True)
                cmd = "start %s --autodestroy" % vm_ref
                status = virsh_session.cmd_status(cmd)
                if status:
                    test.fail("Failed to start vm with --autodestroy.")
                # Close the session; the vm should then be destroyed.
                virsh_session.close()
            elif opt.count("force-boot"):
                # With --force-boot, the VM will be started fresh from boot
                # even if it was saved with virsh managedsave.
                # In this case, we start the vm and execute "sleep 1000 &",
                # then save it with virsh managedsave. At last, we start the
                # vm with --force-boot. To verify the result, we check for
                # the sleep process: if it still exists, force-boot failed;
                # otherwise the case passes.
                vm.start()
                session = vm.wait_for_login()
                status = session.cmd_status("sleep 1000&")
                if status:
                    test.error("Can not execute command in guest.")
                sleep_pid = session.cmd_output("echo $!").strip()
                virsh.managedsave(vm_ref)
                virsh.start(vm_ref, options=opt)
            else:
                cmd_result = virsh.start(vm_ref, options=opt)
                if cmd_result.exit_status:
                    if status_error == "no":
                        test.fail("Start vm failed.\n Detail: %s" % cmd_result)
                else:
                    # start vm successfully
                    if status_error == "yes":
                        test.fail("Run successfully with wrong "
                                  "command!\n Detail:%s" % cmd_result)

            if opt.count("paused"):
                if vm.state() != "paused":
                    test.fail("VM is not paused when started with "
                              "--paused.")
            elif opt.count("autodestroy"):
                if vm.is_alive():
                    test.fail("VM was started with --autodestroy,"
                              "but not destroyed when virsh session "
                              "closed.")
            elif opt.count("force-boot"):
                session = vm.wait_for_login()
                status = session.cmd_status("ps %s |grep '[s]leep 1000'" %
                                            sleep_pid)
                if not status:
                    test.fail("VM was started with --force-boot,"
                              "but it is restored from a"
                              " managedsave.")
            else:
                if status_error == "no" and not vm.is_alive(
                ) and pre_operation != "remote":
                    test.fail("VM was started but it is not alive.")

        except remote.LoginError as detail:
            test.fail("Failed to login guest: %s" % detail)
    finally:
        # clean up
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        elif pre_operation == "rename":
            libvirt_xml.VMXML.vm_rename(vm, backup_name)
        elif pre_operation == "remote":
            virsh.destroy(vm_ref,
                          ignore_status=False,
                          debug=True,
                          uri=remote_uri)

        if vm and vm.is_paused():
            vm.resume()

        # Restore VM
        vmxml_backup.sync()
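
# The state checks above sample vm.state() once, right after virsh start
# returns; a hedged variant that polls briefly instead, assuming virttest's
# utils_misc.wait_for is imported as in the other examples:
def wait_for_state(vm, expected, timeout=10):
    """Poll until the domain reaches the expected state or timeout."""
    return utils_misc.wait_for(lambda: vm.state() == expected,
                               timeout=timeout)

# e.g. after "start --paused":
#     if not wait_for_state(vm, "paused"):
#         test.fail("VM did not reach 'paused' state")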
Example #6
def run_virsh_snapshot_create_as(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that a clean repo is available, because qemu-guest-agent needs
    to be installed in the guest.

    The command creates a snapshot (disk and RAM) from arguments, covering
    the following points:
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic (negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """

    if not virsh.has_help_command('snapshot-create-as'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    external_disk = params.get("external_disk")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    diskspec_opts = params.get("diskspec_opts")

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # If the memspec parameter has no "file=" part, compose_disk_options
        # returns None and we only need to prepend the test dir to it.
        if mem_options is None:
            mem_options = os.path.join(test.virtdir, memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
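    # Parse "--opt value" pairs from the option string into a dict; the
    # regex marks option boundaries at " --" (the lookahead avoids treating
    # a "---" run as a boundary)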
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(test.virtdir, bad_disk)
        os.close(os.open(bad_disk, os.O_RDWR | os.O_CREAT))

    # Generate external disk
    if external_disk is not None:
        external_disk = os.path.join(test.virtdir, external_disk)
        commands.getoutput("qemu-img create -f qcow2 %s 1G" % external_disk)

    # Start qemu-ga in the guest if --quiesce is given
    if options.find("quiesce") >= 0:
        if vm.is_alive():
            vm.destroy()
        virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh)
        virt_xml_obj.set_agent_channel(vm_name)
        vm.start()
        if start_ga == "yes":
            session = vm.wait_for_login()

            # Install qemu-guest-agent if it is not already present
            cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
            stat_install = session.cmd_status(cmd, 300)
            if stat_install != 0:
                xml_recover(vmxml_backup)
                raise error.TestFail("Fail to install qemu-guest-agent, make"
                                     "sure that you have usable repo in guest")

            # Check if qemu-ga already started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                session.cmd("qemu-ga -d")
                # Check if the qemu-ga really started
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if stat_ps != 0:
                    xml_recover(vmxml_backup)
                    raise error.TestFail("Fail to run qemu-ga in guest")

        if domain_state == "paused":
            virsh.suspend(vm_name)

    # Record the previous snapshot-list
    snaps_before = virsh.snapshot_list(vm_name)

    # Run virsh command
    # May create several snapshots, according to configuration
    for count in range(int(multi_num)):
        cmd_result = virsh.snapshot_create_as(vm_name, options,
                                              ignore_status=True, debug=True)
        output = cmd_result.stdout.strip()
        status = cmd_result.exit_status

        # check status_error
        if status_error == "yes":
            if status == 0:
                xml_recover(vmxml_backup)
                raise error.TestFail("Run successfully with wrong command!")
            else:
                # Check memspec file should be removed if failed
                if (options.find("memspec") >= 0
                    and options.find("atomic") >= 0):
                    if os.path.isfile(option_dict['memspec']):
                        os.remove(option_dict['memspec'])
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Run failed but file %s exist"
                                             % option_dict['memspec'])
                    else:
                        logging.info("Run failed as expected and memspec file"
                                     " already beed removed")
                else:
                    logging.info("Run failed as expected")

        elif status_error == "no":
            if status != 0:
                xml_recover(vmxml_backup)
                raise error.TestFail("Run failed with right command: %s"
                                     % output)
            else:
                # Check the special options
                snaps_list = virsh.snapshot_list(vm_name)
                logging.debug("snaps_list is %s", snaps_list)

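                # str.find returns -1 when a flag is absent, so
                # "no_metadata < 0" below means --no-metadata was NOT given;
                # fdisks names the element that holds the disk list ("disks"
                # in snapshot XML, "devices" in domain XML)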
                no_metadata = options.find("--no-metadata")
                fdisks = "disks"

                # a command with print-xml will not really create a snapshot
                if options.find("print-xml") >= 0:
                    xtf = xml_utils.XMLTreeFile(output)

                    # With --print-xml no new snapshot is created
                    if len(snaps_before) != len(snaps_list):
                        xml_recover(vmxml_backup)
                        raise error.TestFail("--print-xml create new snapshot")

                else:
                    # The following does not check with print-xml
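                    # virsh prints "Domain snapshot NAME created" on
                    # success, so the third whitespace-separated field
                    # is the snapshot name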
                    get_sname = output.split()[2]

                    # check domain/snapshot xml depending on metadata presence
                    if no_metadata < 0:
                        output_dump = virsh.snapshot_dumpxml(vm_name, get_sname)
                    else:
                        output_dump = virsh.dumpxml(vm_name)
                        fdisks = "devices"

                    xtf = xml_utils.XMLTreeFile(output_dump)

                    find = 0
                    for snap in snaps_list:
                        if snap == get_sname:
                            find = 1
                            break

                    # Should find snap in snaplist without --no-metadata
                    if (find == 0 and no_metadata < 0):
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Can not find snapshot %s!"
                                             % get_sname)
                    # Should not find snap in list without metadata
                    elif (find == 1 and no_metadata >= 0):
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Found snapshot metadata even "
                                             "though --no-metadata was given")
                    elif (find == 0 and no_metadata >= 0):
                        logging.info("Can not find snapshot %s as no-metadata "
                                     "is given" % get_sname)

                        # Check snapshot only in qemu-img
                        if (options.find("--disk-only") < 0
                            and options.find("--memspec") < 0):
                            ret = check_snap_in_image(vm_name, get_sname)

                            if not ret:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("No snap info in image")

                    else:
                        logging.info("Find snapshot %s in snapshot list."
                                     % get_sname)

                    # Check if the disk file exists when disk-only is given
                    if options.find("disk-only") >= 0:
                        for disk in xtf.find(fdisks).findall('disk'):
                            diskpath = disk.find('source').get('file')
                            if os.path.isfile(diskpath):
                                logging.info("disk file %s exist" % diskpath)
                                os.remove(diskpath)
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Can not find disk %s"
                                                     % diskpath)

                    # Check if the guest is halted when 'halt' is given
                    if options.find("halt") >= 0:
                        domstate = virsh.domstate(vm_name)
                        if re.match("shut off", domstate.stdout):
                            logging.info("Domain is halted after create "
                                         "snapshot")
                        else:
                            xml_recover(vmxml_backup)
                            raise error.TestFail("Domain is not halted after "
                                                 "snapshot created")

                # Check the snapshot xml regardless of having print-xml or not
                if (options.find("name") >= 0 and no_metadata < 0):
                    if xtf.findtext('name') == option_dict["name"]:
                        logging.info("get snapshot name same as set")
                    else:
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Get wrong snapshot name %s" %
                                             xtf.findtext('name'))

                if (options.find("description") >= 0 and no_metadata < 0):
                    desc = xtf.findtext('description')
                    if desc == option_dict["description"]:
                        logging.info("get snapshot description same as set")
                    else:
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Get wrong description on xml")

                if options.find("diskspec") >= 0:
                    if isinstance(option_dict['diskspec'], list):
                        index = len(option_dict['diskspec'])
                    else:
                        index = 1

                    disks = xtf.find(fdisks).findall('disk')

                    for num in range(index):
                        if isinstance(option_dict['diskspec'], list):
                            option_disk = option_dict['diskspec'][num]
                        else:
                            option_disk = option_dict['diskspec']

                        option_disk = "name=" + option_disk
                        disk_dict = utils_misc.valued_option_dict(option_disk,
                                                                  ",", 0, "=")
                        logging.debug("disk_dict is %s", disk_dict)

                        # For a no-metadata snapshot do not check the name
                        # and snapshot type
                        if no_metadata < 0:
                            dname = disks[num].get('name')
                            logging.debug("dname is %s", dname)
                            if dname == disk_dict['name']:
                                logging.info("get disk%d name same as set in "
                                             "diskspec", num)
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Get wrong disk%d name %s"
                                                      % num, dname)

                            if option_disk.find('snapshot=') >= 0:
                                dsnap = disks[num].get('snapshot')
                                logging.debug("dsnap is %s", dsnap)
                                if dsnap == disk_dict['snapshot']:
                                    logging.info("get disk%d snapshot type same"
                                                 " as set in diskspec", num)
                                else:
                                    xml_recover(vmxml_backup)
                                    raise error.TestFail("Get wrong disk%d "
                                                         "snapshot type %s" %
                                                          num, dsnap)

                        if option_disk.find('driver=') >= 0:
                            dtype = disks[num].find('driver').get('type')
                            if dtype == disk_dict['driver']:
                                logging.info("get disk%d driver type same as "
                                             "set in diskspec", num)
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Get wrong disk%d driver "
                                                     "type %s" % num, dtype)

                        if option_disk.find('file=') >= 0:
                            sfile = disks[num].find('source').get('file')
                            if sfile == disk_dict['file']:
                                logging.info("get disk%d source file same as "
                                             "set in diskspec", num)
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Get wrong disk%d source "
                                                     "file %s" % num, sfile)


                # For memspec check if the xml is same as setting
                # Also check if the mem file exists
                if options.find("memspec") >= 0:
                    memspec = option_dict['memspec']
                    if memspec.find('file=') < 0:
                        memspec = 'file=' + option_dict['memspec']

                    mem_dict = utils_misc.valued_option_dict(memspec, ",", 0,
                                                             "=")
                    logging.debug("mem_dict is %s", mem_dict)

                    if no_metadata < 0:
                        if memspec.find('snapshot=') >= 0:
                            snap = xtf.find('memory').get('snapshot')
                            if snap == mem_dict['snapshot']:
                                logging.info("get memory snapshot type same as"
                                             " set in diskspec")
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Get wrong memory snapshot"
                                                     " type on print xml")

                        memfile = xtf.find('memory').get('file')
                        if memfile == mem_dict['file']:
                            logging.info("get memory file same as set in "
                                         "diskspec")
                        else:
                            xml_recover(vmxml_backup)
                            raise error.TestFail("Get wrong memory file on "
                                                 "print xml %s", memfile)

                    if options.find("print-xml") < 0:
                        if os.path.isfile(mem_dict['file']):
                            logging.info("memory file generated")
                            os.remove(mem_dict['file'])
                        else:
                            xml_recover(vmxml_backup)
                            raise error.TestFail("Fail to generate memory file"
                                                 " %s", mem_dict['file'])

    # Environment clean
    if options.find("quiesce") >= 0 and start_ga == "yes":
        session.cmd("rpm -e qemu-guest-agent")

    # recover domain xml
    xml_recover(vmxml_backup)
    path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
    if os.path.isfile(path):
        raise error.TestFail("Still can find snapshot metadata")

    # rm bad disks
    if bad_disk is not None:
        os.remove(bad_disk)
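
# xml_recover is referenced throughout this example but not shown; a minimal
# sketch, assuming the backup is a VMXML object whose sync() redefines the
# domain from the backed-up XML (the real helper may differ):
def xml_recover(vmxml_backup):
    """Restore the domain definition from the backed-up XML."""
    try:
        vmxml_backup.sync()
        return True
    except Exception as detail:
        logging.error("Failed to recover domain XML: %s", detail)
        return False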
Example #7
def run(test, params, env):
    """
    Test command: virsh start.

    1) Get the test params.
    2) Prepare libvirtd's status.
    3) Do the start operation.
    4) Check the result.
    5) Clean up.
    """
    # get the test params
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm_ref = params.get("vm_ref", "vm1")

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    backup_name = vm_ref
    if vm_ref != "":
        vm = env.get_vm(vm_ref)
    vmxml = libvirt_xml.VMXML()

    libvirtd_state = params.get("libvirtd", "on")
    pre_operation = params.get("vs_pre_operation", "")
    status_error = params.get("status_error", "no")

    # get the params for remote test
    remote_ip = params.get("remote_ip", "ENTER.YOUR.REMOTE.IP")
    remote_password = params.get("remote_password",
                                 "ENTER.YOUR.REMOTE.PASSWORD")
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    if pre_operation == "remote" and (remote_ip.count("ENTER.YOUR.")
                                      or local_ip.count("ENTER.YOUR.")):
        raise error.TestNAError("Remote test parameters not configured")

    try:
        # prepare before start vm
        if libvirtd_state == "on":
            utils_libvirtd.libvirtd_start()
        elif libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        if pre_operation == "rename":
            new_vm_name = params.get("vs_new_vm_name", "virsh_start_vm1")
            vm = libvirt_xml.VMXML.vm_rename(vm, new_vm_name)
            vm_ref = new_vm_name
        elif pre_operation == "undefine":
            vmxml = vmxml.new_from_dumpxml(vm_ref)
            vmxml.undefine()

        # do the start operation
        try:
            if pre_operation == "remote":
                # get remote session
                session = remote.wait_for_login("ssh", remote_ip, "22", "root",
                                                remote_password, "#")
                # get uri of local
                uri = libvirt_vm.complete_uri(local_ip)

                cmd = "virsh -c %s start %s" % (uri, vm_ref)
                status, output = session.cmd_status_output(cmd)
                if status:
                    raise StartError(vm_ref, output)
            else:
                do_virsh_start(vm_ref)

            # start vm successfully
            if status_error == "yes":
                raise error.TestFail("Run successfully with wrong command!")

        except StartError as excpt:
            # start vm failed
            if status_error == "no":
                raise error.TestFail("Run failed with right command: %s"
                                     % excpt)
    finally:
        # clean up
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        elif pre_operation == "rename":
            libvirt_xml.VMXML.vm_rename(vm, backup_name)

        # Restore VM
        vmxml_backup.sync()
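
# StartError and do_virsh_start are referenced above but not shown; minimal
# sketches consistent with how they are called (hypothetical, the real
# helpers may differ):
class StartError(Exception):

    def __init__(self, vm_ref, output):
        Exception.__init__(self, vm_ref, output)
        self.vm_ref = vm_ref
        self.output = output


def do_virsh_start(vm_ref):
    """Start the domain and raise StartError on a non-zero exit."""
    result = virsh.start(vm_ref, ignore_status=True, debug=True)
    if result.exit_status:
        raise StartError(vm_ref, result.stderr.strip())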
Example #8
def run(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1. Prepare the test environment: destroy or suspend a VM.
    2. Perform the virsh setvcpus operation.
    3. Recover the test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    vm_ref = params.get("setvcpus_vm_ref", "name")
    count = params.get("setvcpus_count")
    set_current = int(params.get("setvcpus_current", "0"))
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "")
    remote_prompt = params.get("remote_prompt", "#")
    tmpxml = os.path.join(test.tmpdir, 'tmp.xml')
    test_set_max = 2

    # Early death
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        raise error.TestNAError("remote/local ip parameters not set.")

    # Save original configuration
    orig_config_xml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Get the number of cpus, current value if set, and machine type
    orig_set, orig_current, mtype = get_xmldata(vm_name, tmpxml, options)
    logging.debug("orig_set=%d orig_current=%d mtype=%s",
                  orig_set, orig_current, mtype)

    # Normal processing of the test is to set the vcpu count to 2 and then
    # adjust the 'current_vcpu' value to 1, effectively removing a vcpu.
    #
    # This is generally fine when the guest is not running; however, the
    # hotswap functionality hasn't always worked very well and is
    # undergoing lots of change, from using the hmp "cpu_set" command in
    # qemu 1.5 to the new qmp "cpu-add" command added in 1.6, where
    # "cpu_set" seems to have been deprecated, making things very messy.
    #
    # To further muddy the waters, the "cpu-add" functionality is supported
    # only for specific machine type versions. For the purposes of this
    # test that would be "pc-i440fx-1.5" or "pc-q35-1.5" or later machine
    # types (from the guest XML "<os><type ... machine='...'>...</type>").
    # Depending on which version of qemu/kvm was used to initially
    # create/generate the XML for the machine, this could result in a newer
    # qemu still using a 1.4 or earlier machine type.
    #
    # If set_current is set, then we are adding CPUs, thus we must set the
    # 'current_vcpu' value to something lower than our count in order to
    # test that, if we start with current=1 and count=2, we can raise
    # current up to the count. If our orig_set count is 1, then don't add
    # a vCPU to a VM that perhaps doesn't want one. We still need to check
    # that 'virsh setvcpus <domain> 1' works, so continue on.
    #
    if set_current != 0 and orig_set >= 2:
        if vm.is_alive():
            vm.destroy()
        vm_xml = libvirt_xml.VMXML()
        if set_current >= test_set_max:
            raise error.TestFail("Current(%d) >= test set max(%d)" %
                                 (set_current, test_set_max))
        vm_xml.set_vm_vcpus(vm_name, test_set_max, set_current)
        # Restart, unless that's not our test
        if pre_vm_state != "shut off":
            vm.start()
            vm.wait_for_login()

    if orig_set == 1:
        logging.debug("Original vCPU count is 1, just checking if setvcpus "
                      "can still set current.")

    domid = vm.get_id()  # only valid for running
    domuuid = vm.get_uuid()

    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off" and vm.is_alive():
        vm.destroy()

    try:
        if vm_ref == "remote":
            (setvcpu_exit_status, status_error,
             setvcpu_exit_stderr) = remote_test(remote_ip,
                                                local_ip,
                                                remote_pwd,
                                                remote_prompt,
                                                vm_name,
                                                status_error)
        else:
            if vm_ref == "name":
                dom_option = vm_name
            elif vm_ref == "id":
                dom_option = domid
                if params.get("setvcpus_hex_id") is not None:
                    dom_option = hex(int(domid))
                elif params.get("setvcpus_invalid_id") is not None:
                    dom_option = params.get("setvcpus_invalid_id")
            elif vm_ref == "uuid":
                dom_option = domuuid
                if params.get("setvcpus_invalid_uuid") is not None:
                    dom_option = params.get("setvcpus_invalid_uuid")
            else:
                dom_option = vm_ref

            option_list = options.split(" ")
            for item in option_list:
                if virsh.has_command_help_match(command, item) is None:
                    raise error.TestNAError("The current libvirt version"
                                            " doesn't support '%s' option"
                                            % item)
            status = virsh.setvcpus(dom_option, count_option, options,
                                    ignore_status=True, debug=True)
            setvcpu_exit_status = status.exit_status
            setvcpu_exit_stderr = status.stderr.strip()

    finally:
        vcpus_set, vcpus_current, mtype = get_xmldata(vm_name, tmpxml, options)

        # Cleanup
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        orig_config_xml.sync()

    # check status_error
    if status_error == "yes":
        if setvcpu_exit_status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if setvcpu_exit_status != 0:
            # setvcpus/hotplug is only available as of qemu 1.5 and it's
            # still evolving. In general the addition of vcpus may use the
            # QMP "cpu_set" (qemu 1.5) or "cpu-add" (qemu 1.6 and later)
            # commands. The removal of vcpus may work in qemu 1.5 due to how
            # cpu_set can set vcpus online or offline; however, there
            # doesn't appear to be a complementary cpu-del feature yet, so
            # we can add, but not delete, in 1.6.

            # A 1.6 qemu will not allow the cpu-add command to be run on
            # a configuration using <os> machine property 1.4 or earlier.
            # That is the XML <os> element with the <type> property having
            # an attribute 'machine' which is a tuple of 3 elements separated
            # by a dash, such as "pc-i440fx-1.5" or "pc-q35-1.5".
            if re.search("unable to execute QEMU command 'cpu-add'",
                         setvcpu_exit_stderr):
                raise error.TestNAError("guest <os> machine property '%s' "
                                        "may be too old to allow hotplug.",
                                        mtype)

            # A qemu older than 1.5 or an unplug for 1.6 will result in
            # the following failure.  In general, any time libvirt determines
            # it cannot support adding or removing a vCPU...
            if re.search("cannot change vcpu count of this domain",
                         setvcpu_exit_stderr):
                raise error.TestNAError("virsh setvcpu hotplug unsupported, "
                                        " mtype=%s" % mtype)

            # Cannot set the current vcpu count larger than the max count
            if orig_set == 1 and int(count) > orig_set:
                raise error.TestNAError(setvcpu_exit_stderr)

            # Otherwise, it seems we have a real error
            raise error.TestFail("Run failed with right command mtype=%s stderr=%s" %
                                 (mtype, setvcpu_exit_stderr))
        else:
            if "--maximum" in options:
                if vcpus_set != int(count):
                    raise error.TestFail("failed to set --maximum vcpus "
                                         "to %s mtype=%s" %
                                         (count, mtype))
            else:
                if orig_set >= 2 and set_current != 0:
                    # If we're adding a cpu we go from:
                    #    <vcpu ... current='1'...>2</vcpu>
                    # to
                    #    <vcpu ... >2</vcpu>
                    # where vcpus_current will be 0 and vcpus_set will be 2
                    if vcpus_current != 0 or vcpus_set != test_set_max:
                        raise error.TestFail("Failed to add current=%d, "
                                             "set=%d, count=%d mtype=%s" %
                                             (vcpus_current, vcpus_set,
                                              test_set_max, mtype))
                elif orig_set >= 2 and set_current == 0:
                    # If we're removing a cpu we go from:
                    #    <vcpu ... >2</vcpu>
                    # to
                    #    <vcpu ... current='1'...>2</vcpu>
                    # where vcpus_current will be 1 and vcpus_set will be 2
                    if vcpus_current != 1 or vcpus_set != test_set_max:
                        raise error.TestFail("Failed to remove current=%d, "
                                             "set=%d, count=%d mtype=%s" %
                                             (vcpus_current, vcpus_set,
                                              test_set_max, mtype))
                # If we have a starting place of 1 vCPU, then this is a
                # rather boring and innocuous case, but libvirt will
                # succeed, so just handle it
                elif orig_set == 1 and (vcpus_current != 0 or vcpus_set != 1):
                    raise error.TestFail("Failed when orig_set is 1 current=%d, "
                                         "set=%d, count=%d mtype=%s" %
                                         (vcpus_current, vcpus_set,
                                          test_set_max, mtype))
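
# get_xmldata is referenced above but not shown; a hedged sketch matching
# its (vcpus_set, vcpus_current, machine_type) return signature and the
# minidom style of the other examples (the real helper may differ):
from xml.dom.minidom import parse

def get_xmldata(vm_name, tmp_file, options):
    extra = "--inactive" if "--config" in options else ""
    virsh.dumpxml(vm_name, extra=extra, to_file=tmp_file)
    dom = parse(tmp_file)
    vcpu = dom.getElementsByTagName("vcpu")[0]
    vcpus_set = int(vcpu.firstChild.data)
    current = vcpu.getAttribute("current")
    vcpus_current = int(current) if current else 0
    # the machine type lives on <os><type machine='...'>
    mtype = dom.getElementsByTagName("type")[0].getAttribute("machine")
    dom.unlink()
    return vcpus_set, vcpus_current, mtype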
Example #9
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that a clean repo is available, because qemu-guest-agent needs
    to be installed in the guest.

    The command creates a snapshot (disk and RAM) from arguments, covering
    the following points:
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic (negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """

    if not virsh.has_help_command('snapshot-create-as'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    external_disk = params.get("external_disk")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    diskspec_opts = params.get("diskspec_opts")
    create_autodestroy = 'yes' == params.get("create_autodestroy", "no")

    uri = params.get("virsh_uri")
    usr = params.get('unprivileged_user')
    if usr:
        if usr.count('EXAMPLE'):
            usr = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current" +
                                    " libvirt version.")

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # If the memspec parameter has no "file=" part, compose_disk_options
        # returns None and we only need to prepend the test dir to it.
        if mem_options is None:
            mem_options = os.path.join(test.virtdir, memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(test.virtdir, bad_disk)
        os.close(os.open(bad_disk, os.O_RDWR | os.O_CREAT))

    # Generate external disk
    if external_disk is not None:
        external_disk = os.path.join(test.virtdir, external_disk)
        commands.getoutput("qemu-img create -f qcow2 %s 1G" % external_disk)

    try:
        # Start qemu-ga in the guest if --quiesce is given
        if options.find("quiesce") >= 0:
            if vm.is_alive():
                vm.destroy()
            virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh)
            virt_xml_obj.set_agent_channel(vm_name)
            vm.start()
            session = vm.wait_for_login()

            # Install qemu-guest-agent if it is not already present
            cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
            stat_install = session.cmd_status(cmd, 300)
            if stat_install != 0:
                raise error.TestNAError("Fail to install qemu-guest-agent, "
                                        "make sure that you have usable repo "
                                        "in guest")

            # Check if qemu-ga already started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                if start_ga == "yes":
                    session.cmd("qemu-ga -d")
                    # Check if the qemu-ga really started
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if stat_ps != 0:
                        raise error.TestNAError("Fail to run qemu-ga in guest")
            else:
                if start_ga == "no":
                    # The qemu-ga could be running and should be killed
                    session.cmd("kill -9 `pidof qemu-ga`")
                    # Check if qemu-ga got killed
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if not stat_ps:
                        # As managed by systemd and set as autostart, qemu-ga
                        # could be restarted, so use systemctl to stop it.
                        session.cmd("systemctl stop qemu-guest-agent")
                        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                        if not stat_ps:
                            raise error.TestNAError("Fail to stop agent in "
                                                    "guest")

            if domain_state == "paused":
                virsh.suspend(vm_name)

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            if create_autodestroy:
                # Run virsh command in interactive mode
                vmxml_backup.undefine()
                vp = virsh.VirshPersistent()
                vp.create(vmxml_backup['xml'], '--autodestroy')
                cmd_result = vp.snapshot_create_as(vm_name,
                                                   options,
                                                   ignore_status=True,
                                                   debug=True)
                vp.close_session()
                vmxml_backup.define()
            else:
                cmd_result = virsh.snapshot_create_as(vm_name,
                                                      options,
                                                      unprivileged_user=usr,
                                                      uri=uri,
                                                      ignore_status=True,
                                                      debug=True)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    raise error.TestFail(
                        "Run successfully with wrong command!")
                else:
                    # Check memspec file should be removed if failed
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            raise error.TestFail(
                                "Run failed but file %s exist" %
                                option_dict['memspec'])
                        else:
                            logging.info(
                                "Run failed as expected and memspec file"
                                " has already been removed")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    raise error.TestFail("Run failed with right command: %s" %
                                         output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)

                    check_snapslist(vm_name, options, option_dict, output,
                                    snaps_before, snaps_list)

    finally:
        # Environment clean
        if options.find("quiesce") >= 0 and start_ga == "yes":
            session.cmd("rpm -e qemu-guest-agent")

        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            raise error.TestFail("Still can find snapshot metadata")

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
Example #10
def run(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that a clean repo is available, because qemu-guest-agent needs
    to be installed in the guest

    The command creates a snapshot (disk and RAM) from arguments, covering
    the following points
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic(negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """

    if not virsh.has_help_command('snapshot-create-as'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    reuse_external = "yes" == params.get("reuse_external", "no")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    config_format = "yes" == params.get("config_format", "no")
    snapshot_image_format = params.get("snapshot_image_format")
    diskspec_opts = params.get("diskspec_opts")
    create_autodestroy = 'yes' == params.get("create_autodestroy", "no")
    unix_channel = "yes" == params.get("unix_channel", "yes")
    dac_denial = "yes" == params.get("dac_denial", "no")
    check_json_no_savevm = "yes" == params.get("check_json_no_savevm", "no")

    uri = params.get("virsh_uri")
    usr = params.get('unprivileged_user')
    if usr:
        if usr.count('EXAMPLE'):
            usr = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    + " libvirt version.")

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # compose_disk_options returns None when the memspec has no "file="
        # component; in that case we just anchor it under the test tmpdir.
        if mem_options is None:
            mem_options = os.path.join(test.tmpdir, memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts<n> entries are used in the cfg when more than one
    # --diskspec is given
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(test.tmpdir, bad_disk)
        # Close the fd right away; only the empty (invalid) image is needed
        os.close(os.open(bad_disk, os.O_RDWR | os.O_CREAT))

    # Generate external disk
    if reuse_external:
        disk_path = ''
        for i in range(dnum):
            external_disk = "external_disk%s" % i
            if params.get(external_disk):
                disk_path = os.path.join(test.tmpdir,
                                         params.get(external_disk))
                utils.run("qemu-img create -f qcow2 %s 1G" % disk_path)
        # Only chmod the last external disk for the negative case
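        # (mode 500 drops write permission, so the qemu process, which runs
        # as an unprivileged user, is denied DAC access to the reused image)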
        if dac_denial:
            utils.run("chmod 500 %s" % disk_path)

    qemu_conf = None
    libvirtd_conf = None
    libvirtd_log_path = None
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Config "snapshot_image_format" option in qemu.conf
        if config_format:
            qemu_conf = utils_config.LibvirtQemuConfig()
            qemu_conf.snapshot_image_format = snapshot_image_format
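            # snapshot_image_format in qemu.conf controls the compression
            # applied to the memory image saved with a snapshot; libvirtd is
            # restarted below so the new setting takes effect.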
            logging.debug("the qemu config file content is:\n %s" % qemu_conf)
            libvirtd.restart()

        if check_json_no_savevm:
            libvirtd_conf = utils_config.LibvirtdConfig()
            libvirtd_conf["log_level"] = '1'
            libvirtd_conf["log_filters"] = '"1:json 3:remote 4:event"'
            libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
            libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
            logging.debug("the libvirtd config file content is:\n %s" %
                          libvirtd_conf)
            libvirtd.restart()

        # Start qemu-ga in the guest if --quiesce is requested
        if unix_channel and options.find("quiesce") >= 0:
            if vm.is_alive():
                vm.destroy()
            virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh)
            virt_xml_obj.set_agent_channel(vm_name)
            vm.start()
            session = vm.wait_for_login()

            # Check if qemu-ga already started automatically
            cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
            stat_install = session.cmd_status(cmd, 300)
            if stat_install != 0:
                raise error.TestNAError("Fail to install qemu-guest-agent, "
                                        "make sure that you have usable repo "
                                        "in guest")

            # Check if qemu-ga already started
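            # (the "[q]emu-ga" pattern keeps grep from matching itself)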
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                if start_ga == "yes":
                    session.cmd("qemu-ga -d")
                    # Check if the qemu-ga really started
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if stat_ps != 0:
                        raise error.TestNAError("Fail to run qemu-ga in guest")
            else:
                if start_ga == "no":
                    # The qemu-ga could be running and should be killed
                    session.cmd("kill -9 `pidof qemu-ga`")
                    # Check whether qemu-ga got killed
                    stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                    if not stat_ps:
                        # Since qemu-ga is managed by systemd and set to
                        # autostart, it may have been restarted; use
                        # systemctl to stop it instead.
                        session.cmd("systemctl stop qemu-guest-agent")
                        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                        if not stat_ps:
                            raise error.TestNAError("Failed to stop agent "
                                                    "in guest")

            if domain_state == "paused":
                virsh.suspend(vm_name)

        # Record the previous snapshot-list
        snaps_before = virsh.snapshot_list(vm_name)

        # Attach disks before creating the snapshot when multiple disks are
        # specified in the cfg and --print-xml is not used
        if dnum > 1 and "--print-xml" not in options:
            for i in range(1, dnum):
                disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i)
                utils.run("qemu-img create -f qcow2 %s 200M" % disk_path)
                virsh.attach_disk(vm_name, disk_path,
                                  'vd%s' % string.lowercase[i],
                                  debug=True)

        # Run virsh command
        # May create several snapshots, according to configuration
        for count in range(int(multi_num)):
            if create_autodestroy:
                # Run virsh command in interactive mode
                vmxml_backup.undefine()
                vp = virsh.VirshPersistent()
                vp.create(vmxml_backup['xml'], '--autodestroy')
                cmd_result = vp.snapshot_create_as(vm_name, options,
                                                   ignore_status=True,
                                                   debug=True)
                vp.close_session()
                vmxml_backup.define()
            else:
                cmd_result = virsh.snapshot_create_as(vm_name, options,
                                                      unprivileged_user=usr,
                                                      uri=uri,
                                                      ignore_status=True,
                                                      debug=True)
            output = cmd_result.stdout.strip()
            status = cmd_result.exit_status

            # check status_error
            if status_error == "yes":
                if status == 0:
                    raise error.TestFail("Run successfully with wrong command!")
                else:
                    # The memspec file should have been removed on failure
                    if (options.find("memspec") >= 0
                            and options.find("atomic") >= 0):
                        if os.path.isfile(option_dict['memspec']):
                            os.remove(option_dict['memspec'])
                            raise error.TestFail("Run failed but file %s exist"
                                                 % option_dict['memspec'])
                        else:
                            logging.info("Run failed as expected and memspec"
                                         " file already been removed")
                    # The domain xml must not be updated if reuse-external
                    # failed
                    elif reuse_external and dac_denial:
                        output = virsh.dumpxml(vm_name).stdout.strip()
                        if "reuse_external" in output:
                            raise error.TestFail("Domain xml should not be "
                                                 "updated with snapshot image")
                    else:
                        logging.info("Run failed as expected")

            elif status_error == "no":
                if status != 0:
                    raise error.TestFail("Run failed with right command: %s"
                                         % output)
                else:
                    # Check the special options
                    snaps_list = virsh.snapshot_list(vm_name)
                    logging.debug("snaps_list is %s", snaps_list)

                    check_snapslist(vm_name, options, option_dict, output,
                                    snaps_before, snaps_list)

                    # To cover bug 872292
                    if check_json_no_savevm:
                        pattern = "The command savevm has not been found"
                        with open(libvirtd_log_path) as f:
                            for line in f:
                                if pattern in line and "error" in line:
                                    raise error.TestFail("'%s' was found: %s"
                                                         % (pattern, line))

    finally:
        # recover domain xml
        xml_recover(vmxml_backup)
        path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
        if os.path.isfile(path):
            raise error.TestFail("Still can find snapshot metadata")

        # rm bad disks
        if bad_disk is not None:
            os.remove(bad_disk)
        # rm attached disks and reused external disks
        if dnum > 1 and "--print-xml" not in options:
            for i in range(dnum):
                disk_path = os.path.join(test.tmpdir, 'disk%s.qcow2' % i)
                if os.path.exists(disk_path):
                    os.unlink(disk_path)
                external_disk = "external_disk%s" % i
                disk_path = os.path.join(test.tmpdir, params.get(external_disk))
                if os.path.exists(disk_path):
                    os.unlink(disk_path)

        # restore config
        if config_format and qemu_conf:
            qemu_conf.restore()

        if libvirtd_conf:
            libvirtd_conf.restore()

        if libvirtd_conf or (config_format and qemu_conf):
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
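
For reference, a minimal sketch of the compose_disk_options helper the example
above relies on, inferred only from its call sites (the real helper lives
elsewhere in the test module; the behavior shown here is an assumption):

import os


def compose_disk_options(test, params, opts):
    """Qualify a relative "file=" path in a disk/memspec option string.

    Returns None when no "file=" component is present, matching the
    caller's fallback of joining the raw value with test.tmpdir itself.
    (Sketch only; the real helper may do more, e.g. validate disks.)
    """
    if "file=" not in opts:
        return None
    parts = []
    for part in opts.split(","):
        if part.startswith("file=") and not part[5:].startswith("/"):
            # Anchor relative snapshot files under the test's tmpdir
            part = "file=" + os.path.join(test.tmpdir, part[5:])
        parts.append(part)
    return ",".join(parts)
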
Example #11
def reset_domain(vm,
                 vm_state,
                 needs_agent=False,
                 guest_cpu_busy=False,
                 password=None):
    """
    Setup guest agent in domain.

    :param vm: the vm object
    :param vm_state: the given vm state string "shut off" or "running"
    """
    if vm.is_alive():
        vm.destroy()
    # new_from_dumpxml() is a classmethod returning a fresh instance;
    # assign its result instead of discarding it
    vm_xml = libvirt_xml.VMXML.new_from_dumpxml(vm.name)
    if needs_agent:
        logging.debug("Attempting to set guest agent channel")
        vm_xml.set_agent_channel(vm.name)
    if not vm_state == "shut off":
        vm.start()
        session = vm.wait_for_login()
        if needs_agent:
            # Check if qemu-ga already started automatically
            cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
            stat_install = session.cmd_status(cmd, 300)
            if stat_install != 0:
                raise error.TestFail("Fail to install qemu-guest-agent, make "
                                     "sure that you have usable repo in guest")

            # Check if qemu-ga already started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                session.cmd("qemu-ga -d")
                # Check if the qemu-ga really started
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if stat_ps != 0:
                    raise error.TestFail("Fail to run qemu-ga in guest")
        if guest_cpu_busy:
            shell_file = "/tmp/test.sh"
            # A simple busy loop to keep one guest CPU at 100%
            cpu_detail_list = ['while true', 'do', '    j=$((j+1))', 'done']
            remote_file = remote.RemoteFile(vm.get_address(), 'scp', 'root',
                                            password, 22, shell_file)
            remote_file.truncate()
            remote_file.add(cpu_detail_list)
            session.cmd('chmod 777 %s' % shell_file)
            session.cmd('%s &' % shell_file)
    if vm_state == "paused":
        vm.pause()
    elif vm_state == "halt":
        try:
            session.cmd("halt")
        except (aexpect.ShellProcessTerminatedError, aexpect.ShellStatusError):
            # The halt command always gets these errors, but execution is OK,
            # skip these errors
            pass
    elif vm_state == "pm_suspend":
        # Execute "pm-suspend-hybrid" command directly will get Timeout error,
        # so here execute it in background, and wait for 3s manually
        if session.cmd_status("which pm-suspend-hybrid"):
            raise error.TestNAError("Cannot execute this test for domain"
                                    " doesn't have pm-suspend-hybrid command!")
        session.cmd("pm-suspend-hybrid &")
        time.sleep(3)
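
A hedged usage sketch of reset_domain as a test might call it (the vm and
params objects come from the test framework; the state string and flags are
illustrative):

# Prepare a running guest with the agent up and one vCPU kept busy
reset_domain(vm, "running", needs_agent=True,
             guest_cpu_busy=True, password=params.get("password"))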