Code example #1
File: libvirt_hooks.py Project: lento-sun/tp-libvirt
 def attach_hook():
     """
     Check attach hooks.
     """
     # Start a domain with qemu command.
     disk_src = vm.get_first_disk_devices()['source']
     vm_test = "foo"
     prepare_hook_file(hook_script %
                       (vm_test, hook_log))
     qemu_bin = params.get("qemu_bin", "/usr/libexec/qemu-kvm")
     if "ppc" in platform.machine():
         qemu_bin = "%s -machine pseries" % qemu_bin
     qemu_cmd = ("%s -drive file=%s,if=none,bus=0,unit=1"
                 " -monitor unix:/tmp/demo,"
                 "server,nowait -name %s" %
                 (qemu_bin, disk_src, vm_test))
     ret = utils.run("%s &" % qemu_cmd)
     pid = utils.run("ps -ef | grep '%s' | grep -v grep | awk"
                     " '{print $2}'" % qemu_cmd).stdout.strip()
     if not pid:
         raise error.TestFail("Cannot get pid of qemu command")
     ret = virsh.qemu_attach(pid, **virsh_dargs)
     if ret.exit_status:
         utils_misc.kill_process_tree(pid)
         raise error.TestFail("Cannot attach qemu process")
     else:
         virsh.destroy(vm_test)
     hook_str = hook_file + " " + vm_test + " attach begin -"
     if not check_hooks(hook_str):
         raise error.TestFail("Failed to check"
                              " attach hooks")
Code example #2
File: virsh_edit.py Project: FT4VT/FT4VM-L1_test
    def edit_vcpu(source, guest_name):
        """
        Modify vm's cpu information by virsh edit command.

        :param source: virsh edit's option.
        :param guest_name: vm's name.
        :return: True if edit succeeded, False if edit failed.
        """
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"}
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vcpus = vm.dominfo()["CPU(s)"]
        # Recover cpuinfo
        status = modify_vcpu(source, dic_mode["recover"])
        if status and vcpus != expected_vcpu:
            return False
        return status
Code example #3
File: virsh_edit.py Project: Acidburn0zzz/tp-libvirt
    def edit_vcpu(source, guest_name):
        """
        Modify vm's cpu information by virsh edit command.

        :param source: virsh edit's option.
        :param guest_name: vm's name.
        :return: True if edit succeeded, False if edit failed.
        """
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"}
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name, ignore_status=True)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vcpus = str(vmxml.vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = modify_vcpu(guest_name, dic_mode["recover"])
        if status and vcpus != expected_vcpu:
            return False
        return status
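For illustration, the dic_mode values are vi ex-mode substitute commands that run inside the editor session virsh edit opens; with expected_vcpu == "2" the "edit" entry expands like this (standalone snippet):

    expected_vcpu = "2"
    edit_cmd = r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>"
    print(edit_cmd)  # -> :%s /[0-9]*<\/vcpu>/2<\/vcpu>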
Code example #4
 def recover(self, params=None):
     """
     Recover test environment
     """
     cpu_enable = bool(self.cpu_status)
     utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
     tmp_c_file = params.get("tmp_c_file", "/tmp/test.c")
     tmp_exe_file = params.get("tmp_exe_file", "/tmp/test")
     if os.path.exists(tmp_c_file):
         os.remove(tmp_c_file)
     if os.path.exists(tmp_exe_file):
         os.remove(tmp_exe_file)
     if 'memory_pid' in params:
         pid = int(params.get('memory_pid'))
         utils_misc.safe_kill(pid, signal.SIGKILL)
         process.run("swapon -a", shell=True)
     if 'cpu_pid' in params:
         pid = int(params.get('cpu_pid'))
         utils_misc.safe_kill(pid, signal.SIGKILL)
         tmp_sh_file = params.get("tmp_sh_file")
         if tmp_sh_file and os.path.exists(tmp_sh_file):
             os.remove(tmp_sh_file)
     virsh.destroy(self.vm_name)
     if len(self.snp_list) < len(self.current_snp_list):
         self.diff_snp_list = list(set(self.current_snp_list) -
                                   set(self.snp_list))
         for item in self.diff_snp_list:
             virsh.snapshot_delete(self.vm_name, item)
     remove_machine_cgroup()
Code example #5
File: virsh_vcpupin.py Project: Antique/virt-test
 def run_and_check_vcpupin(vm_name, vcpu, cpu_list, options, pid):
     """
     Run the vcpupin command and then check the result.
     """
     # Execute virsh vcpupin command.
     cmdResult = virsh.vcpupin(vm_name, vcpu, cpu_list, options)
     if cmdResult.exit_status:
         if not status_error:
             # Command failed in a positive case.
             raise error.TestFail(cmdResult)
         else:
             # Command failed in a negative case, as expected.
             return
     else:
         if status_error:
             # Command succeeded in a negative case.
             raise error.TestFail(cmdResult)
         else:
             # Command succeeded in a positive case.
             # A "--config" change takes effect after the VM is destroyed
             # and started again.
             if options == "--config":
                 virsh.destroy(vm_name)
                 pid = None
             # Check the result of vcpupin command.
             check_vcpupin(vm_name, vcpu, cpu_list, pid)
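A standalone sketch of the --config semantics the comment above relies on, assuming the avocado-vt virsh module and a hypothetical domain name: a --config pinning only lands in the persistent definition, so the test destroys and restarts the VM before checking.

    from virttest import virsh

    virsh.vcpupin("demo-vm", "0", "1", "--config")  # persistent config only
    virsh.destroy("demo-vm")                        # stop the live domain
    virsh.start("demo-vm")                          # fresh boot picks it up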
Code example #6
File: virsh_vcpupin.py Project: Hao-Liu/tp-libvirt
 def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
     """
     Run the vcpupin command and then check the result.
     """
     if vm_ref == "name":
         vm_ref = vm.name
     elif vm_ref == "uuid":
         vm_ref = vm.get_uuid()
     # Execute virsh vcpupin command.
     cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
     if cmdResult.exit_status:
         if not status_error:
             # Command failed in a positive case.
             raise error.TestFail(cmdResult)
         else:
             # Command failed in a negative case, as expected.
             return
     else:
         if status_error:
             # Command succeeded in a negative case.
             raise error.TestFail(cmdResult)
         else:
             # Command succeeded in a positive case.
             # A "--config" change takes effect after the VM is destroyed
             # and started again.
             pid = None
             vcpu_pid = None
             if options == "--config":
                 virsh.destroy(vm.name)
             else:
                 pid = vm.get_pid()
                 logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                 vcpu_pid = vm.get_vcpus_pid()[vcpu]
             # Check the result of vcpupin command.
             check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)
Code example #7
File: specific_kvm.py Project: waynesun09/tp-libvirt
 def make_unclean_fs():
     """
     Force the VM off to leave the win8 file system in an unclean state
     """
     if virsh.start(vm_name, ignore_status=True).exit_status:
         raise exceptions.TestError('Start vm failed')
     time.sleep(10)
     virsh.destroy(vm_name, debug=True)
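virsh destroy is an immediate, ungraceful stop (comparable to pulling the power cord), which is exactly why it leaves the win8 file system unclean. For contrast, a graceful stop with the same module and a hypothetical domain name:

    from virttest import virsh

    # An ACPI shutdown would let the guest unmount cleanly.
    virsh.shutdown("win8-guest", ignore_status=True)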
Code example #8
    def add_cdrom_device(vm_name, init_cdrom):
        """
        Add cdrom device for test vm
        """
        if vm.is_alive():
            virsh.destroy(vm_name)

        virsh.attach_disk(vm_name, init_cdrom,
                          " hdc", " --type cdrom --sourcetype file --config",
                          debug=True)
Code example #9
File: virsh_edit.py Project: bssrikanth/tp-libvirt
    def edit_vcpu(source):
        """
        Modify vm's cpu information by virsh edit command.

        :param source: virsh edit's option.
        :return: True if edit succeeded, False if edit failed.
        """
        vcpucount_result = virsh.vcpucount(vm_name,
                                           options="--config --maximum")
        if vcpucount_result.exit_status:
            # Fall back to the libvirt_xml way to get the vcpu count.
            original_vcpu = str(vmxml.vcpu)
        else:
            original_vcpu = vcpucount_result.stdout.strip()

        expected_vcpu = str(int(original_vcpu) + 1)
        if status_error != "yes":
            # check if topology is defined and change vcpu accordingly
            try:
                vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(source)
                topology = vmxml_backup.get_cpu_topology()
                sockets = str(int(topology['sockets']) + 1)
                cores = topology['cores']
                threads = topology['threads']
                vmcpu_xml = vm_xml.VMCPUXML()
                vmcpu_xml['topology'] = {'sockets': sockets, 'cores': cores,
                                         'threads': threads}
                vmxml_backup['cpu'] = vmcpu_xml
                vmxml_backup.sync()
                expected_vcpu = str(int(sockets) * int(cores) * int(threads))
            except Exception:
                expected_vcpu = str(int(original_vcpu) + 1)
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"}
        status = libvirt.exec_virsh_edit(source, [dic_mode["edit"]])
        logging.info(status)
        if not status:
            vmxml.sync()
            return status
        if libvirtd_stat == "off":
            return False
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(vm_name, ignore_status=True)
            virsh.destroy(vm_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(vm_name)
        new_vcpus = str(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = libvirt.exec_virsh_edit(vm_name, [dic_mode["recover"]])
        vmxml.sync()
        if status and new_vcpus != expected_vcpu:
            return False
        return status
Code example #10
def reset_env(vm_name, xml_file):
    """
    Reset env

    :param vm_name: the vm name
    :param xml_file: domain xml file
    """
    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
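A more defensive variant of reset_env, sketched against the avocado-vt virsh module: virsh destroy and undefine return non-zero when the domain is already shut off or undefined, so ignoring their status keeps cleanup from tripping over a half-torn-down guest (passing ignore_status=True makes that intent explicit).

    from virttest import virsh

    def reset_env_safe(vm_name, xml_file):
        virsh.destroy(vm_name, ignore_status=True)   # may already be off
        virsh.undefine(vm_name, ignore_status=True)  # may already be gone
        virsh.define(xml_file)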
Code example #11
 def clean_clone_vm():
     """
     Clean up cloned domain.
     """
     try:
         if virsh.domain_exists(vm_clone_name):
             if virsh.is_alive(vm_clone_name):
                 virsh.destroy(vm_clone_name, ignore_status=False)
             virsh.undefine(vm_clone_name, ignore_status=False)
         if os.path.exists(clone_image):
             os.remove(clone_image)
     except error.CmdError as detail:
         raise error.TestFail("Clean up cloned guest failed: %s" % detail)
Code example #12
    def add_cdrom_device(vm_name, init_cdrom):
        """
        Add cdrom device for test vm

        @param vm_name: guest name
        @param init_cdrom: source file
        """
        if vm.is_alive():
            virsh.destroy(vm_name)

        virsh.attach_disk(vm_name, init_cdrom,
                          disk_device, " --type cdrom --sourcetype file --config",
                          debug=True)
Code example #13
    def add_device(vm_name, init_source="''"):
        """
        Add device for test vm

        :param vm_name: guest name
        :param init_source: source file
        """
        if vm.is_alive():
            virsh.destroy(vm_name)

        virsh.attach_disk(vm_name, init_source,
                          target_device,
                          "--type %s --sourcetype file --config" % device_type,
                          debug=True)
Code example #14
File: sriov.py Project: balamuruhans/tp-libvirt
    def attach_interface():
        """
            Attach interface:

            1.Attach interface from xml;
            2.Check the vf driver after attach interface;
            3.Check the live xml after attach interface;
        """
        if managed == "no":
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name, file_opt=new_iface.xml, flagstr=option, debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        # For option == "--persistent", after VM destroyed and then start, the device should still be there.
        if option == "--persistent":
            virsh.destroy(vm_name)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)
        get_ip_by_mac(mac_addr, timeout=60)
        device = live_xml.devices
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail("The driver of the hostdev interface is not vfio\n")
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
                if match != "yes":
                    test.fail("The dev name or mode of macvtap interface is wrong after attach\n")
        return interface
Code example #15
def run(test, params, env):
    """
    Verify various kernel panic methods

    1.Prepare test environment.
    2.Execute any needed setup commands
    3.Execute kernel panic command
    4.Verify panic was detected
    5.restore environment
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    if not vm.is_alive():
        vm.start()

    panic_cmd = params.get("panic_cmd", None)
    status = None
    output = None

    # Setup environment
    session = vm.wait_for_login()
    # Subsequent logins should timeout quickly
    vm.LOGIN_WAIT_TIMEOUT = 10

    # run test case
    try:
        logging.info("Sending panic_cmd command: %s" % panic_cmd)
        status, output = session.cmd_status_output(panic_cmd, timeout=5,
                                                   internal_timeout=5)
    except aexpect.ShellTimeoutError:
        pass  # This is expected
    except Exception:
        # This is unexpected
        raise

    try:
        vm.verify_kernel_crash()
        status = 1  # bad
    except virt_vm.VMDeadKernelCrashError:
        status = 0  # good

    # Restore environment to stable state
    session.close()
    vm.serial_console.close()
    virsh.destroy(vm_name)

    # check status_error
    if status:
        test.fail("Panic command failed to cause panic")
Code example #16
 def recover(self, params=None):
     """
     Recover test environment
     """
     cpu_enable = bool(self.cpu_status)
     utils_misc.set_cpu_status(self.cpu_num, cpu_enable)
     virsh.destroy(self.vm_name)
     if 'cpu_pid' in params:
         pid = int(params.get('cpu_pid'))
         utils_misc.safe_kill(pid, signal.SIGKILL)
         tmp_sh_file = params.get("tmp_sh_file")
         if tmp_sh_file and os.path.exists(tmp_sh_file):
             os.remove(tmp_sh_file)
     if os.path.exists(self.dump_file):
         os.remove(self.dump_file)
     if os.path.exists(self.dump_file1):
         os.remove(self.dump_file1)
     remove_machine_cgroup()
Code example #17
File: ci.py Project: cheneydc/virt-test-ci
 def remove(self, name):
     dom = name  # 'name' is a dict describing the domain (name/state/persistent)
     if dom['state'] != 'shut off':
         res = virsh.destroy(dom['name'])
         if res.exit_status:
             raise Exception(str(res))
     if dom['persistent'] == 'yes':
         # Make sure the domain is removed anyway
         res = virsh.undefine(
             dom['name'], options='--snapshots-metadata --managed-save')
         if res.exit_status:
             raise Exception(str(res))
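Hypothetical usage of remove() above; the dict shape is inferred from the keys the method reads ('name', 'state', 'persistent'), and ci stands for an instance of the surrounding class:

    ci.remove({'name': 'virt-tests-vm1',
               'state': 'running',
               'persistent': 'yes'})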
Code example #18
File: virsh_edit.py Project: Antique/tp-libvirt
    def edit_vcpu(source):
        """
        Modify vm's cpu information by virsh edit command.

        :param source: virsh edit's option.
        :return: True if edit succeeded, False if edit failed.
        """
        vcpucount_result = virsh.vcpucount(vm_name,
                                           options="--config --maximum")
        if vcpucount_result.exit_status:
            # Fall back to the libvirt_xml way to get the vcpu count.
            original_vcpu = str(vmxml.vcpu)
        else:
            original_vcpu = vcpucount_result.stdout.strip()

        expected_vcpu = str(int(original_vcpu) + 1)
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"}
        status = exec_edit(source, [dic_mode["edit"]])
        logging.info(status)
        if not status:
            return status
        if libvirtd_stat == "off":
            return False
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(vm_name, ignore_status=True)
            virsh.destroy(vm_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(vm_name)
        new_vcpus = str(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        status = exec_edit(vm_name, [dic_mode["recover"]])
        if status and new_vcpus != expected_vcpu:
            return False
        return status
Code example #19
File: virsh_edit.py Project: nasastry/tp-libvirt
    def edit_memory(source):
        """
        Modify vm's maximum and current memory (unit and value).

        :param source: virsh edit's option.
        :return: True if edit succeeded, False if edit failed.
        """
        mem_unit = params.get("mem_unit", "K")
        mem_value = params.get("mem_value", "1048576")
        mem_delta = params.get("mem_delta", 1000)
        edit_cmd = []
        del_cmd = r":g/currentMemory/d"
        edit_cmd.append(del_cmd)
        update_cmd = r":%s/<memory unit='KiB'>[0-9]*<\/memory>/<memory unit='"
        update_cmd += mem_unit + "'>" + mem_value + r"<\/memory>"
        edit_cmd.append(update_cmd)
        try:
            expected_mem = int(utils_misc.normalize_data_size(
                mem_value + mem_unit, 'K').split('.')[0])
        except ValueError:
            logging.error("Fail to translate %s to KiB", mem_value + mem_unit)
            return False
        logging.debug("Expected max memory is %s", expected_mem)
        status = libvirt.exec_virsh_edit(source, edit_cmd)
        try:
            if status:
                # Restart vm to check memory value
                virsh.destroy(vm_name)
                virsh.start(vm_name)
                new_mem = vm.get_max_mem()
                if new_mem - expected_mem > int(mem_delta):
                    logging.error("New max memory %s is not excepted", new_mem)
                    return False
        except Exception as e:
            logging.error("Error occured when check domain memory: %s", e)
            return False
        return status
Code example #20
def run_pm_test(params, libvirtd, vm):
    """
    Destroy the VM after executing a series of S3 and save/restore operations
    """

    vm_name = vm.name
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    save_path = os.path.join(data_dir.get_tmp_dir(), 'tmp.save')
    try:
        pm_xml = VMPMXML()
        pm_xml.mem_enabled = 'yes'
        vm_xml.pm = pm_xml
        vm_xml.sync()
        vm.prepare_guest_agent()
        virsh.dompmsuspend(vm.name, 'mem')
        virsh.dompmwakeup(vm.name)
        virsh.save(vm.name, save_path)
        virsh.restore(save_path)
        virsh.dompmsuspend(vm.name, 'mem')
        virsh.save(vm.name, save_path)
        virsh.destroy(vm.name)
    finally:
        vm_xml_backup.sync()
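The prepare_guest_agent() call above is load-bearing: virsh dompmsuspend needs the QEMU guest agent running in the guest. A minimal standalone sketch of the suspend/wakeup pair (domain name hypothetical):

    from virttest import virsh

    virsh.dompmsuspend("demo-vm", "mem")  # requires qemu-guest-agent in guest
    virsh.dompmwakeup("demo-vm")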
Code example #21
File: ci.py Project: cheneydc/virt-test-ci
    def prepare_env(self):
        """
        Prepare the environment before all tests.
        """
        print 'Running bootstrap'
        self.bootstrap()

        print 'Removing VM',  # TODO: use virt-test api remove VM
        sys.stdout.flush()
        status, res, err_msg = self.run_test(
            'remove_guest.without_disk', need_check=False)
        if 'PASS' not in status:
            virsh.undefine('virt-tests-vm1', '--snapshots-metadata')
            print '   WARNING: Failed to remove guest'

        print 'Installing VM',
        sys.stdout.flush()
        status, res, err_msg = self.run_test(
            'unattended_install.import.import.default_install.aio_native',
            restore_image=True, need_check=False)
        if 'PASS' not in status:
            raise Exception('   ERROR: Failed to install guest \n %s' %
                            res.stderr)
        virsh.destroy('virt-tests-vm1')
Code example #22
File: virsh_edit.py Project: bonzini/virt-test
    def edit_vcpu(source, guest_name):
        """
        Modify vm's cpu information by virsh edit command.

        @param source: virsh edit's option.
        @param guest_name: vm's name.
        @return: True if edit succeeded, False if edit failed.
        """
        dic_mode = {"edit" : ":%s /1<\/vcpu>/2<\/vcpu>",
                    "recover" : ":%s /2<\/vcpu>/1<\/vcpu>"}
        status = modify_vcpu(source, dic_mode["edit"])
        if not status:
            return status
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(guest_name)
            virsh.destroy(guest_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(guest_name)
        vcpus = vm.dominfo()["CPU(s)"]
        # Recover cpuinfo
        status = modify_vcpu(source, dic_mode["recover"])
        if status and vcpus != '2':
            return False
        return status
Code example #23
def run(test, params, env):
    """
    Test for basic controller device function.

    1) Define the VM with specified controller device and check result meets
       expectation.
    2) Start the guest and check if start result meets expectation
    3) Test the function of started controller device
    4) Shutdown the VM and clean up environment
    """
    def remove_all_addresses(vm_xml):
        """
        Remove all addresses for all devices who has one.
        """
        try:
            for elem in vm_xml.xmltreefile.findall('/devices/*/address'):
                vm_xml.xmltreefile.remove(elem)
        except (AttributeError, TypeError):
            pass  # Element already doesn't exist
        vm_xml.xmltreefile.write()

    def remove_usb_devices(vm_xml):
        """
        Remove all USB devices.
        """
        try:
            for xml in vm_xml.xmltreefile.findall('/devices/*'):
                if xml.get('bus') == 'usb':
                    vm_xml.xmltreefile.remove(xml)
        except (AttributeError, TypeError):
            pass  # Element already doesn't exist
        vm_xml.xmltreefile.write()

    def prepare_local_image(image_filename):
        """
        Prepare a local image.

        :param image_filename: The name to the local image.
        :return: The path to the image file.
        """
        image_format = 'qcow2'
        image_size = '10M'
        image_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
        try:
            image_path = libvirt.create_local_disk("file",
                                                   image_path,
                                                   image_size,
                                                   disk_format=image_format)
        except Exception as err:
            test.error("Error happens when prepare local image: %s", err)
        disks_img.append(image_path)
        return image_path

    def prepare_usb_controller(vmxml, index, addr):
        """
        Add usb controllers into vm's xml.

        :param vmxml: The vm's xml.
        """
        # Add disk usb controller(s)
        usb_controller = Controller("controller")
        usb_controller.type = "usb"
        usb_controller.index = str(index)
        usb_controller.model = 'qemu-xhci'
        addr_dict = {
            "domain": '0x0000',
            'function': '0x0',
            'bus': addr['bus'],
            'slot': addr['slot']
        }
        usb_controller.address = usb_controller.new_controller_address(
            **{"attrs": addr_dict})
        vmxml.add_device(usb_controller)
        # Redefine domain
        vmxml.sync()

    def prepare_virt_disk_xml(virt_disk_device_target,
                              virt_disk_device_bus,
                              usb_bus=None,
                              virt_disk_bus=None,
                              virt_disk_slot=None):
        """
        Prepare the virt disk xml to be attached/detached.

        :param virt_disk_device_target: The target to the local image.
        :param virt_disk_bus: The bus to the local image.
        :return: The virtual disk xml.
        """
        image_filename = ''.join(
            random.choice(string.ascii_lowercase) for _ in range(8)) + ".qcow2"
        virt_disk_device = 'disk'
        virt_disk_device_type = 'file'
        virt_disk_device_format = 'qcow2'
        disk_xml = Disk(type_name=virt_disk_device_type)
        disk_xml.device = virt_disk_device
        disk_src_dict = {
            'attrs': {
                'file': prepare_local_image(image_filename),
                'type_name': 'file'
            }
        }
        disk_xml.source = disk_xml.new_disk_source(**disk_src_dict)
        driver_dict = {"name": "qemu", "type": virt_disk_device_format}
        disk_xml.driver = driver_dict
        disk_xml.target = {
            "dev": virt_disk_device_target,
            "bus": virt_disk_device_bus
        }
        if virt_disk_device_bus == 'usb':
            disk_addr_dict = {'bus': str(usb_bus), 'port': '1'}
            disk_xml.new_disk_address(type_name='usb',
                                      **{"attrs": disk_addr_dict})
        elif virt_disk_device_bus == 'virtio':
            disk_addr_dict = {
                'bus': virt_disk_bus,
                'slot': virt_disk_slot,
                'domain': '0x0000',
                'function': '0x0'
            }
            disk_xml.address = disk_xml.new_disk_address(
                type_name='pci', **{"attrs": disk_addr_dict})
        return disk_xml

    def prepare_iface_xml(iface_bus, iface_slot):
        """
        Create interface xml file
        """
        iface_xml = Interface(type_name='bridge')
        iface_xml.source = {'bridge': 'virbr0'}
        iface_xml.model = "virtio"
        addr_dict = {
            'bus': iface_bus,
            'slot': iface_slot,
            'domain': '0x0000',
            'function': '0x0'
        }
        iface_xml.address = iface_xml.new_iface_address(type_name='pci',
                                                        **{"attrs": addr_dict})
        return iface_xml

    if 'ppc' not in platform.machine():
        test.cancel('This test only supports PPC machines')

    # Additional disk images.
    disks_img = []
    devices_xml = []

    prepare_cntlr = "yes" == params.get('prepare_controller', "no")
    cntlr_type = params.get('controller_type')
    cntlr_model = params.get('controller_model', '')
    with_index = 'yes' == params.get('controller_index', 'yes')
    cntlr_index = params.get('controller_index')
    cntlr_node = params.get('controller_node')
    target_index = params.get('target_index')
    cntlr_num = int(params.get('controller_num', '0'))
    cntlr_cur = int(params.get('controller_current', '0'))
    special_num = params.get('special_num')
    addr_str = params.get('address')
    if addr_str:
        addr_str = eval(addr_str)
    device_num = int(params.get('device_num', '0'))
    device_list = params.get('device_list', '')
    if device_list:
        device_list = eval(device_list)
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    qemu_cmd_check = "yes" == params.get("qemu_cmd_check", "no")
    status_error = "yes" == params.get("status_error", "no")
    numa = "yes" == params.get("numa", "no")
    with_define = 'yes' == params.get("with_define", "no")
    coldplug = "yes" == params.get("coldplug", "no")
    hotplug = "yes" == params.get("hotplug", "no")
    hotunplug = "yes" == params.get("hotunplug", "no")

    def check_index_in_xml(xml):
        """
        Check the used target in guest's xml
        :param xml:  The guest's xml
        :return:  A dict of result
        """
        result = {'sd': 'a', 'vd': 'a', 'index': 1}
        disk_list = xml.xmltreefile.findall("devices/disk/target")
        for disk_target in disk_list:
            dev = disk_target.attrib['dev']
            if dev[-1] >= result[dev[0:-1]]:
                result[dev[0:-1]] = chr(ord(dev[-1]) + 1)
        controller_list = xml.xmltreefile.findall("devices/controller")
        for controller in controller_list:
            if int(controller.get('index')) >= result['index']:
                result['index'] = int(controller.get('index')) + 1
        return result

    def enumerate_index(index_dict, index_key):
        index = index_dict[index_key]
        result = index_key + index if index_key in ['sd', 'vd'] else str(index)
        if index_key in ['sd', 'vd'] and index == 'z':
            index = 'aa'
        elif index_key in ['sd', 'vd']:
            if len(index) > 1:
                index = index[0] + chr(ord(index[-1]) + 1)
            else:
                index = chr(ord(index) + 1)
        else:
            index += 1
        index_dict[index_key] = index
        return result

    def match_new_addr(address):
        """
        Match any device address.
        """
        logging.info("The address is:%s" % address)
        match = re.match(
            r"(?P<bus>[0-9a-f]*):(?P<slot>[0-9a-f]*)\.(?P<function>[0-9a-f])",
            address)
        if match:
            addr_dict = match.groupdict()
            addr_dict['bus'] = hex(int(addr_dict['bus'], 16))
            addr_dict['slot'] = hex(int(addr_dict['slot'], 16))
            addr_dict['function'] = hex(int(addr_dict['function'], 16))
            addr_dict['domain'] = '0x0000'
            return addr_dict
        return None

    def add_device(type="usb", index="0", model="qemu-xhci"):
        """
        Add new device.
        """
        newcontroller = Controller("controller")
        newcontroller.type = type
        newcontroller.index = index
        newcontroller.model = model
        logging.debug("New controller is:%s", newcontroller)
        return newcontroller

    def setup_controller_xml():
        """
        Prepare controller devices of VM XML according to params.
        """

        ctlr_type = 'pci' if cntlr_type is None else cntlr_type
        curcntlr = cntlr_cur
        while curcntlr < cntlr_num:
            ctrl = Controller(type_name=ctlr_type)
            if cntlr_node:
                ctrl.node = cntlr_node
            if cntlr_model:
                ctrl.model = cntlr_model
                if cntlr_model == 'pci-bridge':
                    ctrl.model_name = {'name': 'pci-bridge'}
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            elif with_index:
                if cntlr_model == 'pci-bridge':
                    for i in range(
                            1,
                            int(match_new_addr(addr_str[curcntlr])['bus'], 16)
                            + 1):
                        vm_xml.add_device(add_device('pci', str(i),
                                                     'pci-root'))
                    ctrl.index = str(
                        int(match_new_addr(addr_str[curcntlr])['bus'], 16) + 1)
                else:
                    ctrl.index = str(curcntlr)
            if target_index is not None:
                ctrl.target = {'index': target_index}
            elif with_index:
                if cntlr_model == 'pci-bridge':
                    ctrl.target = {
                        'chassisNr':
                        str(
                            int(match_new_addr(addr_str[curcntlr])['bus'], 16)
                            + 1)
                    }
                else:
                    ctrl.target = {'index': str(curcntlr)}
            if addr_str is not None:
                for address in addr_str:
                    ctrl.address = ctrl.new_controller_address(
                        attrs=match_new_addr(address))

            logging.debug("Controller XML is:%s", ctrl)
            vm_xml.add_device(ctrl)
            curcntlr += 1
        if special_num:
            spe_num = int(special_num)
            ctrl = Controller(type_name=ctlr_type)

            if cntlr_model:
                ctrl.model = cntlr_model
            ctrl.index = spe_num
            ctrl.target = {'index': spe_num}
            if addr_str is not None and cntlr_model != 'pci-root':
                for address in addr_str:
                    ctrl.address = ctrl.new_controller_address(
                        attrs=match_new_addr(address))

            logging.debug("Controller XML is:%s", ctrl)
            vm_xml.add_device(ctrl)

    def define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing XML.
        """
        fail_patts = []
        known_models = {
            'pci': ['pci-root', 'pci-bridge'],
            'virtio-serial': [],
            'usb': ['qemu-xhci'],
            'scsi': ['virtio-scsi'],
        }
        if status_error:
            if cntlr_type == 'pci' and cntlr_model:
                fail_patts.append(r"Invalid PCI controller model")
            if cntlr_type and cntlr_model not in known_models[cntlr_type]:
                fail_patts.append(r"Unknown model type")
            if cntlr_model == 'pcie-root':
                fail_patts.append(r"Device requires a standard PCI slot")
            if addr_str and '02:00.0' in addr_str:
                fail_patts.append(r"slot must be >= 1")
            elif addr_str and '02:32.0' in addr_str:
                fail_patts.append(r"must be <= 0x1F")
            if cntlr_num > 31 and cntlr_type == 'pci':
                fail_patts.append(r"out of range - must be 0-30")
            if cntlr_index and target_index:
                if (int(cntlr_index) != 0) ^ (int(target_index) != 0):
                    fail_patts.append(
                        r"Only the PCI controller with index 0 can have target index 0"
                    )

            # isdigit returns False for a negative number, which just meets
            # the requirement of this test.
            if cntlr_index is not None and not cntlr_index.isdigit():
                fail_patts.append(r"Cannot parse controller index")

        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def plug_the_devices(attach_options, dev_index):
        for index, dev in enumerate(device_list):
            if addr_str:
                new_addr = match_new_addr(addr_str[index])
            if dev == 'disk':
                disk_xml = prepare_virt_disk_xml(
                    enumerate_index(dev_index, 'vd'),
                    'virtio',
                    virt_disk_bus=new_addr['bus'],
                    virt_disk_slot=new_addr['slot'])
                logging.debug("The disk xml is: %s" % disk_xml)
                result = virsh.attach_device(vm_name,
                                             disk_xml.xml,
                                             flagstr=attach_options,
                                             ignore_status=True,
                                             debug=True)
                libvirt.check_exit_status(result, status_error)
                devices_xml.append(disk_xml)
            elif dev == 'usb':
                disk_xml = prepare_virt_disk_xml(
                    enumerate_index(dev_index, 'sd'),
                    'usb',
                    usb_bus=enumerate_index(dev_index, 'index'))
                logging.debug("The disk xml is: %s" % disk_xml)
                result = virsh.attach_device(vm_name,
                                             disk_xml.xml,
                                             flagstr=attach_options,
                                             ignore_status=True,
                                             debug=True)
                libvirt.check_exit_status(result, status_error)
                devices_xml.append(disk_xml)
            elif dev == 'interface':
                iface_xml = prepare_iface_xml(iface_bus=new_addr['bus'],
                                              iface_slot=new_addr['slot'])
                logging.debug("The nic xml is: %s" % iface_xml)
                result = virsh.attach_device(vm_name,
                                             iface_xml.xml,
                                             flagstr=attach_options,
                                             ignore_status=True,
                                             debug=True)
                libvirt.check_exit_status(result, status_error)
                devices_xml.append(iface_xml)

    def start_and_check():
        """
        Predict the error message when starting and try to start the guest.
        """
        fail_patts = []
        res = virsh.start(vm_name)
        libvirt.check_result(res, expected_fails=fail_patts)
        vm.wait_for_login().close()
        return not res.exit_status

    def check_qemu_cmdline():
        """
        Check domain qemu command line against expectation.
        """
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
        logging.debug('Qemu command line: %s', cmdline)
        cmdline_list = cmdline.split('\x00')
        # TODO
        checknum = 1
        if cntlr_num > checknum:
            checknum = cntlr_num
        if special_num and int(special_num) > checknum:
            checknum = int(special_num)
        if addr_str:
            for address in addr_str:
                bus = int(match_new_addr(address)['bus'], 16)
                if bus > checknum:
                    checknum = bus
        if device_num:
            if (device_num + 6) // 31 > checknum:
                checknum = (device_num + 6) // 31 + 1
        if checknum == 1 and cntlr_num != -1:
            test.fail('Multiple controllers are not used')
        else:
            for i in range(1, checknum):
                restr = r'spapr-pci-host-bridge,index=%s' % i
                if restr not in cmdline:
                    test.fail('The pci root with index %s was not created' % i)

    def check_in_guest_pci_with_addr(check_flag=True):
        def generate_match_line(index):
            match_dict = {
                'disk': 'SCSI storage controller',
                'memballoon': 'Unclassified device',
                'interface': 'Ethernet controller',
                'usb': 'USB controller',
                'pci-bridge': 'PCI bridge',
                'serial': 'Communication controller'
            }
            new_addr = match_new_addr(addr_str[index])
            match_line = '00(0[1-9a-f]|1[0-9a-f]):00:%s.0 ' % new_addr[
                'slot'].split('x')[1].zfill(2)
            if device_list[index] in match_dict.keys():
                match_line += match_dict[device_list[index]]
            else:
                test.fail('Unknown device(%s) in case config' %
                          device_list[index])
            return match_line

        session = vm.wait_for_login()
        cmd = 'lspci'
        try:
            guest_out = str(session.cmd_output_safe(cmd))
            logging.debug(guest_out)
            for i in range(len(addr_str)):
                match_line = generate_match_line(i)
                times = 0
                while True:
                    if not re.search(match_line, guest_out) and check_flag:
                        if times < 5:
                            time.sleep(5)
                            times += 1
                            guest_out = str(session.cmd_output_safe(cmd))
                            logging.debug(guest_out)
                        else:
                            test.fail('Could not find pci device in guest')
                    elif re.search(
                            match_line, guest_out
                    ) and not check_flag and device_list[i] != 'usb':
                        if times < 5:
                            time.sleep(5)
                            times += 1
                            guest_out = str(session.cmd_output_safe(cmd))
                            logging.debug(guest_out)
                        else:
                            test.fail('Could still find pci device after detach')
                    else:
                        break
        except Exception as e:
            session.close()
            test.fail(e)
        if cntlr_node and check_flag:
            cmd = "lspci -vv -s %s | grep 'NUMA node:' | grep -o [0-9]*"
            for addr in addr_str:
                guest_out = str(session.cmd_output_safe(cmd % addr)).strip()
                if str(guest_out) != cntlr_node:
                    test.fail('Device is not plugged into the right NUMA node')
        session.close()

    def check_in_guest_pci_with_num():
        if cntlr_type == 'scsi':
            check_line = 'SCSI storage controller'
        else:
            return

        session = vm.wait_for_login()
        cmd = "lspci | grep '%s' | wc -l" % check_line
        try:
            guest_out = str(session.cmd_output_safe(cmd))
            logging.debug(guest_out)
            if int(guest_out) != int(cntlr_num) + int(default_pci):
                test.fail('The number of controllers is not right')
        except Exception as e:
            test.fail(e)
        finally:
            session.close()

    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        vm_xml.remove_all_device_by_type('controller')
        remove_usb_devices(vm_xml)
        if int(cntlr_num) > 10:
            virsh.start(vm_name)
            session = vm.wait_for_login()
            cmd = "lspci | grep 'SCSI storage controller' | wc -l"
            try:
                default_pci = str(session.cmd_output_safe(cmd))
            except Exception as e:
                test.fail(e)
            finally:
                session.close()
                virsh.destroy(vm_name)
        spe_device = False
        if numa:
            vmcpuxml = vm_xml.xmltreefile.find('/cpu')
            vmcpuxml.numa_cell = vmcpuxml.dicts_to_cells([{
                'id': '0',
                'cpus': '0',
                'memory': '1048576'
            }, {
                'id': '1',
                'cpus': '1',
                'memory': '1048576'
            }])
            vm_xml.xmltreefile.write()
        if with_define:
            if addr_str:
                for i in range(len(addr_str)):
                    if device_list[i] in ['disk', 'memballoon', 'interface']:
                        spe_device = True
                        spe_ele = vm_xml.xmltreefile.find(
                            '/devices/%s/address' % device_list[i])
                        new_addr = match_new_addr(addr_str[i])
                        spe_ele.attrib['slot'] = str(new_addr['slot'])
                        spe_ele.attrib['bus'] = str(new_addr['bus'])
                        vm_xml.xmltreefile.write()
            if not spe_device:
                remove_all_addresses(vm_xml)

            if cntlr_num or special_num:
                setup_controller_xml()

            if device_num:
                newdev_list = []
                for i in range(1, device_num + 1):
                    newdev_list.append(add_device(index=str(i)))

                dev_list = vm_xml.get_devices()
                dev_list.extend(newdev_list)
                vm_xml.set_devices(dev_list)

        if prepare_cntlr:
            setup_controller_xml()

        if hotplug or coldplug:
            if 'usb' in device_list:
                vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
                for index, dev in enumerate(device_list):
                    if dev == 'usb':
                        new_addr = match_new_addr(addr_str[index])
                        dev_index = check_index_in_xml(vm_xml)
                        prepare_usb_controller(vm_xml, dev_index['index'],
                                               new_addr)
                        vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)

        logging.debug("Test VM XML is %s" % vm_xml)

        if not define_and_check() and status_error:
            logging.debug("Expected define the VM fail, exiting.")
        else:
            incxml = virsh.dumpxml(vm_name).stdout
            logging.debug("The inactive xml:%s" % incxml)
            if coldplug:
                attach_options = "--config"
                dev_index = check_index_in_xml(vm_xml)
                plug_the_devices(attach_options, dev_index)
                incxml = virsh.dumpxml(vm_name).stdout
                logging.debug("The xml after cold plug:%s" % incxml)
            try:

                if not start_and_check() and status_error:
                    logging.debug("Expected start the VM fail, exiting.")
                else:
                    if hotplug:
                        attach_options = "--live"
                        dev_index = check_index_in_xml(vm_xml)
                        plug_the_devices(attach_options, dev_index)
                        incxml = virsh.dumpxml(vm_name).stdout
                        logging.debug("The xml after hot plug:%s" % incxml)
                    if qemu_cmd_check:
                        check_qemu_cmdline()
                    if addr_str:
                        check_in_guest_pci_with_addr()
                    if int(cntlr_num) > 10:
                        check_in_guest_pci_with_num()
            except virt_vm.VMStartError as detail:
                test.error(detail)

        if hotunplug:
            logging.debug("Try to hot unplug")
            detach_options = "--live"
            for xml in devices_xml:
                result = virsh.detach_device(vm_name,
                                             xml.xml,
                                             flagstr=detach_options,
                                             ignore_status=True,
                                             debug=True)
                libvirt.check_exit_status(result, status_error)
            if addr_str:
                check_in_guest_pci_with_addr(False)

    finally:
        vm_xml_backup.sync()

        for img in disks_img:
            os.remove(img)
Code example #24
def run_virsh_snapshot_disk(test, params, env):
    """
    Test virsh snapshot command when disk in all kinds of type.

    (1). Init the variables from params.
    (2). Create an image in the specified format.
    (3). Attach the disk to the vm.
    (4). Create a snapshot.
    (5). Revert the snapshot.
    (6). Clean up.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))

    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")

    # Get a tmp_dir.
    tmp_dir = data_dir.get_tmp_dir()
    # Create an image.
    params['image_name'] = "snapshot_test"
    params['image_format'] = image_format
    params['image_size'] = "1M"
    image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
    img_path, _ = image.create(params)
    # Do the attach action.
    result = virsh.attach_disk(vm_name,
                               source=img_path,
                               target="vdf",
                               extra="--persistent --subdriver %s" %
                               image_format)
    if result.exit_status:
        raise error.TestNAError("Failed to attach disk %s to VM. "
                                "Detail: %s." % (img_path, result.stderr))

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    try:
        # Create snapshot.
        if snapshot_from_xml:
            snapshot_name = "snapshot_test"
            lines = [
                "<domainsnapshot>\n",
                "<name>%s</name>\n" % snapshot_name,
                "<description>Snapshot Test</description>\n"
            ]
            if snapshot_memory == "external":
                memory_external = os.path.join(tmp_dir, "snapshot_memory")
                snapshot_external_disk.append(memory_external)
                lines.append("<memory snapshot=\'%s\' file='%s'/>\n" %
                             (snapshot_memory, memory_external))
            else:
                lines.append("<memory snapshot='%s'/>\n" % snapshot_memory)

            # Add all disks into xml file.
            disks = vm.get_disk_devices().values()
            lines.append("<disks>\n")
            for disk in disks:
                lines.append("<disk name='%s' snapshot='%s'>\n" %
                             (disk['source'], snapshot_disk))
                if snapshot_disk == "external":
                    disk_external = os.path.join(
                        tmp_dir, "%s.snap" % os.path.basename(disk['source']))
                    snapshot_external_disk.append(disk_external)
                    lines.append("<source file='%s'/>\n" % disk_external)
                lines.append("</disk>\n")
            lines.append("</disks>\n")
            lines.append("</domainsnapshot>")

            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            snapshot_xml_file = open(snapshot_xml_path, "w")
            snapshot_xml_file.writelines(lines)
            snapshot_xml_file.close()
            snapshot_result = virsh.snapshot_create(
                vm_name, ("--xmlfile %s" % snapshot_xml_path))
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." %
                        snapshot_result.stderr.strip())
        else:
            options = ""
            snapshot_result = virsh.snapshot_create(vm_name, options)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." %
                        snapshot_result.stderr.strip())
            snapshot_name = re.search(r"\d+",
                                      snapshot_result.stdout.strip()).group(0)
            if snapshot_current:
                lines = [
                    "<domainsnapshot>\n",
                    "<description>Snapshot Test</description>\n",
                    "<state>running</state>\n",
                    "<creationTime>%s</creationTime>" % snapshot_name,
                    "</domainsnapshot>"
                ]
                snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
                snapshot_xml_file = open(snapshot_xml_path, "w")
                snapshot_xml_file.writelines(lines)
                snapshot_xml_file.close()
                options += "--redefine %s --current" % snapshot_xml_path
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            raise error.TestFail(
                "Success to create snapshot in negative case\n"
                "Detail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        status, output = session.cmd_status_output("touch %s" % tmp_file_path)
        if status:
            raise error.TestFail("Touch file in vm failed. %s" % output)

        session.close()

        # Destroy vm for snapshot revert.
        virsh.destroy(vm_name)
        # Revert snapshot.
        revert_options = ""
        if snapshot_revert_paused:
            revert_options += " --paused"
        revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                              revert_options)
        if revert_result.exit_status:
            raise error.TestFail("Revert snapshot failed. %s" %
                                 revert_result.stderr.strip())

        if vm.is_dead():
            raise error.TestFail("Revert snapshot failed.")

        if snapshot_revert_paused:
            if vm.is_paused():
                vm.resume()
            else:
                raise error.TestFail(
                    "Revert command successed, but VM is not "
                    "paused after reverting with --paused option.")
        # login vm.
        session = vm.wait_for_login()
        # Check the result of revert.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()

    finally:
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        image.remove()
        if snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
Code example #25
File: virsh_domstate.py Project: yuliugit/tp-libvirt
def run(test, params, env):
    """
    Test command: virsh domstate.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domstate operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    libvirtd_service = utils_libvirtd.Libvirtd()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    try:
        if vm_action == "suspend":
            virsh.suspend(vm_name, ignore_status=False)
        elif vm_action == "resume":
            virsh.suspend(vm_name, ignore_status=False)
            virsh.resume(vm_name, ignore_status=False)
        elif vm_action == "destroy":
            virsh.destroy(vm_name, ignore_status=False)
        elif vm_action == "start":
            virsh.destroy(vm_name, ignore_status=False)
            virsh.start(vm_name, ignore_status=False)
        elif vm_action == "kill":
            libvirtd_service.stop()
            kill_process_by_pattern(vm_name)
            libvirtd_service.restart()
    except error.CmdError:
        raise error.TestError("Guest prepare action error!")

    if libvirtd == "off":
        libvirtd_service.stop()

    if vm_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("Test 'remote' parameters not setup")
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')
            command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except error.CmdError:
            status = 1
    else:
        result = virsh.domstate(vm_ref, extra, ignore_status=True)
        status = result.exit_status
        output = result.stdout.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        libvirtd_service.start()

    # check status_error
    if status_error:
        if not status:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status or not output:
            raise error.TestFail("Run failed with right command")
        if extra.count("reason"):
            if vm_action == "suspend":
                # If not, will cost long time to destroy vm
                virsh.destroy(vm_name)
                if not output.count("user"):
                    raise ActionError(vm_action)
            elif vm_action == "resume":
                if not output.count("unpaused"):
                    raise ActionError(vm_action)
            elif vm_action == "destroy":
                if not output.count("destroyed"):
                    raise ActionError(vm_action)
            elif vm_action == "start":
                if not output.count("booted"):
                    raise ActionError(vm_action)
            elif vm_action == "kill":
                if not output.count("crashed"):
                    raise ActionError(vm_action)
        if vm_ref == "remote":
            if not (re.search("running", output)
                    or re.search("blocked", output)
                    or re.search("idle", output)):
                raise error.TestFail("Run failed with right command")
Code example #26
0
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1.Make sure the network exists.
    2.Prepare network status.
    3.Perform virsh net-destroy operation.
    4.Check if the network has been destroyed.
    5.Recover network environment.
    6.Confirm the test result.
    """

    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")
    net_persistent = "yes" == params.get("net_persistent", "yes")
    net_cfg_file = params.get("net_cfg_file",
                              "/usr/share/libvirt/networks/default.xml")
    check_libvirtd = "yes" == params.get("check_libvirtd")
    vm_defined = "yes" == params.get("vm_defined")
    check_vm = "yes" == params.get("check_vm")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    output_all = virsh.net_list("--all").stdout.strip()
    # prepare the network status: active, persistent
    if not re.search(network_name, output_all):
        if net_persistent:
            virsh.net_define(net_cfg_file, ignore_status=False)
            virsh.net_start(network_name, ignore_status=False)
        else:
            virsh.create(net_cfg_file, ignore_status=False)
    if net_persistent:
        if not virsh.net_state_dict()[network_name]['persistent']:
            logging.debug("make the network persistent...")
            make_net_persistent(network_name)
    else:
        if virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_undefine(network_name, ignore_status=False)
    if not virsh.net_state_dict()[network_name]['active']:
        if network_status == "active":
            virsh.net_start(network_name, ignore_status=False)
    else:
        if network_status == "inactive":
            logging.debug(
                "destroy network as we need to test inactive network...")
            virsh.net_destroy(network_name, ignore_status=False)
    logging.debug("After prepare: %s" % virsh.net_state_dict())

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    if check_libvirtd or check_vm:
        vm_name = params.get("main_vm")
        if virsh.is_alive(vm_name):
            virsh.destroy(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml
        # make sure there is interface with source network as default
        iface_devices = vmxml.get_devices(device_type="interface")
        has_default_net = False
        for iface in iface_devices:
            source = iface.get_source()
            if 'network' in source.keys() and source['network'] == 'default':
                has_default_net = True
                break
            elif 'bridge' in source.keys() and source['bridge'] == 'virbr0':
                has_default_net = True
                break
        if not has_default_net:
            options = "network default --current"
            virsh.attach_interface(vm_name, options, ignore_status=False)
        try:
            if vm_defined:
                ret = virsh.start(vm_name)
            else:
                logging.debug("undefine the vm, then create the vm...")
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                virsh.undefine(vm_name)
                ret = virsh.create(vmxml.xml)
                logging.debug(ret.stdout)
            # check the create or start cmd status
            utils_test.libvirt.check_exit_status(
                ret, expect_error=(network_status != 'active'))
            status = 1

            if status_error != 'yes':
                cmd = "ps -ef | grep /usr/sbin/libvirtd | grep -v grep"
                # record the libvirt pid then destroy network
                libvirtd_pid = process.run(
                    cmd, shell=True).stdout_text.strip().split()[1]
                ret = virsh.net_destroy(net_ref,
                                        extra,
                                        uri=uri,
                                        debug=True,
                                        unprivileged_user=unprivileged_user,
                                        ignore_status=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                # check_libvirtd pid no change
                result = check_libvirtd_restart(libvirtd_pid, cmd)
                if result:
                    test.fail("libvirtd crashed after destroying the network!")
                else:
                    logging.debug(
                        "libvirtd did not crash after destroying the network")
                    status = 0
                if check_libvirtd:
                    # destroy vm, check libvirtd pid no change
                    ret = virsh.destroy(vm_name)
                    utils_test.libvirt.check_exit_status(ret,
                                                         expect_error=False)
                    result = check_libvirtd_restart(libvirtd_pid, cmd)
                    if result:
                        test.fail("libvirtd crashed after destroying the vm!")
                    else:
                        logging.debug(
                            "libvirtd did not crash after destroying the vm")
                        status = 0
                elif check_vm:
                    # restart libvirtd and check vm is running
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    if not virsh.is_alive(vm_name):
                        test.fail(
                            "vm shut down when the transient network was "
                            "destroyed and libvirtd restarted")
                    else:
                        status = 0

        finally:
            if not vm_defined:
                vmxml_backup.define()
            vmxml_backup.sync()

    else:
        readonly = (params.get("net_destroy_readonly", "no") == "yes")
        status = virsh.net_destroy(net_ref,
                                   extra,
                                   uri=uri,
                                   readonly=readonly,
                                   debug=True,
                                   unprivileged_user=unprivileged_user,
                                   ignore_status=True).exit_status
        # Confirm the network has been destroyed.
        if net_persistent:
            if virsh.net_state_dict()[network_name]['active']:
                status = 1
        else:
            output_all = virsh.net_list("--all").stdout.strip()
            if re.search(network_name, output_all):
                status = 1
                logging.debug(
                    "transient network should not exist after destroy")

    # Recover network status to system default status
    try:
        if network_name not in virsh.net_state_dict():
            virsh.net_define(net_cfg_file, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['active']:
            virsh.net_start(network_name, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['persistent']:
            make_net_persistent(network_name)
        if not virsh.net_state_dict()[network_name]['autostart']:
            virsh.net_autostart(network_name, ignore_status=False)
    except process.CmdError:
        test.error("Recover network status failed!")
    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
Code example #27
0
def run(test, params, env):
    """
    Test virsh vol-create and vol-create-as command to cover the following matrix:
    pool_type = [dir, fs, netfs]
    volume_format = [raw, bochs, cloop, cow, dmg, iso, qcow, qcow2, qed,
                     vmdk, vpc]

    pool_type = [disk]
    volume_format = [none, linux, fat16, fat32, linux-swap, linux-lvm,
                     linux-raid, extended]

    pool_type = [logical]
    volume_format = [none]

    pool_type = [iscsi, scsi]
    Not supported with format type

    TODO:
    pool_type = [rbd, glusterfs]

    Reference: http://www.libvirt.org/storage.html
    """

    src_pool_type = params.get("src_pool_type")
    src_pool_target = params.get("src_pool_target")
    src_pool_format = params.get("src_pool_format", "")
    pool_vol_num = int(params.get("src_pool_vol_num", '1'))
    src_emulated_image = params.get("src_emulated_image")
    extra_option = params.get("extra_option", "")
    prefix_vol_name = params.get("vol_name", "vol_create_test")
    vol_format = params.get("vol_format", "raw")
    vol_capacity = params.get("vol_capacity", 1048576)
    vol_allocation = params.get("vol_allocation", 1048576)
    image_size = params.get("emulate_image_size", "1G")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    status_error = "yes" == params.get("status_error", "no")
    by_xml = "yes" == params.get("create_vol_by_xml", "yes")
    incomplete_target = "yes" == params.get("incomplete_target", "no")
    luks_encrypted = "luks" == params.get("encryption_method")
    encryption_secret_type = params.get("encryption_secret_type", "passphrase")
    virsh_readonly_mode = 'yes' == params.get("virsh_readonly", "no")
    vm_name = params.get("main_vm")
    with_clusterSize = "yes" == params.get("with_clusterSize")
    clusterSize = params.get("vol_clusterSize", "64")
    libvirt_version.is_libvirt_feature_supported(params)

    if vm_name:
        vm = env.get_vm(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        params["orig_config_xml"] = vmxml.copy()

    if not libvirt_version.version_compare(1, 0, 0):
        if "--prealloc-metadata" in extra_option:
            test.cancel("metadata preallocation not supported in"
                        " current libvirt version.")
        if incomplete_target:
            test.cancel("It does not support generate target path"
                        "in current libvirt version.")

    pool_type = ['dir', 'disk', 'fs', 'logical', 'netfs', 'iscsi', 'scsi']
    if src_pool_type not in pool_type:
        test.cancel("pool type %s not in supported type list: %s" %
                    (src_pool_type, pool_type))

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Stop multipathd to avoid start pool fail(For fs like pool, the new add
    # disk may in use by device-mapper, so start pool will report disk already
    # mounted error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Set volume xml attribute dictionary, extract all params start with 'vol_'
    # which are for setting volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in [
                    'capacity', 'allocation', 'owner', 'group', 'clusterSize'
            ]:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    def attach_disk_encryption(vol_path, uuid, params):
        """
        Attach a disk with luks encryption

        :param vol_path: the volume path used in disk XML
        :param uuid: the secret uuid of the volume
        :param params: the parameter dictionary
        :raise: test.fail when disk cannot be attached

        """
        target_dev = params.get("target_dev", "vdb")
        vol_format = params.get("vol_format", "qcow2")
        disk_path = vol_path
        new_disk_dict = {}
        new_disk_dict.update({
            "driver_type": vol_format,
            "source_encryption_dict": {
                "encryption": 'luks',
                "secret": {
                    "type": "passphrase",
                    "uuid": uuid
                }
            }
        })
        result = utlv.attach_additional_device(vm_name, target_dev, disk_path,
                                               new_disk_dict)
        if result.exit_status:
            test.fail("Attach device %s failed." % target_dev)

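    # For orientation: the source_encryption_dict above maps to disk XML of
    # the form (sketch; the uuid is whatever secret uuid was passed in):
    #   <encryption format='luks'>
    #     <secret type='passphrase' uuid='...'/>
    #   </encryption>
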
    def check_vm_start():
        """
        Start the guest if it is not already running

        :raise: test.fail when VM cannot be started
        """
        if not vm.is_alive():
            try:
                vm.start()
            except virt_vm.VMStartError as err:
                test.fail("Failed to start VM: %s" % err)

    def create_luks_secret(vol_path):
        """
        Create secret for luks encryption
        :param vol_path: volume path.
        :return: secret uuid if created successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        utlv.check_exit_status(ret)
        # Get secret uuid.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError as detail:
            test.error("Fail to get newly created secret uuid: %s" % detail)
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            'redhat'.encode(encoding)).decode(encoding)
        ret = virsh.secret_set_value(encryption_uuid, secret_string)
        utlv.check_exit_status(ret)

        return encryption_uuid

    def post_process_vol(ori_vol_path):
        """
        Create or deactivate a volume without libvirt

        :param ori_vol_path: Full path of an original volume
        :return: Volume name for checking
        """
        process_vol_name = params.get("process_vol_name", "process_vol")
        process_vol_options = params.get("process_vol_options", "")
        process_vol_capacity = params.get("process_vol_capacity", vol_capacity)
        process_vol_cmd = ""
        unsupport_err = "Unsupported do '%s %s' in this test" % (
            process_vol_by, process_vol_type)
        if process_vol_by == "lvcreate":
            process_vol_cmd = "lvcreate -L %s " % process_vol_capacity
            if process_vol_type == "thin":
                if not process_vol_options:
                    process_vol_options = "-T "
                process_vol_cmd += "%s " % process_vol_options
                processthin_pool_name = params.get("processthin_pool_name",
                                                   "thinpool")
                processthin_vol_name = params.get("processthin_vol_name",
                                                  "thinvol")
                process_vol_capacity = params.get("process_vol_capacity", "1G")
                process_vol_cmd += "%s/%s " % (os.path.dirname(ori_vol_path),
                                               processthin_pool_name)
                process_vol_cmd += "-V %s " % process_vol_capacity
                process_vol_cmd += "-n %s " % processthin_vol_name
                process_vol_name = processthin_vol_name
            elif process_vol_type == "snapshot":
                if not process_vol_options:
                    process_vol_options = "-s "
                process_vol_cmd += "%s " % process_vol_options
                process_vol_cmd += "-n %s " % process_vol_name
                process_vol_cmd += "%s " % (ori_vol_path)
            else:
                logging.error(unsupport_err)
                return
        elif process_vol_by == "qemu-img" and process_vol_type == "create":
            process_vol_cmd = "qemu-img create "
            process_vol_path = os.path.dirname(ori_vol_path) + "/"
            process_vol_path += process_vol_name
            process_vol_cmd += "%s " % process_vol_options
            process_vol_cmd += "%s " % process_vol_path
            process_vol_cmd += "%s " % process_vol_capacity
        elif process_vol_by == "lvchange" and process_vol_type == "deactivate":
            process_vol_cmd = "lvchange %s " % ori_vol_path
            if not process_vol_options:
                process_vol_options = "-an"
            process_vol_cmd += process_vol_options
        else:
            logging.error(unsupport_err)
            return
        rst = process.run(process_vol_cmd, ignore_status=True, shell=True)
        if rst.exit_status:
            if "Snapshots of snapshots are not supported" in rst.stderr_text:
                logging.debug("%s is already a snapshot volume", ori_vol_path)
                process_vol_name = os.path.basename(ori_vol_path)
            else:
                logging.error(rst.stderr_text)
                return
        return process_vol_name

    def check_vol(pool_name, vol_name, expect_exist=True):
        """
        Check volume vol_name in pool pool_name
        """
        src_volumes = src_pv.list_volumes().keys()
        logging.debug("Current volumes in %s: %s", pool_name, src_volumes)
        if expect_exist:
            if vol_name not in src_volumes:
                test.fail("Can't find volume %s in pool %s" %
                          (vol_name, pool_name))
            # check format in volume xml
            volxml = libvirt_xml.VolXML()
            post_xml = volxml.new_from_vol_dumpxml(vol_name, pool_name)
            logging.debug("Volume %s XML: %s" %
                          (vol_name, post_xml.xmltreefile))
            if 'format' in post_xml.keys() and vol_format is not None:
                if post_xml.format != vol_format:
                    test.fail("Volume format %s is not expected" % vol_format +
                              " as defined.")
        else:
            if vol_name in src_volumes:
                test.fail("Find volume %s in pool %s, but expect not" %
                          (vol_name, pool_name))

    def attach_volume_disk(vm_name, src_pool_name, vol_name, params):
        """
        Attach volume disk to the guest

        :param vm_name: the name of the vm
        :param src_pool_name: the name of the pool
        :param vol_name: the name of the created volume
        :param params: the parameter dictionary
        """
        disk_target = params.get("disk_target", "vdb")
        disk_target_bus = params.get("disk_target_bus", "virtio")
        attach_options = params.get("attach_options", "")
        disk_params = {
            'device_type': 'disk',
            'type_name': 'volume',
            'target_dev': disk_target,
            'target_bus': disk_target_bus
        }
        disk_params_src = {}
        disk_params_src = {
            'source_pool': src_pool_name,
            'source_volume': vol_name,
            'driver_type': 'qcow2'
        }
        disk_params.update(disk_params_src)
        disk_xml = utlv.create_disk_xml(disk_params)
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_options,
                                         debug=True)
        utlv.check_exit_status(cmd_result, status_error)

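    # For orientation: create_disk_xml(disk_params) above yields a
    # volume-backed disk of the form (sketch; pool/volume names are
    # placeholders):
    #   <disk type='volume' device='disk'>
    #     <driver name='qemu' type='qcow2'/>
    #     <source pool='POOL' volume='VOL'/>
    #     <target dev='vdb' bus='virtio'/>
    #   </disk>
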
    fmt_err0 = "Unknown file format '%s'" % vol_format
    fmt_err1 = "Formatting or formatting option not "
    fmt_err1 += "supported for file format '%s'" % vol_format
    fmt_err2 = "Driver '%s' does not support " % vol_format
    fmt_err2 += "image creation"
    fmt_err_list = [fmt_err0, fmt_err1, fmt_err2]
    skip_msg = "Volume format '%s' is not supported by qemu-img" % vol_format
    vol_path_list = []
    secret_uuids = []
    try:
        # Create the src pool
        src_pool_name = "virt-%s-pool" % src_pool_type
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(src_pool_name,
                     src_pool_type,
                     src_pool_target,
                     src_emulated_image,
                     image_size=image_size,
                     source_format=src_pool_format)

        src_pv = libvirt_storage.PoolVolume(src_pool_name)
        src_pool_uuid = libvirt_storage.StoragePool().pool_info(
            src_pool_name)['UUID']
        # Print current pools for debugging
        logging.debug("Current pools:%s",
                      libvirt_storage.StoragePool().list_pools())
        # Create volumes by virsh in a loop
        while pool_vol_num > 0:
            # Set volume xml file
            vol_name = prefix_vol_name + "_%s" % pool_vol_num
            bad_vol_name = params.get("bad_vol_name", "")
            if bad_vol_name:
                vol_name = bad_vol_name
            pool_vol_num -= 1
            # disk partition for new volume
            if src_pool_type == "disk":
                vol_name = utlv.new_disk_vol_name(src_pool_name)
                if vol_name is None:
                    test.error("Fail to generate volume name")
            if by_xml:
                # According to BZ#1138523, we need to inspect the right name
                # (disk partition) for the new volume
                if src_pool_type == "disk":
                    vol_name = utlv.new_disk_vol_name(src_pool_name)
                    if vol_name is None:
                        test.error("Fail to generate volume name")
                vol_arg['name'] = vol_name
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                if luks_encrypted:
                    if vol_format == "qcow2" and not libvirt_version.version_compare(
                            6, 10, 0):
                        test.cancel("Qcow2 format with luks encryption"
                                    " is not supported in current libvirt")
                    # For luks encrypted disk, add related xml in newvol
                    luks_encryption_params = {}
                    luks_encryption_params.update({"format": "luks"})
                    luks_secret_uuid = create_luks_secret(
                        os.path.join(src_pool_target, vol_name))
                    secret_uuids.append(luks_secret_uuid)
                    luks_encryption_params.update({
                        "secret": {
                            "type": encryption_secret_type,
                            "uuid": luks_secret_uuid
                        }
                    })
                    newvol.encryption = volxml.new_encryption(
                        **luks_encryption_params)
                vol_xml = newvol['xml']
                if params.get('setup_libvirt_polkit') == 'yes':
                    process.run("chmod 666 %s" % vol_xml,
                                ignore_status=True,
                                shell=True)
                    if luks_encrypted and libvirt_version.version_compare(
                            4, 5, 0):
                        try:
                            polkit = test_setup.LibvirtPolkitConfig(params)
                            polkit_rules_path = polkit.polkit_rules_path
                            with open(polkit_rules_path, 'r+') as f:
                                rule = f.readlines()
                                for index, v in enumerate(rule):
                                    if v.find("secret") >= 0:
                                        nextline = rule[index + 1]
                                        s = nextline.replace(
                                            "QEMU", "secret").replace(
                                                "pool_name",
                                                "secret_uuid").replace(
                                                    "virt-dir-pool",
                                                    "%s" % luks_secret_uuid)
                                        rule[index + 1] = s
                                rule = ''.join(rule)
                            with open(polkit_rules_path, 'w+') as f:
                                f.write(rule)
                            logging.debug(rule)
                            polkit.polkitd.restart()
                        except IOError as e:
                            logging.error(e)
                # Run virsh_vol_create to create vol
                logging.debug("Create volume from XML: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(
                    src_pool_name,
                    vol_xml,
                    extra_option,
                    unprivileged_user=unprivileged_user,
                    uri=uri,
                    ignore_status=True,
                    debug=True)

                if luks_encrypted and vol_format == "qcow2":
                    vol_path = virsh.vol_path(vol_name,
                                              src_pool_name).stdout.strip()
                    uuid = luks_secret_uuid
                    attach_disk_encryption(vol_path, uuid, params)
                    check_vm_start()
                if with_clusterSize:
                    clusterSize_B = int(clusterSize) * 1024
                    # check cluster size in volume xml
                    volume_xml = volxml.new_from_vol_dumpxml(
                        vol_name, src_pool_name)
                    check_point = "%s</clusterSize>" % clusterSize_B
                    if check_point in str(volume_xml):
                        logging.debug(
                            "Can get expected cluster size in volume xml")
                    else:
                        test.fail(
                            "Can't get expected cluster size %s in volume xml %s"
                            % (clusterSize_B, volume_xml))
                    # check cluster size in qemu-img info
                    vol_path = virsh.vol_path(vol_name,
                                              src_pool_name).stdout.strip()
                    ret = process.run("qemu-img info %s | grep cluster_size" %
                                      vol_path,
                                      shell=True)
                    ret_clusterSize = int(
                        ret.stdout_text.split(':')[1].strip())
                    if clusterSize_B == ret_clusterSize:
                        logging.debug(
                            "Can get expected cluster size in qemu-img info")
                    else:
                        test.fail(
                            "Gan't get expected cluster size %s in the image, the"
                            " incorrect cluster size is %s" %
                            (clusterSize_B, ret_clusterSize))
                    # start the guest with volume disk
                    attach_volume_disk(vm_name, src_pool_name, vol_name,
                                       params)
                    check_vm_start()

            else:
                # Run virsh_vol_create_as to create_vol
                pool_name = src_pool_name
                if params.get("create_vol_by_pool_uuid") == "yes":
                    pool_name = src_pool_uuid
                cmd_result = virsh.vol_create_as(
                    vol_name,
                    pool_name,
                    vol_capacity,
                    vol_allocation,
                    vol_format,
                    extra_option,
                    unprivileged_user=unprivileged_user,
                    uri=uri,
                    readonly=virsh_readonly_mode,
                    ignore_status=True,
                    debug=True)
            # Check result
            try:
                utlv.check_exit_status(cmd_result, status_error)
                check_vol(src_pool_name, vol_name, not status_error)
                if bad_vol_name:
                    pattern = "volume name '%s' cannot contain '/'" % vol_name
                    logging.debug("pattern: %s", pattern)
                    if "\\" in pattern and by_xml:
                        pattern = pattern.replace("\\", "\\\\")
                    if re.search(pattern, cmd_result.stderr) is None:
                        test.fail("vol-create failed with unexpected reason")
                if not status_error:
                    vol_path = virsh.vol_path(vol_name,
                                              src_pool_name).stdout.strip()
                    logging.debug("Full path of %s: %s", vol_name, vol_path)
                    vol_path_list.append(vol_path)
            except exceptions.TestFail as detail:
                stderr = cmd_result.stderr
                if any(err in stderr for err in fmt_err_list):
                    test.cancel(skip_msg)
                else:
                    test.fail("Create volume fail:\n%s" % detail)
        # Post process vol by other programs
        process_vol_by = params.get("process_vol_by")
        process_vol_type = params.get("process_vol_type", "")
        expect_vol_exist = "yes" == params.get("expect_vol_exist", "yes")
        if process_vol_by and vol_path_list:
            process_vol = post_process_vol(vol_path_list[0])
            if process_vol is not None:
                try:
                    virsh.pool_refresh(src_pool_name, ignore_status=False)
                    check_vol(src_pool_name, process_vol, expect_vol_exist)
                except (process.CmdError, exceptions.TestFail) as detail:
                    if process_vol_type == "thin":
                        logging.error(str(detail))
                        test.cancel("You may encounter bug BZ#1060287")
                    else:
                        test.fail("Fail to refresh pool:\n%s" % detail)
            else:
                test.fail("Post process volume failed")
    finally:
        # Cleanup
        # For old version lvm2(2.02.106 or early), deactivate volume group
        # (destroy libvirt logical pool) will fail if which has deactivated
        # lv snapshot, so before destroy the pool, we need activate it manually
        if vm.is_alive():
            virsh.destroy(vm_name, debug=True, ignore_status=True)
        if params.get("orig_config_xml"):
            params.get("orig_config_xml").sync()
        if src_pool_type == 'logical' and vol_path_list:
            vg_name = vol_path_list[0].split('/')[2]
            process.run("lvchange -ay %s" % vg_name, shell=True)
        try:
            pvt.cleanup_pool(src_pool_name, src_pool_type, src_pool_target,
                             src_emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
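
The two creation branches above wrap "virsh vol-create" (XML file) and "virsh vol-create-as" (inline arguments). A minimal volume XML of the kind the XML branch feeds to virsh.vol_create, with placeholder name and byte sizes:

vol_xml = """<volume>
  <name>vol_create_test_1</name>
  <capacity>1048576</capacity>
  <allocation>1048576</allocation>
  <target>
    <format type='qcow2'/>
  </target>
</volume>"""
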
Code example #28
0
File: virsh_save.py Project: kmaehara/virt-test
def run_virsh_save(test, params, env):
    """
    Test command: virsh save.

    The command can save the RAM state of a running domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh save command with assigned options.
    4.Recover test environment. (If the libvirtd service is stopped, start
      the libvirtd service.)
    5.Confirm the test result.

    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    domid = virsh.domid(vm_name).strip()
    domuuid = virsh.domuuid(vm_name).strip()

    savefile = params.get("save_file")
    pre_vm_state = params.get("save_pre_vm_state", "null")
    libvirtd = params.get("save_libvirtd")
    extra_param = params.get("save_extra_param")
    vm_ref = params.get("save_vm_ref")

    # prepare the environment
    if vm_ref == "name" and pre_vm_state == "paused":
        virsh.suspend(vm_name)
    elif vm_ref == "name" and pre_vm_state == "shut off":
        virsh.destroy(vm_name)

    # set the option
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "save_invalid_id" or vm_ref == "save_invalid_uuid":
        vm_ref = params.get(vm_ref)
    elif vm_ref.find("name") != -1 or vm_ref == "extra_param":
        savefile = "%s %s" % (savefile, extra_param)
        if vm_ref == "only_name":
            savefile = " "
        vm_ref = vm_name

    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()
    status = virsh.save(vm_ref, savefile, ignore_status=True).exit_status

    # recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # cleanup
    if os.path.exists(savefile):
        virsh.restore(savefile)
        os.remove(savefile)

    # check status_error
    status_error = params.get("save_status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
Code example #29
0
def run(test, params, env):
    """
    Test virsh snapshot command when disk in all kinds of type.

    (1). Init the variables from params.
    (2). Create an image with a specific format.
    (3). Attach disk to vm.
    (4). Snapshot create.
    (5). Snapshot revert.
    (6). Cleanup.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")

    # Get a tmp_dir.
    tmp_dir = data_dir.get_tmp_dir()
    # Create a image.
    params['image_name'] = "snapshot_test"
    params['image_format'] = image_format
    params['image_size'] = "1M"
    image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
    img_path, _ = image.create(params)
    # Do the attach action.
    result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                               extra="--persistent --subdriver %s" % image_format)
    if result.exit_status:
        raise error.TestNAError("Failed to attach disk %s to VM."
                                "Detail: %s." % (img_path, result.stderr))

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    try:
        # Create snapshot.
        if snapshot_from_xml:
            snapshot_name = "snapshot_test"
            lines = ["<domainsnapshot>\n",
                     "<name>%s</name>\n" % snapshot_name,
                     "<description>Snapshot Test</description>\n"]
            if snapshot_memory == "external":
                memory_external = os.path.join(tmp_dir, "snapshot_memory")
                snapshot_external_disk.append(memory_external)
                lines.append("<memory snapshot=\'%s\' file='%s'/>\n" %
                             (snapshot_memory, memory_external))
            else:
                lines.append("<memory snapshot='%s'/>\n" % snapshot_memory)

            # Add all disks into xml file.
            disks = vm.get_disk_devices().values()
            lines.append("<disks>\n")
            for disk in disks:
                lines.append("<disk name='%s' snapshot='%s'>\n" %
                             (disk['source'], snapshot_disk))
                if snapshot_disk == "external":
                    disk_external = os.path.join(tmp_dir,
                                                 "%s.snap" % os.path.basename(disk['source']))
                    snapshot_external_disk.append(disk_external)
                    lines.append("<source file='%s'/>\n" % disk_external)
                lines.append("</disk>\n")
            lines.append("</disks>\n")
            lines.append("</domainsnapshot>")

            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            snapshot_xml_file = open(snapshot_xml_path, "w")
            snapshot_xml_file.writelines(lines)
            snapshot_xml_file.close()
            snapshot_result = virsh.snapshot_create(
                vm_name, ("--xmlfile %s" % snapshot_xml_path))
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
        else:
            options = ""
            snapshot_result = virsh.snapshot_create(vm_name, options)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)
            if snapshot_current:
                lines = ["<domainsnapshot>\n",
                         "<description>Snapshot Test</description>\n",
                         "<state>running</state>\n",
                         "<creationTime>%s</creationTime>" % snapshot_name,
                         "</domainsnapshot>"]
                snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
                snapshot_xml_file = open(snapshot_xml_path, "w")
                snapshot_xml_file.writelines(lines)
                snapshot_xml_file.close()
                options += "--redefine %s --current" % snapshot_xml_path
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            raise error.TestFail("Success to create snapshot in negative case\n"
                                 "Detail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        status, output = session.cmd_status_output("touch %s" % tmp_file_path)
        if status:
            raise error.TestFail("Touch file in vm failed. %s" % output)

        session.close()

        # Destroy vm for snapshot revert.
        virsh.destroy(vm_name)
        # Revert snapshot.
        revert_options = ""
        if snapshot_revert_paused:
            revert_options += " --paused"
        revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                              revert_options)
        if revert_result.exit_status:
            raise error.TestFail(
                "Revert snapshot failed. %s" % revert_result.stderr.strip())

        if vm.is_dead():
            raise error.TestFail("Revert snapshot failed.")

        if snapshot_revert_paused:
            if vm.is_paused():
                vm.resume()
            else:
                raise error.TestFail("Revert command successed, but VM is not "
                                     "paused after reverting with --paused option.")
        # login vm.
        session = vm.wait_for_login()
        # Check the result of revert.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()

    finally:
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        image.remove()
        if snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")
Code example #30
0
def reset_env(vm_name, xml_file):
    """
    Destroy and undefine the domain, redefine it from the saved XML,
    then remove the temporary XML file.
    """
    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
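
Typical usage pairs reset_env() with an XML backup taken before the test mutates the domain; a sketch (the backup helper here is an assumption, not part of this listing):

xml_file = vm_xml.VMXML.backup_xml(vm_name)  # before the test body
try:
    pass  # test steps that modify or break the domain
finally:
    reset_env(vm_name, xml_file)
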
Code example #31
0
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment. (libvirtd service)
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domain_status = params.get("reboot_pre_domian_status", "running")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        try:
            vm.prepare_guest_agent(channel=agent, start=agent)
        except virt_vm.VMError as e:
            logging.debug(e)
            # qemu-guest-agent is not available on RHEL5
            raise error.TestNAError(
                "qemu-guest-agent package is not available")

        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except (remote.LoginError, error.CmdError, aexpect.ShellError) as e:
                logging.error("Exception: %s", str(e))
                status = -1
Code example #32
0
    def run_test(dev_type, params, test_obj=None):
        """
        Test domain lifecycle

        1) Start the vm and check network
        2) Destroy and start the VM, and check network
        3) Save and restore, and check network
        4) Suspend and resume, and check network
        5) Reboot the VM and check the network
        """
        # Setup Iface device
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_dict = eval(params.get('iface_dict', '{}'))
        iface_dev = interface_base.create_iface(dev_type, iface_dict)
        libvirt.add_vm_device(vmxml, iface_dev)

        logging.info("Start a VM with a '%s' type interface.", dev_type)
        vm.start()
        vm.wait_for_serial_login(timeout=240).close()
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 **params)

        logging.info("Destroy and start the VM.")
        virsh.destroy(vm.name, **VIRSH_ARGS)
        virsh.start(vm.name, **VIRSH_ARGS)
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 config_vdpa=True,
                                                 **params)

        logging.info("Save the VM.")
        save_error = "yes" == params.get("save_error", "no")
        save_path = os.path.join(data_dir.get_tmp_dir(), vm.name + '.save')
        res = virsh.save(vm.name, save_path, debug=True)
        libvirt.check_exit_status(res, expect_error=save_error)
        if not save_error:
            logging.info("Restore vm.")
            virsh.restore(save_path, **VIRSH_ARGS)
            check_points.check_network_accessibility(vm,
                                                     test_obj=test_obj,
                                                     config_vdpa=False,
                                                     **params)

        logging.info("Suspend and resume the vm.")
        virsh.suspend(vm.name, **VIRSH_ARGS)
        if not libvirt.check_vm_state(vm_name, "paused"):
            test.fail("VM should be paused!")
        virsh.resume(vm.name, **VIRSH_ARGS)
        if not libvirt.check_vm_state(vm_name, "running"):
            test.fail("VM should be running!")
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 config_vdpa=False,
                                                 **params)

        logging.debug("Reboot VM and check network.")
        virsh.reboot(vm.name, **VIRSH_ARGS)
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 config_vdpa=False,
                                                 **params)
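
VIRSH_ARGS is used throughout run_test() but defined elsewhere in the module; a common definition in these tests (an assumption here) is:

VIRSH_ARGS = {'ignore_status': False, 'debug': True}
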
Code example #33
0
def run(test, params, env):
    """
    Test virtiofs filesystem device:

    1.Start guest with 1/2 virtiofs filesystem devices.
    2.Start 2 guests with the same virtiofs filesystem device.
    3.Coldplug/Coldunplug virtiofs filesystem device
    4.Share data between guests and host.
    5.Lifecycle for guest with virtiofs filesystem device.
    """
    def generate_expected_process_option(expected_results):
        """
        Generate expected virtiofsd process option
        """
        if cache_mode != "auto":
            expected_results = "cache=%s" % cache_mode
        if xattr == "on":
            expected_results += ",xattr"
        elif xattr == "off":
            expected_results += ",no_xattr"
        if flock == "on":
            expected_results += ",flock"
        else:
            expected_results += ",no_flock"
        if lock_posix == "on":
            expected_results += ",posix_lock"
        else:
            expected_results += ",no_posix_lock"
        logging.debug(expected_results)
        return expected_results
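
    # For example, with cache_mode=none, xattr=on, flock=on and
    # lock_posix=on the function returns
    # "cache=none,xattr,flock,posix_lock", which is later matched against
    # the virtiofsd command line.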

    def shared_data(vm_names, fs_devs):
        """
        Share data between guests and host:
        1.Mount dir in guest;
        2.Write a file in guest;
        3.Check that the md5sum values are the same in guests and host;
        """
        md5s = []
        for vm in vms:
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                logging.debug(fs_dev)
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=False)
                session.cmd('mkdir -p %s' % mount_dir)
                logging.debug("mount virtiofs dir in guest")
                cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'],
                                                   mount_dir)
                status, output = session.cmd_status_output(cmd, timeout=300)
                if status != 0:
                    session.close()
                    test.fail("mount virtiofs dir failed: %s" % output)
                if vm == vms[0]:
                    filename_guest = mount_dir + '/' + vm.name
                    cmd = "dd if=/dev/urandom of=%s bs=1M count=512 oflag=direct" % filename_guest
                    status, output = session.cmd_status_output(cmd,
                                                               timeout=300)
                    if status != 0:
                        session.close()
                        test.fail("Write data failed: %s" % output)
                md5_value = session.cmd_status_output(
                    "md5sum %s" % filename_guest)[1].strip().split()[0]
                md5s.append(md5_value)
                logging.debug(md5_value)
                md5_value = process.run(
                    "md5sum %s" %
                    filename_guest).stdout_text.strip().split()[0]
                logging.debug(md5_value)
                md5s.append(md5_value)
            session.close()
        if len(set(md5s)) != len(fs_devs):
            test.fail("The md5sum value are not the same in guests and host")

    def launch_externally_virtiofs(source_dir, source_socket):
        """
        Launch externally virtiofs

        :param source_dir:  the dir shared on host
        :param source_socket: the socket file listened on
        """
        process.run('chcon -t virtd_exec_t %s' % path,
                    ignore_status=False,
                    shell=True)
        cmd = "systemd-run %s --socket-path=%s -o source=%s" % (
            path, source_socket, source_dir)
        try:
            process.run(cmd, ignore_status=False, shell=True)
            # Wait until the socket file is created
            utils_misc.wait_for(lambda: os.path.exists(source_socket),
                                timeout=3)
            process.run("chown qemu:qemu %s" % source_socket,
                        ignore_status=False)
            process.run('chcon -t svirt_image_t %s' % source_socket,
                        ignore_status=False,
                        shell=True)
        except Exception as err:
            cmd = "pkill virtiofsd"
            process.run(cmd, shell=True)
            test.fail("{}".format(err))

    def prepare_stress_script(script_path, script_content):
        """
        Refer to xfstests generic/531. Create a stress script that creates
        a lot of unlinked files.

        :param script_path: The path of the script
        :param script_content: The content of the stress script
        """
        logging.debug("stress script path: %s content: %s" %
                      (script_path, script_content))
        script_lines = script_content.split(';')
        try:
            with open(script_path, 'w') as fd:
                fd.write('\n'.join(script_lines))
            os.chmod(script_path, 0o777)
        except Exception as e:
            test.error("Prepare the guest stress script failed %s" % e)

    def run_stress_script(session, script_path):
        """
        Run stress script in the guest

        :param session: guest session
        :param script_path: The path of script in the guest
        """
        # Set ULIMIT_NOFILE to increase the number of unlinked files
        session.cmd("ulimit -n 500000 && /usr/bin/python3 %s" % script_path,
                    timeout=120)

    def umount_fs(vm):
        """
        Unmount the filesystem in guest

        :param vm: filesystem in this vm that should be unmounted
        """
        if vm.is_alive():
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('umount -f %s' % mount_dir, ignore_all_errors=True)
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
            session.close()

    start_vm = params.get("start_vm", "no")
    vm_names = params.get("vms", "avocado-vt-vm1").split()
    cache_mode = params.get("cache_mode", "none")
    xattr = params.get("xattr", "on")
    lock_posix = params.get("lock_posix", "on")
    flock = params.get("flock", "on")
    xattr = params.get("xattr", "on")
    path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd")
    queue_size = int(params.get("queue_size", "512"))
    driver_type = params.get("driver_type", "virtiofs")
    guest_num = int(params.get("guest_num", "1"))
    fs_num = int(params.get("fs_num", "1"))
    vcpus_per_cell = int(params.get("vcpus_per_cell", 2))
    dir_prefix = params.get("dir_prefix", "mount_tag")
    error_msg_start = params.get("error_msg_start", "")
    error_msg_save = params.get("error_msg_save", "")
    status_error = params.get("status_error", "no") == "yes"
    socket_file_checking = params.get("socket_file_checking", "no") == "yes"
    suspend_resume = params.get("suspend_resume", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    coldplug = params.get("coldplug", "no") == "yes"
    hotplug_unplug = params.get("hotplug_unplug", "no") == "yes"
    detach_device_alias = params.get("detach_device_alias", "no") == "yes"
    extra_hugepages = params.get_numeric("extra_hugepages")
    edit_start = params.get("edit_start", "no") == "yes"
    with_hugepages = params.get("with_hugepages", "yes") == "yes"
    with_numa = params.get("with_numa", "yes") == "yes"
    with_memfd = params.get("with_memfd", "no") == "yes"
    source_socket = params.get("source_socket", "/var/tmp/vm001.socket")
    launched_mode = params.get("launched_mode", "auto")
    destroy_start = params.get("destroy_start", "no") == "yes"
    bug_url = params.get("bug_url", "")
    script_content = params.get("stress_script", "")

    fs_devs = []
    vms = []
    vmxml_backups = []
    expected_fails_msg = []
    expected_results = ""
    host_hp_size = utils_memory.get_huge_page_size()
    backup_huge_pages_num = utils_memory.get_num_huge_pages()
    huge_pages_num = 0

    if len(vm_names) != guest_num:
        test.cancel("This test needs exactly %d vms." % guest_num)

    if not libvirt_version.version_compare(7, 0, 0) and not with_numa:
        test.cancel("Not supported without NUMA before 7.0.0")

    if not libvirt_version.version_compare(7, 6, 0) and destroy_start:
        test.cancel("Bug %s is not fixed on current build" % bug_url)

    try:
        # Define filesystem device xml
        for index in range(fs_num):
            driver = {'type': driver_type, 'queue': queue_size}
            source_dir = os.path.join('/var/tmp/',
                                      str(dir_prefix) + str(index))
            logging.debug(source_dir)
            if not os.path.isdir(source_dir):
                os.mkdir(source_dir)
            target_dir = dir_prefix + str(index)
            source = {'socket': source_socket}
            target = {'dir': target_dir}
            if launched_mode == "auto":
                binary_keys = [
                    'path', 'cache_mode', 'xattr', 'lock_posix', 'flock'
                ]
                binary_values = [path, cache_mode, xattr, lock_posix, flock]
                binary_dict = dict(zip(binary_keys, binary_values))
                source = {'dir': source_dir}
                accessmode = "passthrough"
                fsdev_keys = [
                    'accessmode', 'driver', 'source', 'target', 'binary'
                ]
                fsdev_values = [
                    accessmode, driver, source, target, binary_dict
                ]
            else:
                fsdev_keys = ['driver', 'source', 'target']
                fsdev_values = [driver, source, target]
            fsdev_dict = dict(zip(fsdev_keys, fsdev_values))
            logging.debug(fsdev_dict)
            fs_dev = libvirt_device_utils.create_fs_xml(
                fsdev_dict, launched_mode)
            logging.debug(fs_dev)
            fs_devs.append(fs_dev)

        # Start guest with virtiofs filesystem device
        for index in range(guest_num):
            logging.debug("prepare vm %s", vm_names[index])
            vm = env.get_vm(vm_names[index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            if vmxml.max_mem < 1024000:
                vmxml.max_mem = 1024000
            if with_hugepages:
                huge_pages_num += vmxml.max_mem // host_hp_size + extra_hugepages
                utils_memory.set_num_huge_pages(huge_pages_num)
            vmxml.remove_all_device_by_type('filesystem')
            vmxml.sync()
            numa_no = None
            if with_numa:
                numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1
            vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name,
                                      vmxml.vcpu,
                                      numa_number=numa_no)
            vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,
                                               access_mode="shared",
                                               hpgs=with_hugepages,
                                               memfd=with_memfd)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            logging.debug(vmxml)
            if launched_mode == "externally":
                launch_externally_virtiofs(source_dir, source_socket)
            if coldplug:
                ret = virsh.attach_device(vm_names[index],
                                          fs_devs[0].xml,
                                          flagstr='--config',
                                          debug=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
            else:
                if not hotplug_unplug:
                    for fs in fs_devs:
                        vmxml.add_device(fs)
                        vmxml.sync()
            logging.debug(vmxml)
            libvirt_pcicontr.reset_pci_num(vm_names[index])
            result = virsh.start(vm_names[index], debug=True)
            if hotplug_unplug:
                for fs_dev in fs_devs:
                    ret = virsh.attach_device(vm_names[index],
                                              fs_dev.xml,
                                              ignore_status=True,
                                              debug=True)
                    libvirt.check_exit_status(ret, status_error)
                if status_error:
                    return
            if status_error and not managedsave:
                expected_error = error_msg_start
                utils_test.libvirt.check_exit_status(result, expected_error)
                return
            else:
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            expected_results = generate_expected_process_option(
                expected_results)
            if launched_mode == "auto":
                cmd = 'ps aux | grep virtiofsd | head -n 1'
                utils_test.libvirt.check_cmd_output(cmd,
                                                    content=expected_results)

        if managedsave:
            expected_error = error_msg_save
            result = virsh.managedsave(vm_names[0],
                                       ignore_status=True,
                                       debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        else:
            shared_data(vm_names, fs_devs)
            if suspend_resume:
                virsh.suspend(vm_names[0], debug=True, ignore_status=False)
                time.sleep(30)
                virsh.resume(vm_names[0], debug=True, ignore_status=False)
            elif destroy_start:
                session = vm.wait_for_login(timeout=120)
                # Prepare the guest test script
                script_path = os.path.join(fs_devs[0].source["dir"], "test.py")
                script_content %= (fs_devs[0].source["dir"],
                                   fs_devs[0].source["dir"])
                prepare_stress_script(script_path, script_content)
                # Run guest stress script
                stress_script_thread = threading.Thread(
                    target=run_stress_script, args=(session, script_path))
                stress_script_thread.daemon = True
                stress_script_thread.start()
                # Give the script time to create a lot of unlinked files
                time.sleep(60)
                virsh.destroy(vm_names[0], debug=True, ignore_status=False)
                ret = virsh.start(vm_names[0], debug=True)
                libvirt.check_exit_status(ret)
            elif edit_start:
                vmxml_virtio_backup = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_names[0])
                if vm.is_alive():
                    virsh.destroy(vm_names[0])
                    cmd = "virt-xml %s --edit --qemu-commandline '\-foo'" % vm_names[
                        0]
                    cmd_result = process.run(cmd,
                                             ignore_status=True,
                                             shell=True)
                    logging.debug(virsh.dumpxml(vm_names[0]))
                    if cmd_result.exit_status:
                        test.error("virt-xml edit guest failed: %s" %
                                   cmd_result)
                    result = virsh.start(vm_names[0],
                                         ignore_status=True,
                                         debug=True)
                    if error_msg_start:
                        expected_fails_msg.append(error_msg_start)
                    utils_test.libvirt.check_result(
                        result, expected_fails=expected_fails_msg)
                    if not libvirt_version.version_compare(6, 10, 0):
                        # Bug #1897105 was fixed in libvirt-6.10.0; before
                        # that version the env must be recovered manually.
                        cmd = "pkill virtiofsd"
                        process.run(cmd, shell=True)
                    if not vm.is_alive():
                        # Restoring vm and check if vm can start successfully
                        vmxml_virtio_backup.sync()
                        virsh.start(vm_names[0], ignore_status=False)
            elif socket_file_checking:
                result = virsh.domid(vm_names[0])
                domid = result.stdout.strip()
                domain_dir = "var/lib/libvirt/qemu/domain-" + domid + '-' + vm_names[
                    0]
                if result.exit_status:
                    test.fail("Get domid failed.")
                    for fs_dev in fs_devs:
                        alias = fs_dev.alias['name']
                        expected_pid = domain_dir + alias + '-fs.pid'
                        expected_sock = alias + '-fs.sock'
                        status1 = process.run('ls -l %s' % expected_pid,
                                              shell=True).exit_status
                        status2 = process.run('ls -l %s' % expected_sock,
                                              shell=True).exit_status
                        if not (status1 and status2):
                            test.fail(
                                "The socket and pid file is not as expected")
            elif hotplug_unplug:
                for vm in vms:
                    umount_fs(vm)
                    for fs_dev in fs_devs:
                        if detach_device_alias:
                            alias = fs_dev.alias['name']
                            ret = virsh.detach_device_alias(vm.name,
                                                            alias,
                                                            ignore_status=True,
                                                            debug=True)
                        else:
                            ret = virsh.detach_device(vm.name,
                                                      fs_dev.xml,
                                                      ignore_status=True,
                                                      debug=True)
                        libvirt.check_exit_status(ret, status_error)
    finally:
        for vm in vms:
            if vm.is_alive():
                umount_fs(vm)
                vm.destroy(gracefully=False)
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
        for index in range(fs_num):
            process.run('rm -rf /var/tmp/%s%s' % (dir_prefix, index),
                        ignore_status=False)
        process.run('rm -rf %s' % source_socket,
                    ignore_status=False,
                    shell=True)
        if launched_mode == "externally":
            process.run('restorecon %s' % path,
                        ignore_status=False,
                        shell=True)
        utils_memory.set_num_huge_pages(backup_huge_pages_num)
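
A note on the semicolon-separated stress_script parameter that prepare_stress_script() above consumes: the destroy_start branch fills its two %s placeholders with the shared source dir, then the helper splits the string on ';' and writes one statement per line. A minimal sketch of an equivalent value (hypothetical; the real content comes from the test configuration and is modeled on xfstests generic/531):

# Hypothetical stress_script value: one string, ';' separating statements,
# two %s placeholders later substituted with the shared directory.
stress_script = ("import os;"
                 "fds = [open(os.path.join('%s', str(i)), 'w') for i in range(1000)];"
                 "[os.unlink(os.path.join('%s', str(i))) for i in range(1000)];"
                 "import time;"
                 "time.sleep(600)")
# After the substitution and the split/join in prepare_stress_script(), the
# guest holds 1000 open-but-unlinked files while sleeping, which is exactly
# the state the destroy_start case exercises with virsh destroy.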
コード例 #34
0
ファイル: virsh_event.py プロジェクト: pombredanne/tp-libvirt
    def trigger_events(dom, events_list=None):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: the list of events to trigger
        :return: the expected output that virsh event command prints out
        """
        events_list = events_list or []
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        logging.debug(dom.name)
        xmlfile = dom.backup_xml()
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)

        try:
            for event in events_list:
                logging.debug("Current event is: %s", event)
                if event in [
                        'start', 'restore', 'create', 'edit', 'define',
                        'undefine', 'crash', 'device-removal-failed',
                        'watchdog', 'io-error'
                ]:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    # Check whether the 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName(
                        "description")
                    if not find_desc:
                        # If it doesn't exist, add one.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config",
                                   "Added desc for testvm", **virsh_dargs)
                    # The edit operation deletes the 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for the shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reason is distinguished since qemu-2.9.0-9
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=90)
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "block-threshold":
                    add_disk(dom.name, new_disk, 'vdb', '', format=disk_format)
                    logging.debug(process.run('qemu-img info %s -U' %
                                              new_disk))
                    virsh.domblkthreshold(vm_name, 'vdb', '100M')
                    session = dom.wait_for_login()
                    session.cmd(
                        "mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                        "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync"
                    )
                    time.sleep(5)
                    session.close()
                    expected_events_list.append(
                        "'block-threshold' for %s:"
                        " dev: vdb(%s)  104857600 29368320")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(
                            dom.name, new_disk, target_device,
                            ("--type cdrom --sourcetype file --driver qemu " +
                             "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                elif event == "hwclock":
                    session = dom.wait_for_login()
                    try:
                        session.cmd("hwclock --systohc", timeout=60)
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'rtc-change' for %s:")
                elif event == "metadata_set":
                    metadata_uri = params.get("metadata_uri")
                    metadata_key = params.get("metadata_key")
                    metadata_value = params.get("metadata_value")
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   new_metadata=metadata_value,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_edit":
                    metadata_uri = "http://herp.derp/"
                    metadata_key = "herp"
                    metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>"
                    virsh_cmd = r"virsh metadata %s --uri %s --key %s %s"
                    virsh_cmd = virsh_cmd % (dom.name, metadata_uri,
                                             metadata_key, "--edit")
                    session = aexpect.ShellSession("sudo -s")
                    logging.info("Running command: %s", virsh_cmd)
                    try:
                        session.sendline(virsh_cmd)
                        session.sendline(r":insert")
                        session.sendline(metadata_value)
                        session.sendline(".")
                        session.send('ZZ')
                        remote.handle_prompts(session,
                                              None,
                                              None,
                                              r"[\#\$]\s*$",
                                              debug=True,
                                              timeout=60)
                    except Exception as e:
                        test.error("Error occured: %s" % e)
                    session.close()
                    # Check metadata after edit
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_remove":
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="--remove",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "detach-dimm":
                    prepare_vmxml_mem(vmxml)
                    tg_size = params.get("dimm_size")
                    tg_sizeunit = params.get("dimm_unit")
                    dimm_xml = utils_hotplug.create_mem_xml(
                        tg_size, None, None, tg_sizeunit)
                    virsh.attach_device(dom.name,
                                        dimm_xml.xml,
                                        flagstr="--config",
                                        **virsh_dargs)
                    vmxml_dimm = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug(
                        "Current vmxml with plugged dimm dev is %s\n" %
                        vmxml_dimm)
                    virsh.start(dom.name, **virsh_dargs)
                    dom.wait_for_login().close()
                    result = virsh.detach_device(dom.name,
                                                 dimm_xml.xml,
                                                 debug=True,
                                                 ignore_status=True)
                    expected_fails = params.get("expected_fails")
                    utlv.check_result(result, expected_fails)
                    vmxml_live = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug(
                        "Current vmxml after hot-unplug dimm is %s\n" %
                        vmxml_live)
                    expected_events_list.append(
                        "'device-removal-failed' for %s: dimm0")
                elif event == "watchdog":
                    vmxml.remove_all_device_by_type('watchdog')
                    watchdog_dev = Watchdog()
                    watchdog_dev.model_type = params.get("watchdog_model")
                    action = params.get("action")
                    watchdog_dev.action = action
                    vmxml.add_device(watchdog_dev)
                    vmxml.sync()
                    logging.debug("Current vmxml with watchdog dev is %s\n" %
                                  vmxml)
                    virsh.start(dom.name, **virsh_dargs)
                    session = dom.wait_for_login()
                    try:
                        session.cmd("echo 0 > /dev/watchdog")
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        test.fail("Failed to trigger watchdog: %s" % details)
                    session.close()
                    # watchdog acts slowly, waiting for it.
                    time.sleep(30)
                    expected_events_list.append("'watchdog' for %s: " +
                                                "%s" % action)
                    if action == 'pause':
                        expected_events_list.append(
                            "'lifecycle' for %s: Suspended Watchdog")
                        virsh.resume(dom.name, **virsh_dargs)
                    else:
                        # action == 'reset'
                        expected_events_list.append("'reboot' for %s")
                elif event == "io-error":
                    part_size = params.get("part_size")
                    resume_event = params.get("resume_event")
                    suspend_event = params.get("suspend_event")
                    process.run("truncate -s %s %s" % (part_size, small_part),
                                shell=True)
                    utlv.mkfs(small_part, part_format)
                    utils_misc.mount(small_part, mount_point, None)
                    add_disk(dom.name, new_disk, 'vdb',
                             '--subdriver qcow2 --config', 'qcow2')
                    dom.start()
                    session = dom.wait_for_login()
                    session.cmd(
                        "mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                        "dd if=/dev/zero of=/mnt/test.img bs=1M count=50",
                        ignore_all_errors=True)
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'io-error' for %s: " +
                                                "%s" % new_disk +
                                                r" \(virtio-disk1\) pause")
                    expected_events_list.append(
                        "'io-error-reason' for %s: " + "%s" % new_disk +
                        r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    process.run("df -hT")
                    virsh.resume(dom.name, **virsh_dargs)
                    time.sleep(5)
                    expected_events_list.append(resume_event)
                    expected_events_list.append("'io-error' for %s: " +
                                                "%s" % new_disk +
                                                r" \(virtio-disk1\) pause")
                    expected_events_list.append(
                        "'io-error-reason' for %s: " + "%s" % new_disk +
                        r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    ret = virsh.domstate(dom.name, "--reason", **virsh_dargs)
                    if ret.stdout.strip() != "paused (I/O error)":
                        test.fail(
                            "Domain state should still be paused due to I/O error!"
                        )
                else:
                    test.error("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
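
The %s placeholders left in each expected-event string are filled with the domain name through the [(dom.name, event) ...] return value. A minimal sketch of how a caller might match those entries against `virsh event --loop --all` output (a hypothetical helper, not the test's actual checker):

import re

def match_expected_events(virsh_output, expected_events_list):
    """Check that every (dom_name, pattern) entry appears in the output."""
    for dom_name, pattern in expected_events_list:
        # Fill however many %s placeholders the entry carries (0, 1 or 2);
        # some entries already embed regex bits such as \(virtio-disk1\).
        pattern = pattern % ((dom_name,) * pattern.count("%s"))
        if not re.search(pattern, virsh_output):
            raise AssertionError("expected event not seen: %s" % pattern)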
コード例 #35
0
def run(test, params, env):
    """
    Test command: virsh start.

    1) Get the params from params.
    2) Prepare libvirtd's status.
    3) Do the start operation.
    4) Result check.
    5) clean up.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm_ref = params.get("vm_ref", "vm1")
    opt = params.get("vs_opt", "")

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    backup_name = vm_ref
    vm = None
    if vm_ref is not "":
        vm = env.get_vm(vm_ref)
    vmxml = libvirt_xml.VMXML()

    libvirtd_state = params.get("libvirtd", "on")
    pre_operation = params.get("vs_pre_operation", "")
    status_error = params.get("status_error", "no")

    try:
        # prepare before start vm
        if libvirtd_state == "on":
            utils_libvirtd.libvirtd_start()
        elif libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        if pre_operation == "rename":
            new_vm_name = params.get("vs_new_vm_name", "virsh_start_vm1")
            vm = libvirt_xml.VMXML.vm_rename(vm, new_vm_name)
            vm_ref = new_vm_name
        elif pre_operation == "undefine":
            vmxml = vmxml.new_from_dumpxml(vm_ref)
            vmxml.undefine()

        # do the start operation
        try:
            if pre_operation == "remote":
                # get the params for remote test
                remote_ip = params.get("remote_ip", "ENTER.YOUR.REMOTE.IP")
                remote_user = params.get("remote_user", "root")
                remote_pwd = params.get("remote_pwd",
                                        "ENTER.YOUR.REMOTE.PASSWORD")
                if pre_operation == "remote" and remote_ip.count(
                        "ENTER.YOUR."):
                    test.cancel("Remote test parameters not configured")

                ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)
                remote_uri = "qemu+ssh://%s/system" % remote_ip
                cmd_result = virsh.start(vm_ref,
                                         ignore_status=True,
                                         debug=True,
                                         uri=remote_uri)
                if cmd_result.exit_status:
                    test.fail("Start vm failed.\n Detail: %s" % cmd_result)
            elif opt.count("console"):
                # With --console, the start command prints the guest's boot
                # messages and drops into a login prompt. So we start the vm
                # with --console and log in on the console via
                # remote.handle_prompts().
                cmd = "start %s --console" % vm_ref
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                                   auto_close=True)
                virsh_session.sendline(cmd)
                remote.handle_prompts(virsh_session,
                                      params.get("username", ""),
                                      params.get("password", ""),
                                      r"[\#\$]\s*$",
                                      timeout=60,
                                      debug=True)
            elif opt.count("autodestroy"):
                # With --autodestroy, the vm is destroyed when its virsh
                # session closes. So we run the start command with
                # --autodestroy inside a virsh session, close the session,
                # and then check whether the vm was destroyed.
                virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                                   auto_close=True)
                cmd = "start %s --autodestroy" % vm_ref
                status = virsh_session.cmd_status(cmd)
                if status:
                    test.fail("Failed to start vm with --autodestroy.")
                # Close the session, then the vm should be destroyed.
                virsh_session.close()
            elif opt.count("force-boot"):
                # With --force-boot, the VM is started from a fresh boot even
                # if it was saved with virsh managedsave. So we start the vm,
                # run 'sleep 1000&' in it, save it with virsh managedsave and
                # start it again with --force-boot. To verify the result we
                # check for the sleep process: if it still exists, force-boot
                # failed; otherwise the case passes.
                vm.start()
                session = vm.wait_for_login()
                status = session.cmd_status("sleep 1000&")
                if status:
                    test.error("Can not execute command in guest.")
                sleep_pid = session.cmd_output("echo $!").strip()
                virsh.managedsave(vm_ref)
                virsh.start(vm_ref, options=opt)
            else:
                cmd_result = virsh.start(vm_ref, options=opt)
                if cmd_result.exit_status:
                    if status_error == "no":
                        test.fail("Start vm failed.\n Detail: %s" % cmd_result)
                else:
                    # start vm successfully
                    if status_error == "yes":
                        if libvirtd_state == "off" and libvirt_version.version_compare(
                                5, 6, 0):
                            logging.info(
                                "From libvirt version 5.6.0 libvirtd is restarted,"
                                " command should succeed.")
                        else:
                            test.fail("Run successfully with wrong "
                                      "command!\n Detail:%s" % cmd_result)

            if opt.count("paused"):
                if not (vm.state() == "paused"):
                    test.fail("VM is not paused when started with "
                              "--paused.")
            elif opt.count("autodestroy"):
                if vm.is_alive():
                    test.fail("VM was started with --autodestroy,"
                              "but not destroyed when virsh session "
                              "closed.")
            elif opt.count("force-boot"):
                session = vm.wait_for_login()
                status = session.cmd_status("ps %s |grep '[s]leep 1000'" %
                                            sleep_pid)
                if not status:
                    test.fail("VM was started with --force-boot,"
                              "but it is restored from a"
                              " managedsave.")
            else:
                if status_error == "no" and not vm.is_alive(
                ) and pre_operation != "remote":
                    test.fail("VM was started but it is not alive.")

        except remote.LoginError as detail:
            test.fail("Failed to login guest.")
    finally:
        # clean up
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        elif pre_operation == "rename":
            libvirt_xml.VMXML.vm_rename(vm, backup_name)
        elif pre_operation == "remote":
            virsh.destroy(vm_ref,
                          ignore_status=False,
                          debug=True,
                          uri=remote_uri)

        if vm and vm.is_paused():
            vm.resume()

        # Restore VM
        vmxml_backup.sync()
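
Condensed, the --force-boot verification above reduces to the following sketch (same avocado-vt virsh helpers, names taken from the surrounding test):

# Sketch: leave a marker process in the guest, managedsave, then force-boot.
vm.start()
session = vm.wait_for_login()
session.cmd_status("sleep 1000&")                # marker would survive a restore
sleep_pid = session.cmd_output("echo $!").strip()
virsh.managedsave(vm_ref)
virsh.start(vm_ref, options="--force-boot")      # boot fresh, discard saved image
session = vm.wait_for_login()
# A fresh boot must not show the marker; a managedsave restore would.
if not session.cmd_status("ps %s | grep '[s]leep 1000'" % sleep_pid):
    raise AssertionError("--force-boot restored from the managedsave image")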
コード例 #36
0
def run(test, params, env):
    """
    Test the PCIe controllers' options
    1. Backup guest xml before the tests
    2. Modify guest xml and define the guest
    3. Start guest
    4. Hotplug if needed
    5. Do checking
    6. Destroy guest and restore guest
    """

    def get_disk_bus(disk_dev=None):
        """
        Get the bus list of guest disks

        :param disk_dev: The specified disk device
        :return: list for disks' buses
        """
        disk_bus_list = []

        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        disk_dev_list = cur_vm_xml.get_disk_blk(vm_name)
        if disk_dev and disk_dev not in disk_dev_list:
            return disk_bus_list
        for disk_index in range(0, len(disk_dev_list)):
            disk_target = disk_dev if disk_dev else disk_dev_list[disk_index]
            disk_bus = cur_vm_xml.get_disk_attr(vm_name, disk_target, 'address', 'bus')
            disk_bus_list.append(disk_bus)
            if disk_dev:
                break
        return disk_bus_list

    def check_guest_disks(ishotplug):
        """
        Check guest disks in different ways

        :param ishotplug: True for hotplug, False for hotunplug
        :raise: test.fail if some errors happen
        """
        def _find_disk_by_cmd():
            """
            Check disk using virsh command

            :return: True if the disk is found, otherwise False
            """
            ret = virsh.domblklist(vm_name, **virsh_options)
            target_disks = re.findall(r"[v,s]d[a-z]", ret.stdout.strip())
            logging.debug(target_disks)

            for one_disk in target_disks:
                if target_dev in one_disk:
                    logging.debug("Found the disk '{}'".format(target_dev))
                    return True
            logging.debug("Can't find the disk '{}'".format(target_dev))
            return False

        def _find_disk_in_xml():
            """
            Check disk in guest xml

            :return: True if the disk is found with right bus
                     False if the disk is not found
            :raise: test.fail if the disk's bus is incorrect
            """
            bus_list = get_disk_bus(target_dev)
            if len(bus_list) == 0:
                return False
            if bus_list[0] != '0x%02x' % int(contr_index):
                test.fail("The found disk's bus is expected to be {}, "
                          "but {} found".format('0x%02x' % int(contr_index),
                                                bus_list[0]))
            return True

        virsh_options.update({'ignore_status': False})
        # Firstly check virsh.domblklist
        found_by_cmd = _find_disk_by_cmd()
        found_in_xml = _find_disk_in_xml()
        msg1 = "Can't find the device with target_dev '{}' by cmd".format(target_dev)
        msg2 = "Found the device with target_dev '{}' unexpectedly by cmd".format(target_dev)
        msg3 = "The device with target_dev '{}' was not detached successfully in xml".format(target_dev)
        msg4 = "The device with target_dev '{}' was detached unexpectedly in xml".format(target_dev)
        if ((ishotplug and not status_error and not found_by_cmd) or
           (not ishotplug and status_error and not found_by_cmd)):
            test.fail(msg1)
        if ((ishotplug and status_error and found_by_cmd) or
           (not ishotplug and not status_error and found_by_cmd)):
            test.fail(msg2)
        if ((ishotplug and not status_error and not found_in_xml) or
           (not ishotplug and not status_error and found_in_xml)):
            test.fail(msg3)
        if ((ishotplug and status_error and found_in_xml) or
           (not ishotplug and status_error and not found_in_xml)):
            test.fail(msg4)

    def check_inside_guest(ishotplug):
        """
        Check devices within the guest

        :param ishotplug: True for hotplug, False for hotunplug
        :raise: test.fail if the result is not expected
        """
        def _check_disk_in_guest():
            """
            Compare the disk numbers within the guest

            :return: True if new disk is found, otherwise False
            """
            new_disk_num = len(vm.get_disks())
            if new_disk_num > ori_disk_num:
                logging.debug("New disk is found in vm")
                return True
            logging.debug("New disk is not found in vm")
            return False

        vm_session = vm.wait_for_login()
        status = _check_disk_in_guest()
        vm_session.close()
        msg1 = "Can't find the device in the guest"
        msg2 = "Found the device in the guest unexpectedly"
        if ((ishotplug and not status_error and not status) or
                (not ishotplug and status_error and not status)):
            test.fail(msg1)
        if ((ishotplug and status_error and status) or
                (not ishotplug and not status_error and status)):
            test.fail(msg2)

    def check_guest_contr():
        """
        Check the controller in guest xml

        :raise: test.fail if the controller does not meet the expectation
        """
        found = False
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        for cntl in cur_vm_xml.devices.by_device_tag('controller'):
            if (cntl.type == 'pci' and
               cntl.model == contr_model and
               cntl.index == contr_index):
                logging.debug(cntl.target)
                cntl_hotplug = cntl.target.get('hotplug')
                logging.debug("Got controller's hotplug:%s", cntl_hotplug)
                if cntl_hotplug != hotplug_option:
                    test.fail("The controller's hotplug option is {}, "
                              "but expect {}".format(cntl_hotplug,
                                                     hotplug_option))
                found = True
                break
        if not found:
            test.fail("The controller with index {} is not found".format(contr_index))

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    setup_controller = params.get("setup_controller", 'yes') == 'yes'
    check_within_guest = params.get("check_within_guest", 'yes') == 'yes'
    check_disk_xml = params.get("check_disk_xml", 'no') == 'yes'
    check_cntl_xml = params.get("check_cntl_xml", 'no') == 'yes'
    contr_model = params.get("controller_model", 'pcie-root-port')
    contr_target = params.get("controller_target")
    contr_index = params.get("contr_index")
    hotplug_option = params.get("hotplug_option")
    hotplug = params.get("hotplug", 'yes') == 'yes'
    define_option = params.get("define_option")
    attach_extra = params.get("attach_extra")
    target_dev = params.get("target_dev")
    err_msg = params.get("err_msg")
    status_error = params.get("status_error", "no") == 'yes'
    restart_daemon = params.get("restart_daemon", "no") == 'yes'
    save_restore = params.get("save_restore", "no") == 'yes'
    hotplug_counts = params.get("hotplug_counts")

    virsh_options = {'debug': True, 'ignore_status': False}

    image_path_list = []
    vm = env.get_vm(vm_name)
    vm_xml_obj = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml_obj.copy()
    try:
        if check_within_guest:
            if not vm.is_alive():
                virsh.start(vm_name, **virsh_options)
            ori_disk_num = len(vm.get_disks())
            logging.debug("The original disk number in vm is %d", ori_disk_num)
            virsh.destroy(vm_name)

        vm_xml_obj.remove_all_device_by_type('controller')
        if setup_controller:
            contr_dict = {'controller_type': 'pci',
                          'controller_model': contr_model,
                          'controller_index': contr_index,
                          'controller_target': contr_target}
            contr_obj = libvirt.create_controller_xml(contr_dict)
            vm_xml_obj.add_device(contr_obj)
            logging.debug("Add a controller: %s" % contr_obj)

        virsh.define(vm_xml_obj.xml, options=define_option, **virsh_options)
        if not save_restore:
            disk_max = int(hotplug_counts) if hotplug_counts else 1
            for disk_inx in range(0, disk_max):
                image_path = os.path.join(data_dir.get_tmp_dir(),
                                          'disk{}.qcow2'.format(disk_inx))
                image_path_list.append(image_path)
                libvirt.create_local_disk("file", image_path, '10M',
                                          disk_format='qcow2')
        if not hotplug and not save_restore:
            # Do coldplug before hotunplug to prepare the disk device
            virsh.attach_disk(vm_name, image_path, target_dev,
                              extra=attach_extra,
                              **virsh_options)
        virsh.start(vm_name, **virsh_options)

        logging.debug("Test VM XML after starting:"
                      "\n%s", VMXML.new_from_dumpxml(vm_name))
        vm.wait_for_login().close()

        if restart_daemon:
            daemon_obj = Libvirtd()
            daemon_obj.restart()

        if save_restore:
            save_path = os.path.join(data_dir.get_tmp_dir(), 'rhel.save')
            virsh.save(vm_name, save_path, **virsh_options)
            time.sleep(10)
            virsh.restore(save_path, **virsh_options)
        # Hotplug the disk device(s)
        if hotplug:
            virsh_options.update({'ignore_status': True})
            attach_times = 1 if not hotplug_counts else int(hotplug_counts)

            if attach_times == 1:
                ret = virsh.attach_disk(vm_name, image_path_list[0], target_dev,
                                        extra=attach_extra,
                                        **virsh_options)
                libvirt.check_result(ret, expected_fails=err_msg)
            else:
                for attach_inx in range(0, attach_times):
                    disk_dev = 'vd{}'.format(chr(98 + attach_inx))
                    ret = virsh.attach_disk(vm_name, image_path_list[attach_inx], disk_dev,
                                            extra=attach_extra,
                                            **virsh_options)
                    if ret.exit_status:
                        break
                libvirt.check_result(ret, expected_fails=err_msg)
        if not hotplug and check_within_guest:
            virsh_options.update({'ignore_status': True})
            ret = virsh.detach_disk(vm_name, target_dev, **virsh_options)
            libvirt.check_result(ret, expected_fails=err_msg)
        if check_disk_xml:
            time.sleep(5)
            check_guest_disks(hotplug)
        if check_cntl_xml:
            check_guest_contr()
        if hotplug_counts:
            bus_list = get_disk_bus()
            for one_bus in bus_list:
                if one_bus == '0x%02x' % int(contr_index):
                    test.fail("The disk should not be attached "
                              "to the controller with "
                              "index '{}'".format(contr_index))
            logging.debug("No disk is found to attach to the "
                          "controller with index '{}'".format(contr_index))
        if check_within_guest:
            check_inside_guest(hotplug)

    finally:
        vm_xml_backup.sync()
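
The bus checks above compare a disk's <address> bus attribute against the controller index rendered as 0xNN. A minimal stdlib sketch of that comparison, over a hypothetical XML fragment of the kind `virsh dumpxml` prints:

import xml.etree.ElementTree as ET

# Hypothetical <disk> fragment; the test reads the same attribute through
# VMXML.get_disk_attr(vm_name, target, 'address', 'bus').
disk_xml = """<disk type='file' device='disk'>
  <target dev='vdb' bus='virtio'/>
  <address type='pci' domain='0x0000' bus='0x05' slot='0x00' function='0x0'/>
</disk>"""
addr = ET.fromstring(disk_xml).find('address')
contr_index = '5'
# Same comparison the test makes: the bus must equal the index as 0xNN.
assert addr.get('bus') == '0x%02x' % int(contr_index)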
コード例 #37
0
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment.(libvirts service)
    5.Confirm the test result.
    """
    def boot_time():
        session = vm.wait_for_login()
        boot_time = session.cmd_output("uptime --since")
        session.close()
        return boot_time

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra", "")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    local_pwd = params.get("local_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    reboot_readonly = "yes" == params.get("reboot_readonly", "no")
    wait_time = int(params.get('wait_time', 5))
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        try:
            vm.prepare_guest_agent(channel=agent, start=agent)
        except virt_vm.VMError as e:
            logging.debug(e)
            # qemu-guest-agent is not available on RHEL5
            test.cancel("qemu-guest-agent package is not available")

        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)

            # Setup ssh connection
            ssh_connection = utils_conn.SSHConnection(server_ip=local_ip,
                                                      server_pwd=local_pwd,
                                                      client_ip=remote_ip,
                                                      client_pwd=remote_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
                if not status:
                    # an operation issued before the reboot completes
                    # may result in data corruption
                    vm.wait_for_login().close()
            except (remote.LoginError, process.CmdError,
                    aexpect.ShellError) as e:
                logging.error("Exception: %s", str(e))
                status = -1
        if vm_ref != "remote_name":
            if not status_error:
                # No need to check the boot-up time if it is a negative test
                first_boot_time = boot_time()

            vm_ref = "%s" % vm_ref
            if extra:
                vm_ref += " %s" % extra
            cmdresult = virsh.reboot(vm_ref,
                                     mode,
                                     ignore_status=True,
                                     debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
            # skip the check if it is a negative test
            if not status_error:
                cmdoutput = None

                def _wait_for_reboot_up():
                    # Rebind the outer cmdoutput so the failure message
                    # below can report the last domstate result.
                    nonlocal cmdoutput
                    second_boot_time = boot_time()
                    is_rebooted = second_boot_time > first_boot_time
                    cmdoutput = virsh.domstate(vm_ref,
                                               '--reason',
                                               ignore_status=True,
                                               debug=True)
                    domstate_status = cmdoutput.exit_status
                    output = "running" in cmdoutput.stdout
                    return not domstate_status and output and is_rebooted

                if not wait.wait_for(
                        _wait_for_reboot_up, timeout=wait_time, step=1):
                    test.fail("Cmd error: %s Error status: %s" %
                              (cmdoutput.stderr, cmdoutput.stdout))
            elif pre_domian_status != 'shutoff':
                vm.wait_for_login().close()
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # Test the readonly mode
        if reboot_readonly:
            result = virsh.reboot(vm_ref,
                                  ignore_status=True,
                                  debug=True,
                                  readonly=True)
            libvirt.check_exit_status(result, expect_error=True)
            # This is for status_error check
            status = result.exit_status

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    test.cancel("Reboot command doesn't work on older libvirt "
                                "versions")
                test.fail("Run failed with right command")
    finally:
        xml_backup.sync()

        if 'ssh_connection' in locals():
            ssh_connection.auto_recover = True
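The `_wait_for_reboot_up` helper above works because `uptime --since` prints an ISO-style timestamp (`YYYY-MM-DD HH:MM:SS`), so comparing the raw strings orders boot times chronologically. A small illustration with hard-coded sample values:

from datetime import datetime

# Sample `uptime --since` outputs taken before and after the reboot.
first_boot_time = "2021-03-01 08:12:34"
second_boot_time = "2021-03-01 08:15:02"

# ISO-style timestamps sort lexicographically in chronological order,
# so the test can compare the strings directly.
assert second_boot_time > first_boot_time

# The same comparison made explicit by parsing:
fmt = "%Y-%m-%d %H:%M:%S"
assert (datetime.strptime(second_boot_time, fmt) >
        datetime.strptime(first_boot_time, fmt))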
Code example #38
File: virsh_destroy.py  Project: Hao-Liu/tp-libvirt
def run(test, params, env):
    """
    Test command: virsh destroy.

    The command can destroy (stop) a domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh destroy operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    vm_ref = params.get("destroy_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "LOCAL.EXAMPLE.COM")
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM") or
                               local_ip.count("EXAMPLE.COM")):
        raise error.TestNAError(
            "Remote test parameters unchanged from default")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("destroy_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref != "remote":
        status = virsh.destroy(vm_ref, ignore_status=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri, debug=True).exit_status
        output = ""
    else:
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22",
                                          "root", remote_pwd, "#")
            session.cmd_output('LANG=C')

            # Setup up remote to remote login in local host
            ssh_key.setup_remote_ssh_key(remote_ip, "root", remote_pwd,
                                         local_ip, "root", local_pwd)

            command = "virsh -c %s destroy %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except error.CmdError:
            status = 1

    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command! "
                                 "Output:\n%s" % output)
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command! Output:\n%s"
                                 % output)
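The vm_ref handling in this test (and in most tests in this collection) is an if/elif ladder mapping a cartesian parameter name onto the concrete domain reference. The same dispatch can be written table-driven; the sketch below omits the per-command extras such as destroy_extra and assumes params behaves like a dict:

def resolve_vm_ref(vm_ref, params, vm_name, domid, domuuid):
    """Map a symbolic vm_ref parameter onto the value passed to virsh."""
    table = {
        "id": lambda: domid,
        "hex_id": lambda: hex(int(domid)),  # lazy: domid is '-' when shut off
        "name": lambda: vm_name,
        "uuid": lambda: domuuid,
    }
    if vm_ref in table:
        return table[vm_ref]()
    if "invalid" in vm_ref:
        # e.g. params["invalid_id"] holds a deliberately bogus value
        return params.get(vm_ref)
    return vm_ref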
Code example #39
def run_destroy_console(params, libvirtd, vm):
    """
    Start a vm with console connected and then destroy it.
    """
    vm.start(autoconsole=True)
    virsh.destroy(vm.name)
Code example #40
File: virsh_shutdown.py  Project: nasastry/tp-libvirt
def run(test, params, env):
    """
    Test command: virsh shutdown.

    The command can gracefully shut down a domain.

    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh shutdown operation.
    4.Recover test environment.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("shutdown_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    agent = ("yes" == params.get("shutdown_agent", "no"))
    mode = params.get("shutdown_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    libvirtd = params.get("libvirtd", "on")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    timeout = int(params.get("shutdown_timeout", "60"))
    readonly = "yes" == params.get("shutdown_readonly", "no")
    expect_msg = params.get("shutdown_err_msg")

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in"
                        " current libvirt version.")

    try:
        # Add or remove qemu-agent from guest before test
        vm.prepare_guest_agent(channel=agent, start=agent)
        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        domid = vm.get_id()
        domuuid = vm.get_uuid()
        # run test case
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = "%s %s" % (vm_name, params.get("shutdown_extra"))
        elif vm_ref == "uuid":
            vm_ref = domuuid

        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        if vm_ref != "remote":
            result = virsh.shutdown(vm_ref, mode,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, debug=True,
                                    ignore_status=True,
                                    readonly=readonly)
            status = result.exit_status
        else:
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            remote_user = params.get("remote_user", "root")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            local_pwd = params.get("local_pwd", "password")
            local_user = params.get("username", "root")
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("Remote test parameters"
                            " unchanged from default")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                # set up auto ssh login from remote machine to
                # execute commands
                config_opt = ["StrictHostKeyChecking=no"]
                ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                             remote_pwd, hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd,
                                             config_options=config_opt)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = ("virsh -c %s shutdown %s %s"
                           % (remote_uri, vm_name, mode))
                status = session.cmd_status(command, internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
            if expect_msg:
                libvirt.check_result(result, expect_msg.split(';'))
        else:
            if status:
                test.fail("Run failed with right command")
            if not vm.wait_for_shutdown(timeout):
                test.fail("Failed to shutdown in timeout %s" % timeout)
    finally:
        if utils_misc.wait_for(utils_libvirtd.libvirtd_is_running, 60):
            xml_backup.sync()
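On the expected-failure path this test matches stderr against shutdown_err_msg, a semicolon-separated list of acceptable messages. A hedged sketch of that check; the CmdResult class below is an assumed stand-in for the real result object, not the avocado API:

class CmdResult(object):
    """Assumed minimal shape of a command result, for illustration only."""
    def __init__(self, exit_status, stderr):
        self.exit_status = exit_status
        self.stderr = stderr

def check_expected_failure(result, expected_msgs):
    """Fail unless the command failed with one of the expected messages."""
    if result.exit_status == 0:
        raise AssertionError("command unexpectedly succeeded")
    if not any(msg in result.stderr for msg in expected_msgs):
        raise AssertionError("unexpected error output: %s" % result.stderr)

# Usage mirroring expect_msg.split(';') above:
res = CmdResult(1, "error: Requested operation is not valid")
check_expected_failure(res, "Requested operation is not valid;not running".split(';'))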
Code example #41
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    pm_enabled_disk = params.get("pm_enabled_disk", "no")
    pm_enabled_mem = params.get("pm_enabled_mem", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_suspend_resume = "yes" == params.get("test_suspend_resume", "no")
    pmsuspend_error = 'yes' == params.get("pmsuspend_error", 'no')
    pmsuspend_error_msg = params.get("pmsuspend_error_msg")
    agent_error_test = 'yes' == params.get("agent_error_test", 'no')

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # A backup of original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible fail patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs_copy = virsh_dargs.copy()
        virsh_dargs_copy['uri'] = uri
        virsh_dargs_copy['unprivileged_user'] = unprivileged_user
        if pmsuspend_error:
            fail_pat.append('access denied')

    # Setup possible failure patterns
    if pm_enabled == 'not_set':
        fail_pat.append('not supported')
    if pm_enabled == 'no':
        fail_pat.append('disabled')

    if vm_state == 'paused':
        # For older version
        fail_pat.append('not responding')
        # For newer version
        fail_pat.append('not running')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')

    if pmsuspend_error_msg:
        fail_pat.append(pmsuspend_error_msg)

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if pm_enabled == 'not_set':
            try:
                if vmxml.pm:
                    del vmxml.pm
            except xcepts.LibvirtXMLNotFoundError:
                pass
        else:
            pm_xml = vm_xml.VMPMXML()
            pm_xml.mem_enabled = pm_enabled_mem
            pm_xml.disk_enabled = pm_enabled_disk
            vmxml.pm = pm_xml
        vmxml.sync()

        vm.prepare_guest_agent()
        # Selinux should be enforcing
        vm.setenforce(1)

        # Create swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(test.tmpdir, "%s.save" % vm_name)
            session = vm.wait_for_login()
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")
            session.close()

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name,
                                        suspend_target,
                                        debug=True,
                                        uri=uri,
                                        unprivileged_user=unprivileged_user)
            if result.exit_status == 0:
                if fail_pat:
                    raise error.TestFail(
                        "Expected failed with %s, but run succeed"
                        ":\n%s" % (fail_pat, result))
            else:
                if not fail_pat:
                    raise error.TestFail(
                        "Expected success, but run failed:\n%s" % result)
                #if not any_pattern_match(fail_pat, result.stderr):
                if not any(p in result.stderr for p in fail_pat):
                    raise error.TestFail("Expected failed with one of %s, but "
                                         "failed with:\n%s" %
                                         (fail_pat, result))

            utils_misc.wait_for(lambda: vm.state() == 'pmsuspended', 30)
            if agent_error_test:
                err_msg = ("Requested operation is not valid:"
                           " domain is not running")
                ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
                libvirt.check_result(ret, [err_msg])
                ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
                libvirt.check_result(ret, [err_msg])
                ret = virsh.domtime(vm_name, **virsh_dargs)
                libvirt.check_result(ret, [err_msg])

            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Dompmwakeup should return false here
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail(
                        "Vm status is not paused before pm wakeup")
                if params.get('setup_libvirt_polkit') == 'yes':
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs_copy)
                else:
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail(
                        "Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    raise error.TestFail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait until the vm is started
                vm.wait_for_login()
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait until the vm is started
                vm.wait_for_login()
                # run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target,
                                         **virsh_dargs)
                libvirt.check_exit_status(ret)
                # save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    raise error.TestFail("libvirtd crashed")
            if test_suspend_resume:
                ret = virsh.suspend(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    raise error.TestFail("VM state should be pmsuspended")
                ret = virsh.resume(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    raise error.TestFail("VM state should be pmsuspended")
        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
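The pm_enabled parameters above toggle the domain's <pm> element through the avocado-vt VMPMXML wrapper. For reference, a sketch of the element itself built with the standard library; the element and attribute names follow libvirt's domain XML format, while the helper function is hypothetical:

import xml.etree.ElementTree as ET

def build_pm_element(mem_enabled="yes", disk_enabled="no"):
    """Build libvirt's <pm> element controlling guest PM suspend targets."""
    pm = ET.Element("pm")
    ET.SubElement(pm, "suspend-to-mem", enabled=mem_enabled)
    ET.SubElement(pm, "suspend-to-disk", enabled=disk_enabled)
    return ET.tostring(pm, encoding="unicode")

print(build_pm_element("yes", "no"))
# <pm><suspend-to-mem enabled="yes" /><suspend-to-disk enabled="no" /></pm>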
Code example #42
File: virsh_reboot.py  Project: zcyes/tp-libvirt
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment.(libvirtd service)
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra", "")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        if agent:
            vm_xml.VMXML.set_agent_channel(vm_name)
        else:
            vm_xml.VMXML.remove_agent_channel(vm_name)

        virsh.start(vm_name)
        guest_session = vm.wait_for_login()
        if agent:
            if guest_session.cmd_status("which qemu-ga"):
                raise error.TestNAError("Cannot execute this test for domain"
                                        " doesn't have qemu-ga command!")
            # check if the qemu-guest-agent is active or not firstly
            stat_ps = guest_session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                s, o = guest_session.cmd_status_output("qemu-ga -d")
                if s != 0:
                    raise error.TestError("'qemu-ga -d' failed.\noutput:%s" % o)
            stat_ps = guest_session.cmd_status("ps aux |grep [q]emu-ga")
            guest_session.close()
            if stat_ps:
                raise error.TestError("Fail to start qemu-guest-agent!")
        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            # FIXME: Catch specific exception
            except Exception as detail:
                logging.error("Exception: %s", str(detail))
                status = -1
        if vm_ref != "remote_name":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.reboot(vm_ref, mode,
                                     ignore_status=True, debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    raise error.TestNAError(
                        "Reboot command doesn't work on older libvirt versions")
                raise error.TestFail("Run failed with right command")
Code example #43
def run(test, params, env):
    """
    Test command: virsh domcontrol.

    The command can show the state of a control interface to the domain.
    1.Prepare test environment, destroy or suspend a VM.
    2.Do action to get a subprocess(dump, save, restore, managedsave) if
      domcontrol_job is set as yes.
    3.Perform virsh domcontrol to check state of a control interface to the
      domain.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "running")
    options = params.get("domcontrol_options", "")
    action = params.get("domcontrol_action", "dump")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domcontrol.tmp")
    vm_ref = params.get("domcontrol_vm_ref")
    job = params.get("domcontrol_job", "yes")
    readonly = "yes" == params.get("readonly", "no")
    status_error = params.get("status_error", "no")
    remote_uri = params.get("remote_uri")
    remote_ip = params.get("remote_ip")
    remote_pwd = params.get("remote_pwd")
    remote_user = params.get("remote_user", "root")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    if remote_uri:
        if remote_ip.count("EXAMPLE"):
            test.cancel("The remote ip is Sample one, pls configure it first")
        ssh_key.setup_ssh_key(remote_ip, remote_user, remote_pwd)

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    if action == "managedsave":
        tmp_file = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        virsh.save(vm_name, tmp_file, ignore_status=True)

    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        # Check domain control interface state with job on domain.
        process = get_subprocess(action, vm_name, tmp_file)
        while process.poll() is None:
            if vm.is_alive():
                ret = virsh.domcontrol(vm_ref, options, ignore_status=True,
                                       debug=True)
                status = ret.exit_status
                # check status_error
                if status != 0:
                    # Do not raise error if domain is not running, as save,
                    # managedsave and restore will change the domain state
                    # from running to shutoff or reverse, and the timing of
                    # the state change is not predictable, so skip the error
                    # of domain state change and focus on domcontrol command
                    # status while domain is running.
                    if vm.is_alive():
                        test.fail("Run failed with right command")
    else:
        if remote_uri:
            # check remote domain status
            if not virsh.is_alive(vm_name, uri=remote_uri):
                # If remote domain is not running, start remote domain
                virsh.start(vm_name, uri=remote_uri)

        # Check domain control interface state without job on domain.
        ret = virsh.domcontrol(vm_ref, options, readonly=readonly,
                               ignore_status=True, debug=True, uri=remote_uri)
        status = ret.exit_status

        # check status_error
        if status_error == "yes":
            if status == 0:
                test.fail("Run successfully with wrong command!")
        elif status_error == "no":
            if status != 0:
                test.fail("Run failed with right command")

    # Recover the environment.
    if action == "managedsave":
        virsh.managedsave_remove(vm_name, ignore_status=True)
    if os.path.exists(tmp_file):
        os.unlink(tmp_file)
    if remote_uri:
        if virsh.is_alive(vm_name, uri=remote_uri):
            # Destroy remote domain
            virsh.destroy(vm_name, uri=remote_uri)
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        if process.poll() is None:
            process.kill()
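get_subprocess (defined elsewhere in this test module) starts a long-running job such as virsh save in the background so domcontrol can be queried while the job occupies the control interface. A rough sketch of that shape using plain subprocess; the command lines are illustrative, not the helper's actual implementation:

import subprocess

def start_background_job(vm_name, tmp_file):
    """Start a long-running virsh job (here: save) without blocking."""
    return subprocess.Popen(["virsh", "save", vm_name, tmp_file],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)

# Usage mirroring the polling loop above (requires a real domain):
# proc = start_background_job("mydomain", "/tmp/domcontrol.tmp")
# while proc.poll() is None:
#     subprocess.call(["virsh", "domcontrol", "mydomain"])
# proc.wait()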
Code example #44
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_suspend_resume = "yes" == params.get("test_suspend_resume", "no")
    pmsuspend_error = 'yes' == params.get("pmsuspend_error", 'no')

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # A backup of original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible fail patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs_copy = virsh_dargs.copy()
        virsh_dargs_copy['uri'] = uri
        virsh_dargs_copy['unprivileged_user'] = unprivileged_user
        if pmsuspend_error:
            fail_pat.append('access denied')

    # Setup possible failure patterns
    if pm_enabled == 'not_set':
        fail_pat.append('not supported')
    if pm_enabled == 'no':
        fail_pat.append('disabled')

    if vm_state == 'paused':
        fail_pat.append('not responding')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if pm_enabled == 'not_set':
            try:
                if vmxml.pm:
                    del vmxml.pm
            except xcepts.LibvirtXMLNotFoundError:
                pass
        else:
            pm_xml = vm_xml.VMPMXML()
            if suspend_target == 'mem':
                pm_xml.mem_enabled = pm_enabled
            elif suspend_target == 'disk':
                pm_xml.disk_enabled = pm_enabled
            elif suspend_target == 'hybrid':
                pm_xml.mem_enabled = pm_enabled
                pm_xml.disk_enabled = pm_enabled
            vmxml.pm = pm_xml
        vmxml.sync()

        vm.prepare_guest_agent()

        # Create swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(test.tmpdir, "%s.save" % vm_name)
            session = vm.wait_for_login()
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, debug=True,
                                        uri=uri,
                                        unprivileged_user=unprivileged_user)
            if result.exit_status == 0:
                if fail_pat:
                    raise error.TestFail("Expected failed with %s, but run succeed"
                                         ":\n%s" % (fail_pat, result))
            else:
                if not fail_pat:
                    raise error.TestFail("Expected success, but run failed:\n%s"
                                         % result)
                #if not any_pattern_match(fail_pat, result.stderr):
                if not any(p in result.stderr for p in fail_pat):
                    raise error.TestFail("Expected failed with one of %s, but "
                                         "failed with:\n%s" % (fail_pat, result))
            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Dompmwakeup should return false here
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail("Vm status is not paused before pm wakeup")
                if params.get('setup_libvirt_polkit') == 'yes':
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs_copy)
                else:
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail("Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    raise error.TestFail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait until the vm is started
                vm.wait_for_login()
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait until the vm is started
                vm.wait_for_login()
                # run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    raise error.TestFail("libvirtd crashed")
            if test_suspend_resume:
                ret = virsh.suspend(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    raise error.TestFail("VM state should be pmsuspended")
                ret = virsh.resume(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    raise error.TestFail("VM state should be pmsuspended")
        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            # Cleanup
            session.close()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
Code example #45
def reset_env(vm_name, xml_file):
    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
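reset_env expects xml_file to be a dumpxml snapshot taken before the test mutated the domain. A hypothetical context-manager wrapper pairing the snapshot with the reset; dump_fn and reset_fn stand in for virsh.dumpxml and reset_env above:

import os
from contextlib import contextmanager

@contextmanager
def domain_xml_guard(vm_name, dump_fn, reset_fn, tmp_dir="/tmp"):
    """Snapshot the domain XML on entry, restore it via reset_fn on exit."""
    xml_file = os.path.join(tmp_dir, "%s.xml" % vm_name)
    dump_fn(vm_name, xml_file)
    try:
        yield xml_file
    finally:
        reset_fn(vm_name, xml_file)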
Code example #46
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment.(libvirtd service)
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra", "")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domian_status = params.get("reboot_pre_domian_status", "running")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        if agent:
            vm_xml.VMXML.set_agent_channel(vm_name)
        else:
            vm_xml.VMXML.remove_agent_channel(vm_name)

        virsh.start(vm_name)
        guest_session = vm.wait_for_login()
        if agent:
            guest_session.cmd("qemu-ga -d")
            stat_ps = guest_session.cmd_status("ps aux |grep [q]emu-ga")
            guest_session.close()
            if stat_ps:
                raise error.TestError("Fail to start qemu-guest-agent!")
        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            # FIXME: Catch specific exception
            except Exception as detail:
                logging.error("Exception: %s", str(detail))
                status = -1
        if vm_ref != "remote_name":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.reboot(vm_ref,
                                     mode,
                                     ignore_status=True,
                                     debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    raise error.TestNAError(
                        "Reboot command doesn't work on older libvirt versions"
                    )
                raise error.TestFail("Run failed with right command")
Code example #47
File: virsh_snapshot.py  Project: spiceqa/virt-test
            check_info(infos["Children"], sni["Children"],
                       "Incorrect children count")
            check_info(infos["Descendants"], sni["Descendants"],
                       "Incorrect descendants count")

        except error.CmdError:
            handle_error("Failed getting snapshots info", vm_name)
        except error.TestFail as e:
            handle_error(str(e), vm_name)
        logging.info("Snapshot %s verified", sni["Name"])

    logging.info("Test snapshot switching")
    for sni in snapshot_info:
        try:
            # Assure VM is shut off before revert.
            virsh.destroy(vm_name)
            result = virsh.snapshot_revert(vm_name, sni["Name"])
            if result.exit_status:
                raise error.TestFail("Snapshot revert failed.\n"
                                     "Error: %s." % result.stderr)
            state = normalize_state(virsh.domstate(vm_name).stdout.strip())
            if state != sni["State"]:
                raise error.TestFail("Incorrect state after revert - %s" %
                                     (sni["Name"]))
            if state == normalize_state('shutoff'):
                vm.start()
            elif state == normalize_state('paused'):
                vm.resume()

            session = vm.wait_for_login()
            test_file(session, sni["to_create"], 0)
Code example #48
    def edit_vcpu(source):
        """
        Modify vm's cpu information by virsh edit command.

        :param source : virsh edit's option.
        :return: True if edit successed,False if edit failed.
        """
        vcpucount_result = virsh.vcpucount(vm_name,
                                           options="--config --maximum")
        if vcpucount_result.exit_status:
            # Fail back to libvirt_xml way to test vcpucount.
            original_vcpu = str(vmxml.vcpu)
        else:
            original_vcpu = vcpucount_result.stdout.strip()

        expected_vcpu = str(int(original_vcpu) + 1)
        top_mode = {}
        if not status_error == "yes":
            # check if topology is defined and change vcpu accordingly
            try:
                vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(source)
                topology = vmxml_backup.get_cpu_topology()
                cores = topology['cores']
                threads = topology['threads']
                sockets = str(topology['sockets'])
                old_topology = r"<topology sockets='%s' cores='%s' threads='%s'\/>" % (
                    sockets, cores, threads)
                sockets = str(int(topology['sockets']) + 1)
                new_topology = r"<topology sockets='%s' cores='%s' threads='%s'\/>" % (
                    sockets, cores, threads)
                top_mode = {"edit": r":%s /<topology .*\/>/" + new_topology,
                            "recover": r":%s /<topology .*\/>/" + old_topology}
                expected_vcpu = str(int(sockets) * int(cores) * int(threads))
            except Exception as details:
                expected_vcpu = str(int(original_vcpu) + 1)
        dic_mode = {
            "edit": r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>",
            "recover": r":%s /[0-9]*<\/vcpu>/" + original_vcpu + r"<\/vcpu>"}
        if top_mode:
            status = libvirt.exec_virsh_edit(source, [top_mode["edit"],
                                                      dic_mode["edit"]])
        else:
            status = libvirt.exec_virsh_edit(source, [dic_mode["edit"]])
        logging.info(status)
        if not status:
            vmxml.sync()
            return status
        if libvirtd_stat == "off":
            return False
        if params.get("paused_after_start_vm") == "yes":
            virsh.resume(vm_name, ignore_status=True)
            virsh.destroy(vm_name)
        elif params.get("start_vm") == "yes":
            virsh.destroy(vm_name)
        new_vcpus = str(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).vcpu)
        # Recover cpuinfo
        # Use name rather than source, since source could be domid
        if top_mode:
            status = libvirt.exec_virsh_edit(vm_name, [top_mode["recover"],
                                                       dic_mode["recover"]])
        else:
            status = libvirt.exec_virsh_edit(vm_name, [dic_mode["recover"]])
        vmxml.sync()
        if status and new_vcpus != expected_vcpu:
            return False
        return status
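The edit/recover strings above are ex-mode substitute commands fed to the editor that virsh edit spawns (via libvirt.exec_virsh_edit). A short illustration of how one is assembled; note that / inside the pattern and replacement is escaped as \/ because it doubles as the substitute delimiter:

original_vcpu = "4"                      # illustrative value
expected_vcpu = str(int(original_vcpu) + 1)

# ex command: on every line (%), replace the current <vcpu> value.
edit_cmd = r":%s /[0-9]*<\/vcpu>/" + expected_vcpu + r"<\/vcpu>"
print(edit_cmd)        # :%s /[0-9]*<\/vcpu>/5<\/vcpu>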
Code example #49
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """
    def check_vm_guestagent(session):
        # Install qemu-guest-agent if it is not already present
        cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
        stat_install, output = session.cmd_status_output(cmd, 300)
        logging.debug(output)
        if stat_install != 0:
            raise error.TestError("Fail to install qemu-guest-agent, make"
                                  "sure that you have usable repo in guest")

        # Check if qemu-ga already started
        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga | grep -v grep")
        if stat_ps != 0:
            session.cmd("service qemu-ga start")
            # Check if the qemu-ga really started
            stat_ps = session.cmd_status(
                "ps aux |grep [q]emu-ga | grep -v grep")
            if stat_ps != 0:
                raise error.TestError("Fail to run qemu-ga in guest")

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")

    # A backup of original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible fail patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}

    # Setup possible failure patterns
    if pm_enabled == 'not_set':
        fail_pat.append('not supported')
    if pm_enabled == 'no':
        fail_pat.append('disabled')

    if vm_state == 'paused':
        fail_pat.append('not responding')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if pm_enabled == 'not_set':
            try:
                if vmxml.pm:
                    del vmxml.pm
            except xcepts.LibvirtXMLNotFoundError:
                pass
        else:
            pm_xml = vm_xml.VMPMXML()
            if suspend_target == 'mem':
                pm_xml.mem_enabled = pm_enabled
            elif suspend_target == 'disk':
                pm_xml.disk_enabled = pm_enabled
            elif suspend_target == 'hybrid':
                if 'hybrid_enabled' in dir(pm_xml):
                    pm_xml.hybrid_enabled = pm_enabled
                else:
                    raise error.TestNAError("PM suspend type 'hybrid' is not "
                                            "supported yet.")
            vmxml.pm = pm_xml
        vmxml.sync()

        vm_xml.VMXML.set_agent_channel(vm_name)
        vm.start()

        # Create swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(test.tmpdir, "%s.save" % vm_name)
            session = vm.wait_for_login()
            check_vm_guestagent(session)
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, debug=True)
            if result.exit_status == 0:
                if fail_pat:
                    raise error.TestFail(
                        "Expected failed with %s, but run succeed"
                        ":\n%s" % (fail_pat, result))
            else:
                if not fail_pat:
                    raise error.TestFail(
                        "Expected success, but run failed:\n%s" % result)
                #if not any_pattern_match(fail_pat, result.stderr):
                if not any(p in result.stderr for p in fail_pat):
                    raise error.TestFail("Expected failed with one of %s, but "
                                         "failed with:\n%s" %
                                         (fail_pat, result))
            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Dompmwakeup should return false here
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail(
                        "Vm status is not paused before pm wakeup")
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail(
                        "Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    raise error.TestFail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target,
                                         **virsh_dargs)
                libvirt.check_exit_status(ret)
                # save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    raise error.TestFail("libvirtd crashed")

        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            # Cleanup
            session.close()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
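
Reduced to its core, the suspend/wake round trip exercised above is three calls. A minimal sketch, assuming the avocado-vt virsh wrapper (including its domstate helper) and a running, agent-enabled domain named "testvm" (a hypothetical name):

from virttest import virsh

vm_name = "testvm"  # hypothetical running domain with qemu-guest-agent

# Suspend the guest to memory (S3); this needs the guest agent.
res = virsh.dompmsuspend(vm_name, "mem", debug=True)
assert res.exit_status == 0, res.stderr

# The domain should now report the "pmsuspended" state.
print(virsh.domstate(vm_name).stdout.strip())

# Wake the domain back up.
res = virsh.dompmwakeup(vm_name, debug=True)
assert res.exit_status == 0, res.stderr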
Code example #50
def run(test, params, env):
    """
    Test command: virsh destroy.

    The command can destroy (stop) a domain.
    1.Prepare test environment.
    2.When the ibvirtd == "off", stop the libvirtd service.
    3.Perform virsh destroy operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    vm_ref = params.get("destroy_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", None)
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "LOCAL.EXAMPLE.COM")
    if vm_ref == "remote" and (remote_ip.count("EXAMPLE.COM")
                               or local_ip.count("EXAMPLE.COM")):
        test.cancel("Remote test parameters unchanged from default")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("destroy_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if vm_ref != "remote":
        status = virsh.destroy(vm_ref,
                               ignore_status=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               debug=True).exit_status
        output = ""
    else:
        status = 0
        try:
            remote_uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", "root",
                                          remote_pwd, "#")
            session.cmd_output('LANG=C')

            # Setup up remote to remote login in local host
            ssh_key.setup_remote_ssh_key(remote_ip, "root", remote_pwd,
                                         local_ip, "root", local_pwd)

            command = "virsh -c %s destroy %s" % (remote_uri, vm_name)
            status, output = session.cmd_status_output(command,
                                                       internal_timeout=5)
            session.close()
        except process.CmdError:
            status = 1

    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command! "
                      "Output:\n%s" % output)
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command! Output:\n%s" % output)
Code example #51
def run(test, params, env):
    """
    Test the virtual input devices

    1. prepare a guest with different input devices
    2. check whether the guest can be started
    3. check the qemu cmd line
    """
    def check_dumpxml():
        """
        Check whether the added devices are shown in the guest xml
        """
        pattern = "<input bus=\"%s\" type=\"%s\">" % (bus_type, input_type)
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        if pattern not in str(xml_after_adding_device):
            test.fail("Can not find the %s input device xml "
                      "in the guest xml file." % input_type)

    def check_qemu_cmd_line():
        """
        Check whether the added devices are shown in the qemu cmd line
        """
        # If the tested input device is a keyboard or mouse on the ps2 bus,
        # there is no keyboard or mouse entry in the qemu cmd line
        if bus_type == "ps2" and input_type in ["keyboard", "mouse"]:
            return
        with open('/proc/%s/cmdline' % vm.get_pid(), 'r') as cmdline_file:
            cmdline = cmdline_file.read()
        if bus_type == "usb" and input_type == "keyboard":
            pattern = r"-device.%s-kbd" % bus_type
        elif input_type == "passthrough":
            pattern = r"-device.%s-input-host-pci" % bus_type
        else:
            pattern = r"-device.%s-%s" % (bus_type, input_type)
        if not re.search(pattern, cmdline):
            test.fail("Can not find the %s input device "
                      "in qemu cmd line." % input_type)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    status_error = params.get("status_error", "no") == "yes"
    bus_type = params.get("bus_type")
    input_type = params.get("input_type")
    if input_type == "tablet":
        if not libvirt_version.version_compare(1, 2, 2):
            test.cancel("tablet input type is not supported "
                        "on the current version.")
    if input_type == "passthrough" or bus_type == "virtio":
        if not libvirt_version.version_compare(1, 3, 0):
            test.cancel("passthrough input type or virtio bus type "
                        "is not supported on current version.")

    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    if vm.is_alive():
        vm.destroy()

    try:
        # ps2 keyboard and ps2 mouse are default, no need to re-add the xml
        if not (bus_type == "ps2" and input_type in ["keyboard", "mouse"]):
            vm_xml.remove_all_device_by_type('input')
            input_dev = Input(type_name=input_type)
            input_dev.input_bus = bus_type
            if input_type == "passthrough":
                kbd_dev_name = glob.glob('/dev/input/by-path/*kbd')
                if not kbd_dev_name:
                    test.cancel("There is no keyboard device on this host.")
                logging.debug(
                    "keyboard %s is going to be passed through "
                    "to the guest.", kbd_dev_name[0])
                input_dev.source_evdev = kbd_dev_name[0]
            vm_xml.add_device(input_dev)
            try:
                vm_xml.sync()
            except Exception as error:
                if not status_error:
                    test.fail(
                        "Failed to define the guest after adding the %s input "
                        "device xml. Details: %s " % (input_type, error))
                logging.debug(
                    "This is the expected failure in negative cases.")
                return

        res = virsh.start(vm_name)
        if res.exit_status:
            if not status_error:
                test.fail("Failed to start vm after adding the %s input "
                          "device xml. Details: %s " % (input_type, res.stderr))
            logging.debug("This is the expected failure in negative cases.")
            return
        if status_error:
            test.fail(
                "Expected failure in negative cases, but the vm started "
                "successfully.")

        logging.debug("VM started successfully in postive cases.")
        check_dumpxml()
        check_qemu_cmd_line()
    finally:
        if vm.is_alive():
            virsh.destroy(vm_name)
        vm_xml_backup.sync()
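
The Input device class used above can also be exercised standalone to inspect the XML it generates before attaching it. A minimal sketch; the import path and the evdev node are assumptions here:

from virttest.libvirt_xml.devices.input import Input

# A virtio tablet needs only a type and a bus.
tablet = Input(type_name="tablet")
tablet.input_bus = "virtio"
print(tablet.xmltreefile)

# A passthrough device additionally needs a host evdev node.
kbd = Input(type_name="passthrough")
kbd.input_bus = "virtio"
kbd.source_evdev = "/dev/input/event1"  # placeholder host device
print(kbd.xmltreefile)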
Code example #52
def run(test, params, env):
    """
    Test command: virsh change-media.

    The command changes the media used by CD or floppy drives.

    Test steps:
    1. Prepare test environment.
    2. Perform virsh change-media operation.
    3. Recover test environment.
    4. Confirm the test result.
    """
    def is_attached(vmxml_devices, disk_type, source_file, target_dev):
        """
        Check attached device and disk exist or not.

        :param vmxml_devices: VMXMLDevices instance
        :param disk_type: disk's device type: cdrom or floppy
        :param source_file : disk's source file to check
        :param target_dev : target device name
        :return: True if a device with the given source file and target is found, False otherwise
        """
        disks = vmxml_devices.by_device_tag('disk')
        for disk in disks:
            if disk.device != disk_type:
                continue
            if disk.target['dev'] != target_dev:
                continue
            if disk.xmltreefile.find('source') is not None and \
                    'file' in disk.source.attrs:
                if disk.source.attrs['file'] != source_file:
                    continue
            else:
                continue
            # All three conditions met
            logging.debug("Find %s in given disk XML", source_file)
            return True
        logging.debug("Not find %s in gievn disk XML", source_file)
        return False

    def check_result(vm_name,
                     disk_source,
                     disk_type,
                     disk_target,
                     flags,
                     vm_state,
                     attach=True):
        """
        Check the test result of attach/detach-device command.
        """
        active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        active_attached = is_attached(active_vmxml.devices, disk_type,
                                      disk_source, disk_target)
        if vm_state != "transient":
            inactive_vmxml = vm_xml.VMXML.new_from_dumpxml(
                vm_name, options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices, disk_type,
                                            disk_source, disk_target)

        if flags.count("config") and not flags.count("live"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        test.fail("Inactive domain XML not updated"
                                  " when --config options used for"
                                  " attachment")
                    if vm_state != "shutoff":
                        if active_attached:
                            test.fail("Active domain XML updated"
                                      " when --config options used"
                                      " for attachment")
                else:
                    if inactive_attached:
                        test.fail("Inactive domain XML not updated"
                                  " when --config options used for"
                                  " detachment")
                    if vm_state != "shutoff":
                        if not active_attached:
                            test.fail("Active domain XML updated"
                                      " when --config options used"
                                      " for detachment")
        elif flags.count("live") and not flags.count("config"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        test.fail("Active domain XML not updated"
                                  " when --live options used for"
                                  " attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        test.fail("Inactive domain XML updated"
                                  " when --live options used for"
                                  " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        test.fail("Active domain XML not updated"
                                  " when --live options used for"
                                  " detachment")
                if vm_state in ["paused", "running"]:
                    if not inactive_attached:
                        test.fail("Inactive domain XML updated"
                                  " when --live options used for"
                                  " detachment")
        elif flags.count("live") and flags.count("config"):
            if attach:
                if vm_state in ["paused", "running"]:
                    if not active_attached:
                        test.fail("Active domain XML not updated"
                                  " when --live --config options"
                                  " used for attachment")
                    if not inactive_attached:
                        test.fail("Inactive domain XML not updated"
                                  " when --live --config options "
                                  "used for attachment")
            else:
                if vm_state in ["paused", "running"]:
                    if active_attached:
                        test.fail("Active domain XML not updated "
                                  "when --live --config options "
                                  "used for detachment")
                    if inactive_attached:
                        test.fail("Inactive domain XML not updated"
                                  " when --live --config options "
                                  "used for detachment")
        elif flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        test.fail("Active domain XML not updated"
                                  " when --current options used "
                                  "for attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        test.fail("Inactive domain XML updated "
                                  "when --current options used "
                                  "for live attachment")
                if vm_state == "shutoff" and not inactive_attached:
                    test.fail("Inactive domain XML not updated "
                              "when --current options used for "
                              "attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        test.fail("Active domain XML not updated"
                                  " when --current options used "
                                  "for detachment")
                if vm_state in ["paused", "running"]:
                    if not inactive_attached:
                        test.fail("Inactive domain XML updated "
                                  "when --current options used "
                                  "for live detachment")
                if vm_state == "shutoff" and inactive_attached:
                    test.fail("Inactive domain XML not updated "
                              "when --current options used for "
                              "detachment")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_ref = params.get("change_media_vm_ref")
    action = params.get("change_media_action")
    action_twice = params.get("change_media_action_twice", "")
    pre_vm_state = params.get("pre_vm_state")
    options = params.get("change_media_options")
    options_twice = params.get("change_media_options_twice", "")
    device_type = params.get("change_media_device_type", "cdrom")
    target_bus = params.get("change_media_target_bus", "ide")
    target_device = params.get("change_media_target_device", "hdc")
    init_iso_name = params.get("change_media_init_iso")
    old_iso_name = params.get("change_media_old_iso")
    new_iso_name = params.get("change_media_new_iso")
    virsh_dargs = {"debug": True, "ignore_status": True}

    if device_type not in ['cdrom', 'floppy']:
        test.cancel("Got a invalid device type:/n%s" % device_type)

    # Backup for recovery.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    old_iso = os.path.join(data_dir.get_tmp_dir(), old_iso_name)
    new_iso = os.path.join(data_dir.get_tmp_dir(), new_iso_name)

    if vm_ref == "name":
        vm_ref = vm_name

    if vm.is_alive():
        vm.destroy(gracefully=False)

    try:
        if not init_iso_name:
            init_iso = ""
        else:
            init_iso = os.path.join(data_dir.get_tmp_dir(), init_iso_name)

        # Prepare test files.
        libvirt.create_local_disk("iso", old_iso)
        libvirt.create_local_disk("iso", new_iso)

        # Check domain's disk device
        disk_blk = vm_xml.VMXML.get_disk_blk(vm_name)
        logging.info("disk_blk %s", disk_blk)
        if target_device not in disk_blk:
            if vm.is_alive():
                virsh.destroy(vm_name)
            logging.info("Adding device")
            libvirt.create_local_disk("iso", init_iso)
            disk_params = {
                "disk_type": "file",
                "device_type": device_type,
                "driver_name": "qemu",
                "driver_type": "raw",
                "target_bus": target_bus,
                "readonly": "yes"
            }
            libvirt.attach_additional_device(vm_name, target_device, init_iso,
                                             disk_params)

        vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Turn VM into certain state.
        if pre_vm_state == "running":
            logging.info("Starting %s..." % vm_name)
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
        elif pre_vm_state == "shutoff":
            logging.info("Shutting down %s..." % vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        elif pre_vm_state == "paused":
            logging.info("Pausing %s..." % vm_name)
            if vm.is_dead():
                vm.start()
                vm.wait_for_login().close()
            if not vm.pause():
                test.cancel("Can't pause the domain")
            time.sleep(5)
        elif pre_vm_state == "transient":
            logging.info("Creating %s..." % vm_name)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.cancel("Can't create the domain")

        # Libvirt will ignore --source when action is eject
        attach = True
        device_source = old_iso
        if action == "--eject ":
            source = ""
            attach = False
        else:
            source = device_source

        all_options = action + options + " " + source
        ret = virsh.change_media(vm_ref,
                                 target_device,
                                 all_options,
                                 ignore_status=True,
                                 debug=True)
        status_error = False
        if pre_vm_state == "shutoff":
            if options.count("live"):
                status_error = True
        elif pre_vm_state == "transient":
            if options.count("config"):
                status_error = True

        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()
            # For a paused vm, the change_media eject/update operation
            # must be executed again for it to take effect
            if ret.exit_status:
                if not action.count("insert") and not options.count("force"):
                    ret = virsh.change_media(vm_ref,
                                             target_device,
                                             all_options,
                                             ignore_status=True,
                                             debug=True)
        if not status_error and ret.exit_status:
            test.fail("Change media failed: %s" % ret.stderr.strip())
        libvirt.check_exit_status(ret, status_error)
        if not ret.exit_status:
            check_result(vm_name, device_source, device_type, target_device,
                         options, pre_vm_state, attach)

        if action_twice:
            if pre_vm_state == "paused":
                if not vm.pause():
                    test.fail("Can't pause the domain")
                time.sleep(5)
            attach = True
            device_source = new_iso
            if action_twice == "--eject ":
                #options_twice += " --force "
                source = ""
                attach = False
            else:
                source = device_source
            all_options = action_twice + options_twice + " " + source
            time.sleep(5)
            if options_twice == "--config" or pre_vm_state == "shutoff":
                wait_for_event = False
            else:
                wait_for_event = True
            ret = virsh.change_media(vm_ref,
                                     target_device,
                                     all_options,
                                     wait_for_event=wait_for_event,
                                     event_timeout=14,
                                     ignore_status=True,
                                     debug=True)
            status_error = False
            if pre_vm_state == "shutoff":
                if options_twice.count("live"):
                    status_error = True
            elif pre_vm_state == "transient":
                if options_twice.count("config"):
                    status_error = True

            if action_twice == "--insert ":
                if pre_vm_state in ["running", "paused"]:
                    if options in ["--force", "--current", "", "--live"]:
                        if options_twice.count("config"):
                            status_error = True
                    elif options == "--config":
                        if options_twice in ["--force", "--current", ""]:
                            status_error = True
                        elif options_twice in [
                                "--config --live"
                        ] and pre_vm_state in ["running"]:
                            status_error = False
                        elif options_twice.count("live"):
                            status_error = True
                elif pre_vm_state == "transient":
                    if ret.exit_status:
                        status_error = True
                elif pre_vm_state == "shutoff":
                    if options.count("live"):
                        status_error = True
            if vm.is_paused():
                vm.resume()
                vm.wait_for_login().close()
                # For a paused vm, the change_media eject/update operation
                # must be executed again for it to take effect
                if ret.exit_status and not action_twice.count("insert"):
                    ret = virsh.change_media(vm_ref,
                                             target_device,
                                             all_options,
                                             wait_for_event=wait_for_event,
                                             ignore_status=True,
                                             debug=True)
            if not status_error and ret.exit_status:
                test.fail("Change media failed: %s" % ret.stderr.strip())
            libvirt.check_exit_status(ret, status_error)
            if not ret.exit_status:
                check_result(vm_name, device_source, device_type,
                             target_device, options_twice, pre_vm_state,
                             attach)

        # Try to start vm.
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Remove disks
        if os.path.exists(init_iso):
            os.remove(init_iso)
        if os.path.exists(old_iso):
            os.remove(old_iso)
        if os.path.exists(new_iso):
            os.remove(new_iso)
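
Outside the state/flag matrix, a change-media cycle is just insert, update, and eject against the same target. A minimal sketch, assuming a running domain "testvm" with a cdrom at target hdc and two prepared ISO paths (all hypothetical):

from virttest import virsh

vm_name = "testvm"                             # hypothetical domain
target = "hdc"                                 # hypothetical cdrom target
old_iso, new_iso = "/tmp/a.iso", "/tmp/b.iso"  # placeholder media files

# Insert media, swap it with --update, then eject it again.
for opts in ("--insert %s" % old_iso,
             "--update %s" % new_iso,
             "--eject"):
    ret = virsh.change_media(vm_name, target, opts,
                             ignore_status=True, debug=True)
    assert ret.exit_status == 0, ret.stderr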
Code example #53
def run(test, params, env):
    """
    Test the virsh snapshot command with disks of various types.

    (1). Init the variables from params.
    (2). Create an image in the specified format.
    (3). Attach the disk to the vm.
    (4). Create a snapshot.
    (5). Revert the snapshot.
    (6). Cleanup.
    """
    # Init variables.
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no")

    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image", "emulated-image")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")
    export_options = params.get("export_options", "rw,no_root_squash")

    # Set the volume xml attribute dictionary: extract all params starting
    # with 'vol_', which are for setting volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in list(params.keys()):
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = [
        "dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster"
    ]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            test.cancel("%s not in support list %s" %
                        (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")
    no_memory_snap = "yes" == params.get("no_memory_snap", "no")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed" or image_format == "qed":
            test.cancel("QED support changed, check bug: "
                        "https://bugzilla.redhat.com/show_bug.cgi"
                        "?id=731570")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 was closed as WONTFIX, the support only
        # exists on 1.2.7 and higher
        if disk_source_protocol == 'gluster':
            test.cancel("Snapshot on glusterfs not support in "
                        "current version. Check more info with "
                        "https://bugzilla.redhat.com/buglist.cgi?"
                        "bug_id=1017289,1032370")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Path where libvirt stores the snapshot XML of the domain
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
    try:
        if replace_vm_disk:
            utlv.set_vm_disk(vm, params, tmp_dir)
            if multi_gluster_disks:
                new_params = params.copy()
                new_params["pool_name"] = "gluster-pool2"
                new_params["vol_name"] = "gluster-vol2"
                new_params["disk_target"] = "vdf"
                new_params["image_convert"] = 'no'
                utlv.set_vm_disk(vm, new_params, tmp_dir)

        if snapshot_with_pool:
            # Create the destination pool for the volume image to attach
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name,
                         pool_type,
                         pool_target,
                         emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"],
                         source_name=vol_name,
                         export_options=export_options)

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes via
                # libvirt; a logical pool can, but the volume format is not
                # supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name,
                                              vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = process.run(cmd,
                                             ignore_status=True,
                                             shell=True)
                    if cmd_result.exit_status:
                        test.cancel("Failed to format volume, %s" %
                                    cmd_result.stdout_text.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create a image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        if not multi_gluster_disks:
            # Do the attach action.
            out = process.run("qemu-img info %s" % img_path, shell=True)
            logging.debug("The img info is:\n%s" % out.stdout.strip())
            result = virsh.attach_disk(vm_name,
                                       source=img_path,
                                       target="vdf",
                                       extra=extra,
                                       debug=True)
            if result.exit_status:
                test.cancel("Failed to attach disk %s to VM."
                            "Detail: %s." % (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snap_xml = libvirt_xml.SnapshotXML()
            snapshot_name = "snapshot_test"
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test"
            if not no_memory_snap:
                if "--disk-only" not in options:
                    if snapshot_memory == "external":
                        memory_external = os.path.join(tmp_dir,
                                                       "snapshot_memory")
                        snap_xml.mem_snap_type = snapshot_memory
                        snap_xml.mem_file = memory_external
                        snapshot_external_disk.append(memory_external)
                    else:
                        snap_xml.mem_snap_type = snapshot_memory

            # Add all disks into xml file.
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            # Keep only storage disks, dropping devices such as 'cdrom'
            disks = [disk for disk in disks if disk.device == 'disk']
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = snapshot_disk
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                if snapshot_disk == 'external':
                    new_attrs = disk_xml.source.attrs
                    if 'file' in disk_xml.source.attrs:
                        new_file = "%s.snap" % disk_xml.source.attrs['file']
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None
                    elif 'name' in disk_xml.source.attrs:
                        new_name = "%s.snap" % disk_xml.source.attrs['name']
                        new_attrs.update({'name': new_name})
                        hosts = disk_xml.source.hosts
                    elif ('dev' in disk_xml.source.attrs
                          and disk_xml.type_name == 'block'):
                        # Use a local file as the external snapshot target for
                        # block type. A block device is treated as raw format
                        # by default, which does not fit an external disk
                        # snapshot target. A workaround is to run qemu-img
                        # again on the target.
                        disk_xml.type_name = 'file'
                        del new_attrs['dev']
                        new_file = "%s/blk_src_file.snap" % tmp_dir
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                    new_src_dict = {"attrs": new_attrs}
                    if hosts:
                        new_src_dict.update({"hosts": hosts})
                    disk_xml.source = disk_xml.new_disk_source(**new_src_dict)
                else:
                    del disk_xml.source

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options += " --xmlfile %s " % snapshot_xml_path

            if vm_state == "shut off":
                vm.destroy(gracefully=False)

            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    if re.search(
                            "live disk snapshot not supported with this "
                            "QEMU binary", out_err):
                        test.cancel(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As of commit d2e668e in 1.2.5, an internal active
                        # snapshot without memory state is rejected. Handle it
                        # as SKIP for now. This could be supported in future
                        # by bug:
                        if re.search(
                                "internal snapshot of a running VM" +
                                " must include the memory state", out_err):
                            test.cancel("Check Bug #1083345, %s" % out_err)

                    test.fail("Failed to create snapshot. Error:%s." % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    test.fail("Failed to create snapshot. Error:%s." %
                              snapshot_result.stderr.strip())
            snapshot_name = re.search(r"\d+",
                                      snapshot_result.stdout.strip()).group(0)

            if snapshot_current:
                snap_xml = libvirt_xml.SnapshotXML()
                new_snap = snap_xml.new_from_snapshot_dumpxml(
                    vm_name, snapshot_name)
                # update an element
                new_snap.creation_time = snapshot_name
                snapshot_xml_path = new_snap.xml
                options += "--redefine %s --current" % snapshot_xml_path
                snapshot_result = virsh.snapshot_create(vm_name,
                                                        options,
                                                        debug=True)
                if snapshot_result.exit_status:
                    test.fail("Failed to create snapshot --current."
                              "Error:%s." % snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                test.fail("Success to create snapshot in negative"
                          " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            test.fail("'%s' failed with '%s'" % (echo_cmd, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # As only internal snapshot revert works now, only do the revert with
        # internal snapshots and let the external cases pass without it.
        # Once external revert is also supported, move the following code back.
        if snapshot_disk == 'internal':
            # Destroy vm for snapshot revert.
            if not libvirt_version.version_compare(1, 2, 3):
                virsh.destroy(vm_name)
            # Revert snapshot.
            revert_options = ""
            if snapshot_revert_paused:
                revert_options += " --paused"
            revert_result = virsh.snapshot_revert(vm_name,
                                                  snapshot_name,
                                                  revert_options,
                                                  debug=True)
            if revert_result.exit_status:
                # Attempts to revert external snapshots will FAIL with an error
                # "revert to external disk snapshot not supported yet" or "revert
                # to external snapshot not supported yet" since d410e6f. Thus,
                # let's check for that and handle as a SKIP for now. Check bug:
                # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
                if re.search(
                        r"revert to external \w* ?snapshot not supported yet",
                        revert_result.stderr):
                    test.cancel(revert_result.stderr.strip())
                else:
                    test.fail("Revert snapshot failed. %s" %
                              revert_result.stderr.strip())

            if vm.is_dead():
                test.fail("Revert snapshot failed.")

            if snapshot_revert_paused:
                if vm.is_paused():
                    vm.resume()
                else:
                    test.fail("Revert command successed, but VM is not "
                              "paused after reverting with --paused"
                              "  option.")
            # login vm.
            session = vm.wait_for_login()
            # Check the result of revert.
            status, output = session.cmd_status_output("cat %s" %
                                                       tmp_file_path)
            logging.debug("After revert cat file output='%s'", output)
            if not status:
                test.fail("Tmp file exists, revert failed.")

            # Close the session.
            session.close()

        # Test deleting the snapshot without "--metadata"; deleting an
        # external disk snapshot will fail for now.
        # Only do this when snapshot creation succeeds, which is filtered
        # in the cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name,
                                                   snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        test.fail("Failed to delete snapshot.")
                    else:
                        if not os.path.exists(snap_xml_path):
                            test.fail("Snapshot xml file %s missing" %
                                      snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        test.fail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            test.fail("Snapshot xml file %s still" %
                                      snap_xml_path + " exist")

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_source_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            if multi_gluster_disks:
                brick_path = os.path.join(tmp_dir, "gluster-pool2")
                utlv.setup_or_cleanup_gluster(False, "gluster-vol2",
                                              brick_path)
            libvirtd.restart()

        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name,
                                 pool_type,
                                 pool_target,
                                 emulated_image,
                                 source_name=vol_name)
            except exceptions.TestFail as detail:
                libvirtd.restart()
                logging.error(str(detail))
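
Stripped of the pool and disk-type permutations, the create/revert/delete cycle validated above reduces to a few calls. A minimal sketch, assuming a running domain "testvm" (hypothetical) and internal snapshots:

import re

from virttest import virsh

vm_name = "testvm"  # hypothetical running domain

# Create an internal snapshot; virsh prints the generated snapshot name.
res = virsh.snapshot_create(vm_name, "", debug=True)
assert res.exit_status == 0, res.stderr
snap_name = re.search(r"\d+", res.stdout.strip()).group(0)

# Revert to it, then drop only the libvirt metadata.
assert virsh.snapshot_revert(vm_name, snap_name, "",
                             debug=True).exit_status == 0
virsh.snapshot_delete(vm_name, snap_name, "--metadata", debug=True)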
Code example #54
def run(test, params, env):
    """
    Test command: virsh reboot.

    Run a reboot command in the target domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh reboot operation.
    4.Recover test environment (libvirtd service).
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # run test case
    libvirtd = params.get("libvirtd", "on")
    vm_ref = params.get("reboot_vm_ref")
    status_error = ("yes" == params.get("status_error"))
    extra = params.get("reboot_extra", "")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_pwd = params.get("remote_pwd", "password")
    agent = ("yes" == params.get("reboot_agent", "no"))
    mode = params.get("reboot_mode", "")
    pre_domain_status = params.get("reboot_pre_domian_status", "running")
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        # Add or remove qemu-agent from guest before test
        try:
            vm.prepare_guest_agent(channel=agent, start=agent)
        except virt_vm.VMError as e:
            logging.debug(e)
            # qemu-guest-agent is not available on RHEL5
            test.cancel("qemu-guest-agent package is not available")

        if pre_domian_status == "shutoff":
            virsh.destroy(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()

        domid = vm.get_id()
        domuuid = vm.get_uuid()
        if vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref == "uuid":
            vm_ref = domuuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "remote_name":
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            complete_uri = libvirt_vm.complete_uri(local_ip)
            try:
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s reboot %s %s" % (complete_uri, vm_name,
                                                        mode)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
                if not status:
                    # operating on the guest before the reboot completes
                    # may result in data corruption
                    vm.wait_for_login().close()
            except (remote.LoginError, process.CmdError,
                    aexpect.ShellError) as e:
                logging.error("Exception: %s", str(e))
                status = -1
        if vm_ref != "remote_name":
            vm_ref = "%s" % vm_ref
            if extra:
                vm_ref += " %s" % extra
            cmdresult = virsh.reboot(vm_ref,
                                     mode,
                                     ignore_status=True,
                                     debug=True)
            status = cmdresult.exit_status
            if status:
                logging.debug("Error status, cmd error: %s", cmdresult.stderr)
                if not virsh.has_command_help_match('reboot', r'\s+--mode\s+'):
                    # old libvirt doesn't support reboot
                    status = -2
            else:
                vm.wait_for_login().close()
        output = virsh.dom_list(ignore_status=True).stdout.strip()

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or (not re.search(vm_name, output)):
                if status == -2:
                    test.cancel("Reboot command doesn't work on older libvirt "
                                "versions")
                test.fail("Run failed with right command")
    finally:
        xml_backup.sync()
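
The mode distinction exercised above matters because "--mode agent" requires qemu-guest-agent inside the guest, while "--mode acpi" only needs ACPI support. A minimal sketch of the basic call, assuming a running domain "testvm" (hypothetical):

from virttest import virsh

vm_name = "testvm"  # hypothetical running domain

# Ask the guest to reboot via ACPI; use mode="agent" instead when
# qemu-guest-agent is installed and its channel is configured.
result = virsh.reboot(vm_name, "acpi", ignore_status=True, debug=True)
if result.exit_status:
    raise AssertionError("reboot failed: %s" % result.stderr)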
Code example #55
File: virsh_setvcpus.py Project: Antique/virt-test
def run_virsh_setvcpus(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment, destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="--inactive", to_file=xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get current vcpu number.
        """
        vcpus_set = ""
        virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        vcpus_2 = root.getElementsByTagName("vcpu")
        for n in vcpus_2:
            vcpus_set += n.getAttribute("current")
            vcpus_set = int(vcpus_set)
        dom.unlink()
        return vcpus_set

    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()

    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()

    if domain == "remote_name":
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
        if virsh.has_command_help_match(command, "--live") is None:
            status_error = "yes"
        session = remote.remote_login(
            "ssh", remote_ssh_addr, "22", "root", remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                status_error = "yes"
                break
        status = virsh.setvcpus(
            dom_option, count_option, options, ignore_status=True).exit_status
        if pre_vm_state == "paused":
            virsh.resume(vm_name, ignore_status=True)
        if status_error == "no":
            if status == 0:
                if pre_vm_state == "shut off":
                    if options == "--config":
                        vcpus_set = len(vm.vcpuinfo())
                    elif options == "--current":
                        vcpus_set = get_current_vcpus()
                    elif options == "--maximum --config":
                        vcpus_set = ""
                        dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                        vcpus_set = dom.getElementsByTagName(
                            "vcpu")[0].firstChild.data
                        vcpus_set = int(vcpus_set)
                        dom.unlink()
                else:
                    vcpus_set = len(vm.vcpuinfo())
                if domain == "id":
                    cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                    output1 = commands.getoutput(cmd_chk)
                    logging.info("guest-info:\n%s" % output1)

    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
Code example #56
def run(test, params, env):
    """
    Test the automatic virtio bus assignment for passthrough devices

    1. prepare a passthrough device xml without bus defined
    2. start the guest and check if the device can be attached
    3. check if the new device is properly listed in guest xml
    """
    if not libvirt_version.version_compare(6, 3, 0):
        test.cancel('The feature of automatic assignment of virtio bus for '
                    'passthrough devices is supported since version 6.3.0')
    vm_name = params.get("main_vm", "avocado-vt-vm1")

    # Create a new passthrough device without bus assigned
    input_dev = Input(type_name="passthrough")
    input_dev.source_evdev = "/dev/input/event1"

    # Check whether host has passthrough device
    if process.run("ls /dev/input/event1", ignore_status=True).exit_status:
        test.cancel("Host doesn't have passthrough device")

    xml = input_dev.get_xml()
    logging.debug('Device xml to attach:\n{}'.format(input_dev.xmltreefile))
    logging.debug('New passthrough device XML is available at: {}'.format(xml))
    # Start the VM
    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    if vm.is_alive():
        vm.destroy()
    vm_xml.remove_all_device_by_type('input')
    # Sync so the removal is applied to the domain definition
    vm_xml.sync()

    try:
        vm.start()
        vm.wait_for_login().close()
        # Attach new device and check for result
        cmd_result = virsh.attach_device(vm_name,
                                         input_dev.get_xml(),
                                         debug=True)
        if cmd_result.exit_status != 0:
            test.error(cmd_result.stderr_text)
        # Get the VM XML and check for a new device
        vm_xml = VMXML.new_from_dumpxml(vm_name)
        device_list = vm_xml.get_devices()
        for device in device_list:
            if device['device_tag'] == 'input':
                device_xml = device['xml']
                # Create a new Input device instance and fill it with the
                # input device that was found
                input_device = Input(type_name="passthrough")
                input_device.set_xml(device_xml)
                if input_device.type_name == "passthrough":
                    with open(device_xml, 'r') as device_xml_file:
                        for line in device_xml_file:
                            logging.debug(line.rstrip())
                    if input_device.input_bus != "virtio":
                        test.fail(
                            "The newly attached passthrough device did not "
                            "get virtio assigned as its bus by default.")
                    else:
                        logging.debug(
                            "Newly added passthrough device had virtio "
                            "automatically assigned as its bus.")
    finally:
        if vm.is_alive():
            virsh.destroy(vm_name)
        vm_xml_backup.sync()
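For reference, a minimal sketch of the device-XML step performed above, using avocado-vt's Input wrapper; the import path and the evdev node are assumptions:

from virttest.libvirt_xml.devices.input import Input

# Build a passthrough input device with no bus attribute; on libvirt >= 6.3.0
# attaching it is expected to get bus='virtio' assigned automatically
input_dev = Input(type_name="passthrough")
input_dev.source_evdev = "/dev/input/event1"  # assumes this evdev node exists
print(input_dev.xmltreefile)  # dump the generated device XML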
Code Example #57
File: virsh_event.py  Project: OnePaaS/tp-libvirt
 def trigger_events(events_list=None):
     """
     Trigger various events in events_list
     """
     # Avoid a shared mutable default argument
     events_list = events_list or []
     expected_events_list = []
     tmpdir = data_dir.get_tmp_dir()
     save_path = os.path.join(tmpdir, "vm_event.save")
     new_disk = os.path.join(tmpdir, "new_disk.img")
     try:
         for event in events_list:
             if event in ["start", "restore"]:
                 if vm.is_alive():
                     vm.destroy()
             else:
                 if not vm.is_alive():
                     vm.start()
                     vm.wait_for_login().close()
             if event == "start":
                 virsh.start(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Started Booted")
                 vm.wait_for_login().close()
             elif event == "save":
                 virsh.save(vm_name, save_path, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Stopped Saved")
             elif event == "restore":
                 if not os.path.exists(save_path):
                     logging.error("%s not exist", save_path)
                 else:
                     virsh.restore(save_path, **virsh_dargs)
                     expected_events_list.append("'lifecycle' for %s:" " Started Restored")
             elif event == "destroy":
                 virsh.destroy(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Stopped Destroyed")
             elif event == "reset":
                 virsh.reset(vm_name, **virsh_dargs)
                 expected_events_list.append("'reboot' for %s")
             elif event == "vcpupin":
                 virsh.vcpupin(vm_name, "0", "0", **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:" "\n\tcputune.vcpupin0: 0")
             elif event == "emulatorpin":
                 virsh.emulatorpin(vm_name, "0", **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:" "\n\tcputune.emulatorpin: 0")
             elif event == "setmem":
                 virsh.setmem(vm_name, 1048576, **virsh_dargs)
                 expected_events_list.append("'balloon-change' for %s:")
             elif event == "detach-disk":
                 if not os.path.exists(new_disk):
                     open(new_disk, "a").close()
                 # Attach the disk first; this event will not be caught
                 virsh.attach_disk(vm_name, new_disk, "vdb", **virsh_dargs)
                 virsh.detach_disk(vm_name, "vdb", **virsh_dargs)
                 expected_events_list.append("'device-removed' for %s:" " virtio-disk1")
             else:
                 raise error.TestError("Unsupported event: %s" % event)
             # Events may not be received immediately
             time.sleep(3)
     finally:
         if os.path.exists(save_path):
             os.unlink(save_path)
         if os.path.exists(new_disk):
             os.unlink(new_disk)
     # Return outside the finally block so exceptions are not swallowed
     return expected_events_list
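The returned strings keep a '%s' placeholder for the domain name; a small sketch of how a caller might expand them before matching virsh event output (the domain name is hypothetical):

expected = ["'lifecycle' for %s: Started Booted",
            "'lifecycle' for %s: Stopped Destroyed"]
vm_name = "avocado-vt-vm1"  # hypothetical domain name
# Fill in the domain name before comparing against virsh event output
patterns = [event % vm_name for event in expected]
for pattern in patterns:
    print(pattern)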
Code Example #58
def run(test, params, env):
    """
    Test command: virsh domstate.

    1. Prepare test environment.
    2. When libvirtd == "off", stop the libvirtd service.
    3. Perform the virsh domstate operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    libvirtd_state = params.get("libvirtd", "on")
    vm_ref = params.get("domstate_vm_ref")
    status_error = (params.get("status_error", "no") == "yes")
    extra = params.get("domstate_extra", "")
    vm_action = params.get("domstate_vm_action", "")
    vm_oncrash_action = params.get("domstate_vm_oncrash")
    reset_action = "yes" == params.get("reset_action", "no")
    dump_option = params.get("dump_option", "")
    start_action = params.get("start_action", "normal")
    kill_action = params.get("kill_action", "normal")
    check_libvirtd_log = params.get("check_libvirtd_log", "no")
    err_msg = params.get("err_msg", "")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # Back up xml file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Back up qemu.conf
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    # Config libvirtd log
    if check_libvirtd_log == "yes":
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_log_file = os.path.join(test.tmpdir, "libvirtd.log")
        libvirtd_conf["log_level"] = '1'
        libvirtd_conf["log_filters"] = ('"1:json 1:libvirt 1:qemu 1:monitor '
                                        '3:remote 4:event"')
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_file
        logging.debug("the libvirtd config file content is:\n %s" %
                      libvirtd_conf)
        libvirtd.restart()

    # Get image file
    image_source = vm.get_first_disk_devices()['source']
    logging.debug("image source: %s" % image_source)
    new_image_source = image_source + '.rename'

    dump_path = os.path.join(test.tmpdir, "dump/")
    logging.debug("dump_path: %s", dump_path)
    try:
        os.mkdir(dump_path)
    except OSError:
        # If the path already exists then pass
        pass
    dump_file = ""
    try:
        # Use less guest memory so that dumping the core does not take
        # long enough to time out the testcase
        if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
            memory_value = int(params.get("memory_value", "2097152"))
            memory_unit = params.get("memory_unit", "KiB")
            vmxml.set_memory(memory_value)
            vmxml.set_memory_unit(memory_unit)
            logging.debug(vmxml)
            vmxml.sync()

        if vm_action == "crash":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml.on_crash = vm_oncrash_action
            if not vmxml.xmltreefile.find('devices').findall('panic'):
                # Add <panic> device to domain
                panic_dev = Panic()
                if "ppc" not in platform.machine():
                    panic_dev.addr_type = "isa"
                    panic_dev.addr_iobase = "0x505"
                vmxml.add_device(panic_dev)
            vmxml.sync()
            # Config auto_dump_path in qemu.conf
            qemu_conf.auto_dump_path = dump_path
            libvirtd.restart()
            if vm_oncrash_action in ['coredump-destroy', 'coredump-restart']:
                dump_file = dump_path + "*" + vm_name[:20] + "-*"
            # Start VM and check the panic device
            virsh.start(vm_name, ignore_status=False)
            vmxml_new = vm_xml.VMXML.new_from_dumpxml(vm_name)
            # Cancel this test if no panic device is found
            if not vmxml_new.xmltreefile.find('devices').findall('panic'):
                test.cancel("No 'panic' device in the guest. Maybe your "
                            "libvirt version doesn't support it.")
        try:
            if vm_action == "suspend":
                virsh.suspend(vm_name, ignore_status=False)
            elif vm_action == "resume":
                virsh.suspend(vm_name, ignore_status=False)
                virsh.resume(vm_name, ignore_status=False)
            elif vm_action == "destroy":
                virsh.destroy(vm_name, ignore_status=False)
            elif vm_action == "start":
                virsh.destroy(vm_name, ignore_status=False)
                if start_action == "rename":
                    # rename the guest image file to make guest fail to start
                    os.rename(image_source, new_image_source)
                    virsh.start(vm_name, ignore_status=True)
                else:
                    virsh.start(vm_name, ignore_status=False)
            elif vm_action == "kill":
                if kill_action == "stop_libvirtd":
                    libvirtd.stop()
                    utils_misc.kill_process_by_pattern(vm_name)
                    libvirtd.restart()
                elif kill_action == "reboot_vm":
                    virsh.reboot(vm_name, ignore_status=False)
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
                else:
                    utils_misc.kill_process_tree(vm.get_pid(), signal.SIGKILL)
            elif vm_action == "crash":
                session = vm.wait_for_login()
                session.cmd("service kdump stop", ignore_all_errors=True)
                # Enable sysRq
                session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                # Send ALT-SysRq-c to crash the VM. The command will not
                # return since the vm crashes, so use a short timeout for
                # the 'destroy' and 'preserve' actions. The 'restart',
                # 'coredump-restart' and 'coredump-destroy' actions need
                # more time to dump the core file or restart the OS, so
                # keep the default session command timeout (60s)
                try:
                    if vm_oncrash_action in ['destroy', 'preserve']:
                        timeout = 3
                    else:
                        timeout = 60
                    session.cmd("echo c > /proc/sysrq-trigger",
                                timeout=timeout)
                except (ShellTimeoutError, ShellProcessTerminatedError):
                    pass
                session.close()
            elif vm_action == "dump":
                dump_file = dump_path + "*" + vm_name + "-*"
                virsh.dump(vm_name,
                           dump_file,
                           dump_option,
                           ignore_status=False)
        except process.CmdError as detail:
            test.error("Guest prepare action error: %s" % detail)

        if libvirtd_state == "off":
            libvirtd.stop()

        if vm_ref == "remote":
            remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
            local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
            remote_pwd = params.get("remote_pwd", None)
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("Test 'remote' parameters not setup")
            status = 0
            try:
                remote_uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22", "root",
                                              remote_pwd, "#")
                session.cmd_output('LANG=C')
                command = "virsh -c %s domstate %s" % (remote_uri, vm_name)
                status, output = session.cmd_status_output(command,
                                                           internal_timeout=5)
                session.close()
            except process.CmdError:
                status = 1
        else:
            result = virsh.domstate(vm_ref,
                                    extra,
                                    ignore_status=True,
                                    debug=True)
            status = result.exit_status
            output = result.stdout.strip()

        # check status_error
        if status_error:
            if not status:
                test.fail("Run successfully with wrong command!")
        else:
            if status or not output:
                test.fail("Run failed with right command")
            if extra.count("reason"):
                if vm_action == "suspend":
                    # Destroy the vm now; otherwise tearing down a
                    # suspended vm takes a long time
                    virsh.destroy(vm_name)
                    if not output.count("user"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "resume":
                    if not output.count("unpaused"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "destroy":
                    if not output.count("destroyed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "start":
                    if start_action == "rename":
                        if not output.count("shut off (failed)"):
                            test.fail(err_msg % vm_action)
                    else:
                        if not output.count("booted"):
                            test.fail(err_msg % vm_action)
                elif vm_action == "kill":
                    if not output.count("crashed"):
                        test.fail(err_msg % vm_action)
                elif vm_action == "crash":
                    if not check_crash_state(output, vm_oncrash_action,
                                             vm_name, dump_file):
                        test.fail(err_msg % vm_action)
                    # The VM will be in the preserved state; perform virsh
                    # reset and check that the VM reboots and that domstate
                    # moves from crashed to running, since a bug was
                    # observed here
                    if vm_oncrash_action == "preserve" and reset_action:
                        virsh_dargs = {'debug': True, 'ignore_status': True}
                        ret = virsh.reset(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        ret = virsh.domstate(vm_name, extra,
                                             **virsh_dargs).stdout.strip()
                        if "paused (crashed)" not in ret:
                            test.fail("vm fails to change state from crashed"
                                      " to paused after virsh reset")
                        # it will be in paused (crashed) state after reset
                        # and resume is required for the vm to reboot
                        ret = virsh.resume(vm_name, **virsh_dargs)
                        libvirt.check_exit_status(ret)
                        vm.wait_for_login()
                        cmd_output = virsh.domstate(vm_name,
                                                    '--reason').stdout.strip()
                        if "running" not in cmd_output:
                            test.fail("guest state failed to get updated")
                    if vm_oncrash_action in [
                            'coredump-destroy', 'coredump-restart'
                    ]:
                        # dump_file is a glob pattern; check that a matching
                        # core dump was actually created (uses the standard
                        # glob module)
                        if not glob.glob(dump_file):
                            test.fail("Core dump file is not created in dump "
                                      "path: %s" % dump_path)
                    # Covers bug 1178652
                    if (vm_oncrash_action == "rename-restart"
                            and check_libvirtd_log == "yes"):
                        libvirtd.restart()
                        if not os.path.exists(libvirtd_log_file):
                            test.fail("Expected log file %s does not exist" %
                                      libvirtd_log_file)
                        cmd = ("grep -nr '%s' %s" %
                               (err_msg, libvirtd_log_file))
                        if not process.run(cmd, ignore_status=True,
                                           shell=True).exit_status:
                            test.fail(
                                "Found unexpected error message %s in log "
                                "file: %s." % (err_msg, libvirtd_log_file))
                elif vm_action == "dump":
                    if dump_option == "--live":
                        if not output.count("running (unpaused)"):
                            test.fail(err_msg % vm_action)
                    elif dump_option == "--crash":
                        if not output.count("shut off (crashed)"):
                            test.fail(err_msg % vm_action)
            if vm_ref == "remote":
                if not (re.search("running", output) or re.search(
                        "blocked", output) or re.search("idle", output)):
                    test.fail("Run failed with right command")
    finally:
        qemu_conf.restore()
        if check_libvirtd_log == "yes":
            libvirtd_conf.restore()
            if os.path.exists(libvirtd_log_file):
                os.remove(libvirtd_log_file)
        libvirtd.restart()
        if vm_action == "start" and start_action == "rename":
            os.rename(new_image_source, image_source)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
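A minimal sketch of the core check this test automates, assuming the virttest virsh wrapper and a running guest named 'demo':

from virttest import virsh

# 'domstate --reason' reports state plus reason, e.g. "running (booted)"
# or "paused (user)"
result = virsh.domstate("demo", extra="--reason",
                        ignore_status=True, debug=True)
output = result.stdout.strip()
if result.exit_status or "running" not in output:
    print("unexpected domain state: %s" % output)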
Code Example #59
File: iface_hotplug.py  Project: xiaodwan/tp-libvirt
def run(test, params, env):
    """
    Test interface xml options.

    1. Prepare test environment, destroy or suspend a VM.
    2. Edit xml and start the domain.
    3. Perform test operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.wait_for_login()

    def create_iface_xml(mac=None):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        iface.source = iface_source
        iface.model = iface_model if iface_model else "virtio"
        if iface_target:
            iface.target = {'dev': iface_target}
        if mac:
            iface.mac_address = mac
        if iface_rom:
            iface.rom = eval(iface_rom)
        logging.debug("Create new interface xml: %s", iface)
        return iface

    def get_all_mac_in_vm():
        """
        get the mac address list of all the interfaces from a running vm os

        return: a list of the mac addresses
        """
        mac_list = []
        interface_list = vm.get_interfaces()
        for iface_ in interface_list:
            mac_ = vm.get_interface_mac(iface_)
            mac_list.append(mac_)
        return mac_list

    # Interface specific attributes.
    iface_num = params.get("iface_num", '1')
    iface_type = params.get("iface_type", "network")
    iface_source = eval(params.get("iface_source",
                                   "{'network':'default'}"))
    iface_model = params.get("iface_model")
    iface_target = params.get("iface_target")
    iface_mac = params.get("iface_mac")
    iface_rom = params.get("iface_rom")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_iface = "yes" == params.get("attach_iface", "no")
    attach_option = params.get("attach_option", "")
    detach_device = "yes" == params.get("detach_device")
    stress_test = "yes" == params.get("stress_test")
    restart_libvirtd = "yes" == params.get("restart_libvirtd",
                                           "no")
    start_vm = "yes" == params.get("start_vm", "yes")
    options_test = "yes" == params.get("options_test", "no")
    username = params.get("username")
    password = params.get("password")
    poll_timeout = int(params.get("poll_timeout", 10))
    err_msgs1 = params.get("err_msgs1")
    err_msgs2 = params.get("err_msgs2")
    err_msg_rom = params.get("err_msg_rom")
    del_pci = "yes" == params.get("add_pci", "no")
    del_mac = "yes" == params.get("del_mac", "no")
    set_pci = "yes" == params.get("set_pci", "no")
    set_mac = "yes" == params.get("set_mac", "no")
    status_error = "yes" == params.get("status_error", "no")
    pci_addr = params.get("pci")
    check_mac = "yes" == params.get("check_mac", "no")
    vnet_mac = params.get("vnet_mac", None)

    # stress_test requires a detach operation
    stress_test_detach_device = False
    stress_test_detach_interface = False
    if stress_test:
        if attach_device:
            stress_test_detach_device = True
        if attach_iface:
            stress_test_detach_interface = True

    # The following detach-device step also using attach option
    detach_option = attach_option

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    #iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()

    # Check virsh command option
    check_cmds = []
    sep_options = attach_option.split()
    logging.debug("sep_options: %s" % sep_options)
    for sep_option in sep_options:
        if attach_device and sep_option:
            check_cmds.append(('attach-device', sep_option))
        if attach_iface and sep_option:
            check_cmds.append(('attach-interface', sep_option))
        if (detach_device or stress_test_detach_device) and sep_option:
            check_cmds.append(('detach-device', sep_option))
        if stress_test_detach_interface and sep_option:
            check_cmds.append(('detach-interface', sep_option))

    for cmd, option in check_cmds:
        libvirt.virsh_cmd_has_option(cmd, option)

    try:
        try:
            # Attach an interface when vm is running
            iface_list = []
            err_msgs = ("No more available PCI slots",
                        "No more available PCI addresses")
            if not start_vm:
                virsh.destroy(vm_name)
            for i in range(int(iface_num)):
                if attach_device:
                    logging.info("Try to attach device loop %s" % i)
                    if iface_mac:
                        mac = iface_mac
                        iface_xml_obj = create_iface_xml(mac)
                    elif check_mac:
                        iface_xml_obj = create_iface_xml()
                    else:
                        mac = utils_net.generate_mac_address_simple()
                        iface_xml_obj = create_iface_xml(mac)
                    iface_xml_obj.xmltreefile.write()
                    if check_mac:
                        mac_bef = get_all_mac_in_vm()
                    ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                              flagstr=attach_option,
                                              ignore_status=True,
                                              debug=True)
                elif attach_iface:
                    logging.info("Try to attach interface loop %s" % i)
                    if iface_mac:
                        mac = iface_mac
                    else:
                        mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (iface_type, iface_source['network'],
                                iface_model, mac, attach_option))
                    ret = virsh.attach_interface(vm_name, options,
                                                 ignore_status=True)
                if ret.exit_status:
                    if any([msg in ret.stderr for msg in err_msgs]):
                        logging.debug("No more pci slots, can't attach more devices")
                        break
                    elif ret.stderr.count("doesn't support option %s" % attach_option):
                        test.cancel(ret.stderr)
                    elif err_msgs1 in ret.stderr:
                        logging.debug("option %s is not supported when the "
                                      "domain's running state is %s"
                                      % (attach_option, vm.is_alive()))
                        if start_vm or ("--live" not in sep_options and attach_option):
                            test.fail("returned 'not supported', which is "
                                      "unexpected")
                    elif err_msgs2 in ret.stderr:
                        logging.debug("options %s are mutually exclusive"
                                      % attach_option)
                        if not ("--current" in sep_options and len(sep_options) > 1):
                            test.fail("returned 'mutually exclusive', which "
                                      "is unexpected")
                    elif err_msg_rom and err_msg_rom in ret.stderr:
                        logging.debug("Attach failed with expect err msg: %s" % err_msg_rom)
                    else:
                        test.fail("Failed to attach-interface: %s" % ret.stderr.strip())
                elif stress_test:
                    if attach_device:
                        # Detach the device immediately for stress test
                        ret = virsh.detach_device(vm_name, iface_xml_obj.xml,
                                                  flagstr=detach_option,
                                                  ignore_status=True)
                    elif attach_iface:
                        # Detach the device immediately for stress test
                        options = ("--type %s --mac %s %s" %
                                   (iface_type, mac, detach_option))
                        ret = virsh.detach_interface(vm_name, options,
                                                     ignore_status=True)
                    libvirt.check_exit_status(ret)
                else:
                    if attach_device:
                        if check_mac:
                            mac_aft = get_all_mac_in_vm()
                            add_mac = list(set(mac_aft).difference(set(mac_bef)))
                            try:
                                mac = add_mac[0]
                                logging.debug("The mac address of the attached interface is %s" % mac)
                            except IndexError:
                                test.fail("Can not find the new added interface in the guest os!")
                        iface_list.append({'mac': mac,
                                           'iface_xml': iface_xml_obj})
                    elif attach_iface:
                        iface_list.append({'mac': mac})
            # Restart libvirtd service
            if restart_libvirtd:
                libvirtd.restart()
                # After restarting libvirtd, the old console is invalidated,
                # so we need to create a new serial console
                vm.cleanup_serial_console()
                vm.create_serial_console()
            # in the options test, check whether the interface is attached
            # in the current state when attach returns success

            def check_iface_exist():
                try:
                    session = vm.wait_for_serial_login(username=username,
                                                       password=password)
                    if utils_net.get_linux_ifname(session, iface['mac']):
                        return True
                    else:
                        logging.debug("can not find interface in vm")
                except Exception:
                    return False
            if options_test:
                for iface in iface_list:
                    if 'mac' in iface:
                        # Check interface in dumpxml output
                        if_attr = vm_xml.VMXML.get_iface_by_mac(vm_name,
                                                                iface['mac'])
                        if vm.is_alive() and attach_option == "--config":
                            if if_attr:
                                test.fail("interface should not exists "
                                          "in current live vm while "
                                          "attached by --config")
                        else:
                            if if_attr:
                                logging.debug("interface %s found current "
                                              "state in xml" % if_attr['mac'])
                            else:
                                test.fail("no interface found in "
                                          "current state in xml")

                        if if_attr:
                            if if_attr['type'] != iface_type or \
                                    if_attr['source'] != \
                                    iface_source['network']:
                                test.fail("Interface attribute doesn't "
                                          "match attachment options")
                        # check interface in vm only when vm is active
                        if vm.is_alive():
                            logging.debug("check interface in current state "
                                          "in vm")

                            if not utils_misc.wait_for(check_iface_exist, timeout=20):
                                if attach_option != "--config":
                                    test.fail("Can't see interface "
                                              "in current state in vm")
                                else:
                                    logging.debug("interface not seen in vm, "
                                                  "as expected with --config")
                            else:
                                logging.debug("found interface in "
                                              "current state in vm")
                            # in the options test, if the attach was
                            # performed while the vm was running, destroy
                            # and start the vm to check again
                            vm.destroy()

            # Start the domain if needed
            if vm.is_dead():
                vm.start()
            session = vm.wait_for_serial_login(username=username,
                                               password=password)

            # check if interface is attached
            for iface in iface_list:
                if 'mac' in iface:
                    logging.debug("check interface in xml")
                    # Check interface in dumpxml output
                    if_attr = vm_xml.VMXML.get_iface_by_mac(vm_name,
                                                            iface['mac'])
                    logging.debug(if_attr)
                    if if_attr:
                        logging.debug("interface {} is found in xml".
                                      format(if_attr['mac']))
                        if (if_attr['type'] != iface_type or
                                if_attr['source'] != iface_source['network']):
                            test.fail("Interface attribute doesn't "
                                      "match attachment options")
                        if options_test and start_vm and attach_option \
                                in ("--current", "--live", ""):
                            test.fail("interface should not exists when "
                                      "restart vm in options_test")
                    else:
                        logging.debug("no interface found in xml")
                        if options_test and start_vm and attach_option in \
                                ("--current", "--live", ""):
                            logging.debug("interface not exists next state "
                                          "in xml with %s" % attach_option)
                        else:
                            test.fail("Can't see interface in dumpxml")

                    # Check interface on guest
                    if not utils_misc.wait_for(check_iface_exist, timeout=20):
                        logging.debug("can't see interface next state in vm")
                        if start_vm and attach_option in \
                                ("--current", "--live", ""):
                            logging.debug("it is expected")
                        else:
                            test.fail("should find interface "
                                      "but no seen in next state in vm")
                    if vnet_mac:
                        # get the name of the backend tap device
                        iface_params = vm_xml.VMXML.get_iface_by_mac(vm_name, mac)
                        target_name = iface_params['target']['dev']
                        # check the tap device mac on host
                        tap_info = process.run("ip l show %s" % target_name, shell=True, ignore_status=True).stdout_text
                        logging.debug("vnet_mac should be %s" % vnet_mac)
                        logging.debug("check on host for the details of tap device %s: %s" % (target_name, tap_info))
                        if vnet_mac not in tap_info:
                            test.fail("The mac address of the tap device do not match!")
            # Detach hot/cold-plugged interface at last
            if detach_device:
                logging.debug("detach interface here:")
                if attach_device:
                    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    iface_xml_ls = vmxml.get_devices("interface")
                    iface_xml_ls_det = iface_xml_ls[1:]
                    for iface_xml_det in iface_xml_ls_det:
                        if del_mac:
                            iface_xml_det.del_mac_address()
                        if del_pci:
                            iface_xml_det.del_address()
                        if set_mac:
                            mac = utils_net.generate_mac_address_simple()
                            iface_xml_det.set_mac_address(mac)
                        if set_pci:
                            pci_dict = ast.literal_eval(pci_addr)
                            addr = iface_xml_det.new_iface_address(**{"attrs": pci_dict})
                            iface_xml_det.set_address(addr)
                        ori_pid_libvirtd = process.getoutput("pidof libvirtd")
                        ret = virsh.detach_device(vm_name,
                                                  iface_xml_det.xml,
                                                  flagstr="",
                                                  ignore_status=True)
                        libvirt.check_exit_status(ret, status_error)
                        aft_pid_libvirtd = process.getoutput("pidof libvirtd")
                        if not libvirtd.is_running() or ori_pid_libvirtd != aft_pid_libvirtd:
                            test.fail("Libvirtd crashed after detaching a "
                                      "nonexistent interface")
                else:
                    for iface in iface_list:
                        options = ("%s --mac %s" %
                                   (iface_type, iface['mac']))
                        ret = virsh.detach_interface(vm_name, options,
                                                     ignore_status=True)
                        libvirt.check_exit_status(ret)

                # Check if interface was detached
                if not status_error:
                    for iface in iface_list:
                        if 'mac' in iface:
                            polltime = time.time() + poll_timeout
                            while True:
                                # Check interface in dumpxml output
                                if not vm_xml.VMXML.get_iface_by_mac(vm_name,
                                                                     iface['mac']):
                                    break
                                else:
                                    time.sleep(2)
                                    if time.time() > polltime:
                                        test.fail("Interface still "
                                                  "exists after detachment")
            session.close()
        except virt_vm.VMStartError as e:
            logging.info(str(e))
            test.fail('VM failed to start:\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
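A hedged sketch of the plain attach/detach round trip the stress path above exercises, assuming the virttest wrappers, the default libvirt network, and a running guest named 'avocado-vt-vm1':

from virttest import utils_net, virsh

vm_name = "avocado-vt-vm1"  # hypothetical guest
mac = utils_net.generate_mac_address_simple()
# Same option layout the test builds: type, source, model, mac
options = "network default --model virtio --mac %s" % mac
ret = virsh.attach_interface(vm_name, options, ignore_status=True)
if ret.exit_status == 0:
    virsh.detach_interface(vm_name, "--type network --mac %s" % mac,
                           ignore_status=True)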
Code Example #60
def run(test, params, env):
    """
    Test the sound virtual devices
    1. prepare a guest with different sound devices
    2. check whether the guest can be started
    3. check the xml and qemu cmd line
    """
    # Sound element supported since 0.4.3.
    if not libvirt_version.version_compare(0, 4, 3):
        test.cancel("Sound device is not supported " "on current version.")
    # Codec sub-element supported since 0.9.13
    codec_type = params.get("codec_type", None)
    if codec_type and not libvirt_version.version_compare(0, 9, 13):
        test.cancel("codec sub-element is not supported "
                    "on current version.")

    def check_dumpxml():
        """
        Check whether the added devices are shown in the guest xml
        """
        pattern = "<sound model=\"%s\">" % sound_model
        # Check sound model
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        if pattern not in str(xml_after_adding_device):
            test.fail("Can not find the %s sound device xml "
                      "in the guest xml file." % sound_model)
        # Check codec type
        if codec_type:
            pattern = "<codec type=\"%s\" />" % codec_type
            if pattern not in str(xml_after_adding_device):
                test.fail("Can not find the %s codec xml for sound dev "
                          "in the guest xml file." % codec_type)

    def check_qemu_cmd_line():
        """
        Check whether the added devices are shown in the qemu cmd line
        """
        if not vm.get_pid():
            test.fail('VM pid file missing.')
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
        # Check sound model
        if sound_model == "ac97":
            pattern = r"-device.*AC97"
        elif sound_model == "ich6":
            pattern = r"-device.*intel-hda"
        else:
            pattern = r"-device.*ich9-intel-hda"
        if not re.search(pattern, cmdline):
            test.fail("Can not find the %s sound device "
                      "in qemu cmd line." % sound_model)
        # Check codec type
        if sound_model in ["ich6", "ich9"]:
            if codec_type == "micro":
                pattern = r"-device.*hda-micro"
            else:
                # Duplex is the default codec on the qemu cli even when
                # codec is not set, but before 0.9.13 there was no
                # codec_type and hence no default
                if libvirt_version.version_compare(0, 9, 13):
                    pattern = r"-device.*hda-duplex"
            if not re.search(pattern, cmdline):
                test.fail("Can not find the %s codec for sound dev "
                          "in qemu cmd line." % codec_type)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    status_error = params.get("status_error", "no") == "yes"
    sound_model = params.get("sound_model")

    # AC97 sound model supported since 0.6.0
    if sound_model == "ac97":
        if not libvirt_version.version_compare(0, 6, 0):
            test.cancel("ac97 sound model is not supported "
                        "on current version.")
    # Ich6 sound model supported since 0.8.8
    if sound_model == "ich6":
        if not libvirt_version.version_compare(0, 8, 8):
            test.cancel("ich6 sound model is not supported "
                        "on current version.")
    # Ich9 sound model supported since 1.1.3
    if sound_model == "ich9":
        if not libvirt_version.version_compare(1, 1, 3):
            test.cancel("ich9 sound model is not supported "
                        "on current version.")

    vm_xml = VMXML.new_from_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    logging.debug("vm xml is %s", vm_xml_backup)

    if vm.is_alive():
        vm.destroy()

    try:
        vm_xml.remove_all_device_by_type('sound')
        sound_dev = Sound()
        sound_dev.model_type = sound_model
        if codec_type:
            sound_dev.codec_type = codec_type
        vm_xml.add_device(sound_dev)
        vm_xml.sync()
        virsh.start(vm_name, ignore_status=False)
        check_dumpxml()
        check_qemu_cmd_line()
    finally:
        if vm.is_alive():
            virsh.destroy(vm_name)
        vm_xml_backup.sync()
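check_qemu_cmd_line() boils down to grepping /proc/&lt;pid&gt;/cmdline; a standalone sketch that takes the guest's qemu PID as an argument (the test obtains it via vm.get_pid()):

import re
import sys

pid = int(sys.argv[1])  # qemu process id of the guest
with open('/proc/%d/cmdline' % pid) as cmdline_file:
    # /proc cmdline arguments are NUL-separated; join them for matching
    cmdline = cmdline_file.read().replace('\0', ' ')
if re.search(r"-device.*intel-hda", cmdline):
    print("intel-hda sound device present on the qemu command line")
else:
    print("no intel-hda sound device found")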