Code Example #1
def run_restart_save_restore(params, libvirtd, vm):
    """
    Save and restore a domain after restarting the daemon.
    """
    libvirtd.restart()
    save_path = os.path.join(data_dir.get_tmp_dir(), 'tmp.save')
    virsh.save(vm.name, save_path)
    virsh.restore(save_path)
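
The snippets on this page come from tp-libvirt test modules and run inside the avocado-vt environment. A minimal sketch of the imports they rely on (module paths as in avocado-vt; verify the exact layout against your tree):

import os
import logging

# avocado-vt helper modules used throughout these examples
from virttest import virsh, data_dir
from virttest.utils_test import libvirt  # provides check_exit_status()

# Each virsh.* wrapper returns a CmdResult object. With ignore_status=True
# the caller inspects the result (e.g. via libvirt.check_exit_status());
# with ignore_status=False the wrapper raises on a non-zero exit status.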
Code Example #2
 def operations():
     """
     Do save | domstats | blkdeviotune operations
     """
     if self.operation == "save":
         virsh.save(self.vm_name, self.save_file, debug=True, timeout=self.operation_timeout)
     if self.operation == "domstats":
         virsh.domstats(self.vm_name, options=self.operation_option, debug=True, timeout=self.operation_timeout)
     if self.operation == "blkdeviotune":
         virsh.blkdeviotune(self.vm_name, self.disk_tgt, debug=True, timeout=self.operation_timeout)
Code Example #3
def run_job_acquire(params, libvirtd, vm):
    """
    Save the domain after querying block info
    """
    vm.start()
    res = virsh.qemu_monitor_command(vm.name, 'info block', '--hmp')
    logging.debug(res)
    save_path = os.path.join(data_dir.get_tmp_dir(), 'tmp.save')
    virsh.save(vm.name, save_path)
    vm.wait_for_shutdown()
Code Example #4
    def test_save_restore_hostdev_device_with_teaming():
        logging.info("Save/restore VM.")
        save_file = os.path.join(data_dir.get_tmp_dir(), "save_file")
        virsh.save(vm_name,
                   save_file,
                   debug=True,
                   ignore_status=False,
                   timeout=10)
        if not libvirt.check_vm_state(vm_name, "shut off"):
            test.fail("The guest should be down after executing 'virsh save'.")
        virsh.restore(save_file, debug=True, ignore_status=False)
        if not libvirt.check_vm_state(vm_name, "running"):
            test.fail(
                "The guest should be running after executing 'virsh restore'.")
        vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login()
        ping_ip = get_ping_dest(vm_session, mac_addr)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)
        logging.info("Detach the hostdev device.")
        hostdev_dev = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).devices.\
            by_device_tag("hostdev")
        virsh.detach_device(vm_name,
                            hostdev_dev.xml,
                            wait_remove_event=True,
                            debug=True,
                            ignore_status=False)
        check_hostdev = vm_xml.VMXML.new_from_dumpxml(vm_name)\
            .devices.by_device_tag('hostdev')
        if check_hostdev:
            test.fail("The hostdev device exists after detaching %s." %
                      check_hostdev)

        check_vm_network_accessed(vm_session,
                                  2,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=False)
        logging.info("Attach the hostdev device.")
        virsh.attach_device(vm_name,
                            hostdev_dev.xml,
                            debug=True,
                            ignore_status=False)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)
Code Example #5
File: powerpc_hmi.py Project: yalzhang/tp-libvirt
 def set_condn(action, recover=False):
     """
     Set/reset guest state/action
     :param action: Guest state change/action
     :param recover: whether to recover the given state (default: False)
     """
     if not recover:
         if action == "pin_vcpu":
             for i in range(cur_vcpu):
                 virsh.vcpupin(vm_name, i, hmi_cpu, "--live",
                               ignore_status=False, debug=True)
                  virsh.emulatorpin(vm_name, hmi_cpu, "--live",
                                    ignore_status=False, debug=True)
         elif action == "filetrans":
             utils_test.run_file_transfer(test, params, env)
         elif action == "save":
             save_file = os.path.join(data_dir.get_tmp_dir(),
                                      vm_name + ".save")
             result = virsh.save(vm_name, save_file, ignore_status=True,
                                 debug=True)
             utils_test.libvirt.check_exit_status(result)
             time.sleep(10)
             if os.path.exists(save_file):
                 result = virsh.restore(save_file, ignore_status=True,
                                        debug=True)
                 utils_test.libvirt.check_exit_status(result)
                 os.remove(save_file)
         elif action == "suspend":
             result = virsh.suspend(vm_name, ignore_status=True, debug=True)
             utils_test.libvirt.check_exit_status(result)
             time.sleep(10)
             result = virsh.resume(vm_name, ignore_status=True, debug=True)
             utils_test.libvirt.check_exit_status(result)
     return
Code Example #6
def manipulate_domain(vm_name, action, recover=False):
    """
    Save/managedsave/S3/S4 domain or recover it.
    """
    tmp_dir = data_dir.get_tmp_dir()
    save_file = os.path.join(tmp_dir, vm_name + ".save")
    if not recover:
        if action == "save":
            save_option = ""
            result = virsh.save(vm_name,
                                save_file,
                                save_option,
                                ignore_status=True,
                                debug=True)
            libvirt.check_exit_status(result)
        elif action == "managedsave":
            managedsave_option = ""
            result = virsh.managedsave(vm_name,
                                       managedsave_option,
                                       ignore_status=True,
                                       debug=True)
            libvirt.check_exit_status(result)
        elif action == "s3":
            suspend_target = "mem"
            result = virsh.dompmsuspend(vm_name,
                                        suspend_target,
                                        ignore_status=True,
                                        debug=True)
            libvirt.check_exit_status(result)
        elif action == "s4":
            suspend_target = "disk"
            result = virsh.dompmsuspend(vm_name,
                                        suspend_target,
                                        ignore_status=True,
                                        debug=True)
            libvirt.check_exit_status(result)
            # Wait domain state change: 'in shutdown' -> 'shut off'
            utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
        else:
            logging.debug("No operation for the domain")

    else:
        if action == "save":
            if os.path.exists(save_file):
                result = virsh.restore(save_file,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                os.remove(save_file)
            else:
                raise error.TestError("No save file for domain restore")
        elif action in ["managedsave", "s4"]:
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif action == "s3":
            suspend_target = "mem"
            result = virsh.dompmwakeup(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        else:
            logging.debug("No need recover the domain")
Code Example #7
    def check_disk_save_restore(save_file, device_targets):
        """
        Check domain save and restore operation.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Connect to the domain and check disk.
        try:
            session = vm.wait_for_login()
            cmd = ("ls /dev/%s && mkfs.ext3 -F /dev/%s && mount /dev/%s"
                   " /mnt && ls /mnt && touch /mnt/test && umount /mnt" %
                   (device_targets[0], device_targets[0], device_targets[0]))
            s, o = session.cmd_status_output(cmd)
            if s:
                session.close()
                raise error.TestError("Failed to read/write disk in VM:"
                                      " %s" % o)
            session.close()
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            raise error.TestError(str(e))
Code Example #8
    def check_disk_save_restore(save_file, device_targets,
                                startup_policy):
        """
        Check domain save and restore operation.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file,
                         **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        restore_error = False
        # Check disk startup policy option
        if "optional" in startup_policy:
            os.remove(disks[0]["source"])
            restore_error = True
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret, restore_error)
        if restore_error:
            return

        # Connect to the domain and check disk.
        try:
            session = vm.wait_for_login()
            cmd = ("ls /dev/%s && mkfs.ext3 -F /dev/%s && mount /dev/%s"
                   " /mnt && ls /mnt && touch /mnt/test && umount /mnt"
                   % (device_targets[0], device_targets[0], device_targets[0]))
            s, o = session.cmd_status_output(cmd)
            if s:
                session.close()
                raise error.TestError("Failed to read/write disk in VM:"
                                      " %s" % o)
            session.close()
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            raise error.TestError(str(e))
Code Example #9
    def check_disk_save_restore(save_file, device_targets,
                                startup_policy):
        """
        Check domain save and restore operation.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file,
                         **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        restore_error = False
        # Check disk startup policy option
        if "optional" in startup_policy:
            os.remove(disks[0]["source"])
            restore_error = True
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret, restore_error)
        if restore_error:
            return

        # Connect to the domain and check disk.
        try:
            session = vm.wait_for_login()
            cmd = ("ls /dev/%s && mkfs.ext3 -F /dev/%s && mount /dev/%s"
                   " /mnt && ls /mnt && touch /mnt/test && umount /mnt"
                   % (device_targets[0], device_targets[0], device_targets[0]))
            status, output = session.cmd_status_output(cmd)
            if status:
                session.close()
                raise exceptions.TestError("Failed to read/write disk in VM:"
                                           " %s" % output)
            session.close()
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as detail:
            raise exceptions.TestError(str(detail))
Code Example #10
 def operations():
     """
     Do save | domstats | blkdeviotune operations
     """
     if self.operation == "save":
         virsh.save(self.vm_name,
                    self.save_file,
                    debug=True,
                    timeout=self.operation_timeout)
     if self.operation == "domstats":
         virsh.domstats(self.vm_name,
                        options=self.operation_option,
                        debug=True,
                        timeout=self.operation_timeout)
     if self.operation == "blkdeviotune":
         virsh.blkdeviotune(self.vm_name,
                            self.disk_tgt,
                            debug=True,
                            timeout=self.operation_timeout)
Code Example #11
 def save_restore():  # pylint: disable=W0611
     """
     Sub test for save and restore
     """
     save_path = os.path.join(data_dir.get_tmp_dir(),
                              '%s.save' % params['os_variant'])
     ret = virsh.save(vm_name, save_path)
     libvirt.check_exit_status(ret)
     ret = virsh.restore(save_path)
     libvirt.check_exit_status(ret)
Code Example #12
    def check_save_restore(save_file):
        """
        Test domain save and restore.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
Code Example #13
File: nvdimm.py Project: Yingshun/tp-libvirt
def test_with_label(vm, params, test):
    """
    Test nvdimm with label setting

    :param vm: vm object
    :param params: dict, test parameters
    :param test: test object
    :raises: test.fail if checkpoints fail
    """
    test_str = params.get('test_str')
    test_file = params.get('test_file')
    vm_name = params.get('main_vm')
    vm_session = vm.wait_for_login()
    # Create a file on the nvdimm device.
    create_file_within_nvdimm_disk(vm_session, test_file, test_str, test, block_size=4096)

    # Reboot the guest, and remount the nvdimm device in the guest.
    # Check that the file foo-label still exists.
    vm_session.close()
    virsh.reboot(vm_name, debug=True)
    vm_session = vm.wait_for_login()

    vm_session.cmd('mount -o dax /dev/pmem0  /mnt')
    if test_str not in vm_session.cmd('cat /mnt/foo-label '):
        test.fail('"%s" should be in output' % test_str)
    vm_session.close()
    if params.get('check_life_cycle', 'no') == 'yes':
        virsh.managedsave(vm_name, ignore_status=False, debug=True)
        vm.start()
        vm_session = vm.wait_for_login()
        check_nvdimm_file(test_str, test_file, vm_session, test)
        vm_session.close()
        vm_s1 = vm_name + ".s1"
        virsh.save(vm_name, vm_s1, ignore_status=False, debug=True)
        virsh.restore(vm_s1, ignore_status=False, debug=True)
        vm_session = vm.wait_for_login()
        check_nvdimm_file(test_str, test_file, vm_session, test)
        vm_session.close()
        virsh.snapshot_create_as(vm_name, vm_s1, ignore_status=False, debug=True)
        virsh.snapshot_revert(vm_name, vm_s1, ignore_status=False, debug=True)
        virsh.snapshot_delete(vm_name, vm_s1, ignore_status=False, debug=True)
Code Example #14
    def check_bootorder_snapshot(disk_name):
        """
        Check VM disk's bootorder option with snapshot.

        :param disk_name. The target disk to be checked.
        """
        logging.info("Checking diskorder option with snapshot...")
        snapshot1 = "s1"
        snapshot2 = "s2"
        snapshot2_file = os.path.join(test.tmpdir, "s2")
        ret = virsh.snapshot_create(vm_name, "", **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(vm_name, "%s --disk-only" % snapshot1,
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_dumpxml(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        cmd = "echo \"%s\" | grep %s.%s" % (ret.stdout, disk_name, snapshot1)
        if utils.run(cmd, ignore_status=True).exit_status:
            raise error.TestError("Check snapshot disk failed")

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --memspec file=%s,snapshot=external"
                                       % (snapshot2, snapshot2_file),
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.dumpxml(vm_name)
        libvirt.check_exit_status(ret)

        cmd = ("echo \"%s\" | grep -A 16 %s.%s | grep \"boot order='%s'\""
               % (ret.stdout, disk_name, snapshot2, bootorder))
        if utils.run(cmd, ignore_status=True).exit_status:
            raise error.TestError("Check snapshot disk with bootorder failed")

        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot1 not in snap_lists or snapshot2 not in snap_lists:
            raise error.TestError("Check snapshot list failed")

        # Check virsh save command after snapshot.
        save_file = "/tmp/%s.save" % vm_name
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Check virsh restore command after snapshot.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Passed all tests; clean up the save file.
        os.remove(save_file)
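
The echo-and-grep pipelines above shell out just to search command output that is already held in memory; under the same assumptions, the check can be a plain substring test in Python (a sketch, not the original code):

# Equivalent in-process check for the snapshot disk name
if "%s.%s" % (disk_name, snapshot1) not in ret.stdout:
    raise error.TestError("Check snapshot disk failed")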
Code Example #15
    def test_save_restore_hostdev_iface_with_teaming():
        logging.info("Save/restore VM.")
        save_file = os.path.join(data_dir.get_tmp_dir(), "save_file")
        virsh.save(vm_name,
                   save_file,
                   debug=True,
                   ignore_status=False,
                   timeout=10)
        if not libvirt.check_vm_state(vm_name, "shut off"):
            test.fail("The guest should be down after executing 'virsh save'.")
        virsh.restore(save_file, debug=True, ignore_status=False)
        if not libvirt.check_vm_state(vm_name, "running"):
            test.fail(
                "The guest should be running after executing 'virsh restore'.")

        vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login()
        check_vm_network_accessed(vm_session,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)
Code Example #16
def check_save_restore(vm_name):
    """
    Do save/restore operation and check status
    """
    save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
    try:
        result = virsh.save(vm_name, save_file, ignore_status=True, debug=True)
        libvirt.check_exit_status(result)
        result = virsh.restore(save_file, ignore_status=True, debug=True)
        libvirt.check_exit_status(result)
    finally:
        os.remove(save_file)
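
One caveat with this helper: if virsh.save() fails before the save file is created, the os.remove() in the finally block raises OSError and masks the original test failure. A slightly more defensive sketch of the same helper:

def check_save_restore(vm_name):
    """
    Do save/restore operation and check status, tolerating a failed save.
    """
    save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
    try:
        result = virsh.save(vm_name, save_file, ignore_status=True, debug=True)
        libvirt.check_exit_status(result)
        result = virsh.restore(save_file, ignore_status=True, debug=True)
        libvirt.check_exit_status(result)
    finally:
        # virsh.save may have failed before the file was created
        if os.path.exists(save_file):
            os.remove(save_file)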
Code Example #17
    def check_bootorder_snapshot(disk_name):
        """
        Check VM disk's bootorder option with snapshot.

        :param disk_name. The target disk to be checked.
        """
        logging.info("Checking diskorder option with snapshot...")
        snapshot1 = "s1"
        snapshot2 = "s2"
        snapshot2_file = os.path.join(test.tmpdir, "s2")
        ret = virsh.snapshot_create(vm_name, "", **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(vm_name, "%s --disk-only" % snapshot1,
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_dumpxml(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        cmd = "echo \"%s\" | grep %s.%s" % (ret.stdout, disk_name, snapshot1)
        if process.system(cmd, ignore_status=True, shell=True):
            raise exceptions.TestError("Check snapshot disk failed")

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --memspec file=%s,snapshot=external"
                                       % (snapshot2, snapshot2_file),
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.dumpxml(vm_name)
        libvirt.check_exit_status(ret)

        cmd = ("echo \"%s\" | grep -A 16 %s.%s | grep \"boot order='%s'\""
               % (ret.stdout, disk_name, snapshot2, bootorder))
        if process.system(cmd, ignore_status=True, shell=True):
            raise exceptions.TestError("Check snapshot disk with bootorder failed")

        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot1 not in snap_lists or snapshot2 not in snap_lists:
            raise exceptions.TestError("Check snapshot list failed")

        # Check virsh save command after snapshot.
        save_file = "/tmp/%s.save" % vm_name
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Check virsh restore command after snapshot.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Passed all tests; clean up the save file.
        os.remove(save_file)
Code Example #18
 def check_save_restore():
     """
     Test save and restore operation
     """
     save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
     ret = virsh.save(vm_name, save_file, **virsh_dargs)
     libvirt.check_exit_status(ret)
     ret = virsh.restore(save_file, **virsh_dargs)
     libvirt.check_exit_status(ret)
     if os.path.exists(save_file):
         os.remove(save_file)
     # Login to check vm status
     vm.wait_for_login().close()
Code Example #19
    def check_disk_save_restore(save_file):
        """
        Check domain save and restore operation.

        :param save_file: the path to saved file
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
Code Example #20
 def check_save_restore():
     """
     Test save and restore operation
     """
     save_file = os.path.join(test.tmpdir, "%s.save" % vm_name)
     ret = virsh.save(vm_name, save_file, **virsh_dargs)
     libvirt.check_exit_status(ret)
     ret = virsh.restore(save_file, **virsh_dargs)
     libvirt.check_exit_status(ret)
     if os.path.exists(save_file):
         os.remove(save_file)
     # Login to check vm status
     vm.wait_for_login().close()
Code Example #21
File: virsh_setmem.py Project: nertpinx/tp-libvirt
def manipulate_domain(vm_name, action, recover=False):
    """
    Save/managedsave/S3/S4 domain or recover it.
    """
    tmp_dir = data_dir.get_tmp_dir()
    save_file = os.path.join(tmp_dir, vm_name + ".save")
    if not recover:
        if action == "save":
            save_option = ""
            result = virsh.save(vm_name, save_file, save_option,
                                ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif action == "managedsave":
            managedsave_option = ""
            result = virsh.managedsave(vm_name, managedsave_option,
                                       ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif action == "s3":
            suspend_target = "mem"
            result = virsh.dompmsuspend(vm_name, suspend_target,
                                        ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif action == "s4":
            suspend_target = "disk"
            result = virsh.dompmsuspend(vm_name, suspend_target,
                                        ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
            # Wait domain state change: 'in shutdown' -> 'shut off'
            utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
        else:
            logging.debug("No operation for the domain")

    else:
        if action == "save":
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                os.remove(save_file)
            else:
                raise error.TestError("No save file for domain restore")
        elif action in ["managedsave", "s4"]:
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif action == "s3":
            suspend_target = "mem"
            result = virsh.dompmwakeup(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        else:
            logging.debug("No need recover the domain")
Code Example #22
 def snapshot():  # pylint: disable=W0611
     """
     Sub test for snapshot
     """
     for i in range(1, 4):
         ret = virsh.snapshot_create_as(vm_name, "sn%s --disk-only" % i)
         libvirt.check_exit_status(ret)
     process.system("systemctl restart libvirtd")
     save_path = os.path.join(tmp_dir, "test.save")
     ret = virsh.save(vm_name, save_path)
     libvirt.check_exit_status(ret)
     ret = virsh.restore(save_path)
     libvirt.check_exit_status(ret)
     session = vm.wait_for_login()
     session.close()
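
Code Example #1 restarts the daemon through a libvirtd fixture object, while this test shells out to systemctl directly. avocado-vt also ships a service wrapper for this (a sketch, assuming the utils_libvirtd module is available in your tree):

from virttest import utils_libvirtd

# Restart libvirtd through the avocado-vt service wrapper instead of
# invoking systemctl by hand.
utils_libvirtd.Libvirtd().restart()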
Code Example #23
def run_pm_test(params, libvirtd, vm):
    """
    Destroy the VM after executing a series of S3 and save/restore operations
    """

    vm_name = vm.name
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    save_path = os.path.join(data_dir.get_tmp_dir(), 'tmp.save')
    try:
        pm_xml = VMPMXML()
        pm_xml.mem_enabled = 'yes'
        vm_xml.pm = pm_xml
        vm_xml.sync()
        vm.prepare_guest_agent()
        virsh.dompmsuspend(vm.name, 'mem')
        virsh.dompmwakeup(vm.name)
        virsh.save(vm.name, save_path)
        virsh.restore(save_path)
        virsh.dompmsuspend(vm.name, 'mem')
        virsh.save(vm.name, save_path)
        virsh.destroy(vm.name)
    finally:
        vm_xml_backup.sync()
Code Example #24
 def save_restore_hook():
     """
     Do save/restore operation and check the results.
     """
     hook_para = "%s %s" % (hook_file, vm_name)
     save_file = os.path.join(test.tmpdir,
                              "%s.save" % vm_name)
     disk_src = vm.get_first_disk_devices()['source']
     if domainxml_test:
         disk_dist = "/tmp/%s.move" % vm_name
         shutil.copy(disk_src, disk_dist)
         script = (hook_script %
                   (vm_name, disk_src, disk_dist))
         prepare_hook_file(script)
     elif basic_test:
         prepare_hook_file(hook_script %
                           (vm_name, hook_log))
     ret = virsh.save(vm_name, save_file, **virsh_dargs)
     libvirt.check_exit_status(ret)
     if domainxml_test:
         disk_src_save = vm.get_first_disk_devices()['source']
         if disk_src != disk_src_save:
             test.fail("Failed to check hooks for"
                       " save operation")
     ret = virsh.restore(save_file, **virsh_dargs)
     libvirt.check_exit_status(ret)
     if os.path.exists(save_file):
         os.remove(save_file)
     if domainxml_test:
         disk_src_restore = vm.get_first_disk_devices()['source']
         if disk_dist != disk_src_restore:
             test.fail("Failed to check hooks for"
                       " restore operation")
         vm.destroy()
         if os.path.exists(disk_dist):
             os.remove(disk_dist)
         vmxml_backup.sync()
     if basic_test:
         hook_str = hook_para + " restore begin -"
         if not check_hooks(hook_str):
             test.fail("Failed to check "
                       "restore hooks.")
Code Example #25
File: libvirt_hooks.py Project: lento-sun/tp-libvirt
 def save_restore_hook():
     """
     Do save/restore operation and check the results.
     """
     hook_para = "%s %s" % (hook_file, vm_name)
     save_file = os.path.join(test.tmpdir,
                              "%s.save" % vm_name)
     disk_src = vm.get_first_disk_devices()['source']
     if domainxml_test:
         disk_dist = "/tmp/%s.move" % vm_name
         shutil.copy(disk_src, disk_dist)
         script = (hook_script %
                   (vm_name, disk_src, disk_dist))
         prepare_hook_file(script)
     elif basic_test:
         prepare_hook_file(hook_script %
                           (vm_name, hook_log))
     ret = virsh.save(vm_name, save_file, **virsh_dargs)
     libvirt.check_exit_status(ret)
     if domainxml_test:
         disk_src_save = vm.get_first_disk_devices()['source']
         if disk_src != disk_src_save:
             raise error.TestFail("Failed to check hooks for"
                                  " save operation")
     ret = virsh.restore(save_file, **virsh_dargs)
     libvirt.check_exit_status(ret)
     if os.path.exists(save_file):
         os.remove(save_file)
     if domainxml_test:
         disk_src_restore = vm.get_first_disk_devices()['source']
         if disk_dist != disk_src_restore:
             raise error.TestFail("Failed to check hooks for"
                                  " restore operation")
         vm.destroy()
         if os.path.exists(disk_dist):
             os.remove(disk_dist)
         vmxml_backup.sync()
     if basic_test:
         hook_str = hook_para + " restore begin -"
         if not check_hooks(hook_str):
             raise error.TestFail("Failed to check "
                                  "restore hooks.")
Code Example #26
File: boot_integration.py Project: mxie91/tp-libvirt
def domain_lifecycle(vmxml, vm, test, virsh_dargs, **kwargs):
    """
    Test the lifecycle of the domain

    :param vmxml: the xml of the vm
    :param vm: VM object, used to wait for login
    :param test: test object
    :param virsh_dargs: debug settings passed to the virsh commands
    """
    vm_name = kwargs.get("vm_name")
    save_file = kwargs.get("save_file")
    boot_type = kwargs.get("boot_type")
    nvram_file = kwargs.get("nvram_file")

    ret = virsh.define(vmxml.xml, **virsh_dargs)
    stdout_patt = "Domain .*%s.* defined from %s" % (vm_name, vmxml.xml)
    utlv.check_result(ret, expected_match=[stdout_patt])

    ret = virsh.start(vm_name, **virsh_dargs)
    stdout_patt = "Domain .*%s.* started" % vm_name
    utlv.check_result(ret, expected_match=[stdout_patt])
    vm.wait_for_login()

    ret = virsh.destroy(vm_name, **virsh_dargs)
    stdout_patt = "Domain .*%s.* destroyed" % vm_name
    utlv.check_result(ret, expected_match=[stdout_patt])

    vm.start()
    ret = virsh.save(vm_name, save_file, **virsh_dargs)
    stdout_patt = "Domain .*%s.* saved to %s" % (vm_name, save_file)
    utlv.check_result(ret, expected_match=[stdout_patt])

    ret = virsh.restore(save_file, **virsh_dargs)
    stdout_patt = "Domain restored from %s" % save_file
    utlv.check_result(ret, expected_match=[stdout_patt])

    ret = virsh.undefine(vm_name, options="--nvram", **virsh_dargs)
    stdout_patt = "Domain .*%s.* has been undefined" % vm_name
    utlv.check_result(ret, expected_match=[stdout_patt])
    if boot_type == "ovmf":
        if os.path.exists(nvram_file):
            test.fail("nvram file still exists after vm undefine")
Code Example #27
File: libvirt_mem.py Project: yalzhang/tp-libvirt
    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        def _wait_for_restore():
            try:
                virsh.restore(save_file, debug=True, ignore_status=False)
                return True
            except Exception as e:
                logging.error(e)

        utils_misc.wait_for(_wait_for_restore, 30, step=5)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()
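
utils_misc.wait_for() polls the supplied callable until it returns a truthy value or the timeout expires, which is what makes the retried restore above work. A generic sketch of the pattern (the 30-second timeout and 5-second step are the values used in this example):

from virttest import utils_misc

def _condition():
    # Return a truthy value once the awaited state is reached; returning
    # None or False makes wait_for() retry after 'step' seconds.
    return True

# Poll every 5 seconds for at most 30 seconds; returns the callable's
# first truthy result, or None on timeout.
result = utils_misc.wait_for(_condition, 30, step=5)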
Code Example #28
def run(test, params, env):
    """
    Test command: virsh domjobabort.

    The command can abort the currently running domain job.
    1.Prepare test environment,destroy or suspend a VM.
    2.Do action to get a subprocess(dump, save, managedsave).
    3.Perform virsh domjobabort operation to abort VM's job.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After starting the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    original_speed = virsh.migrate_getspeed(vm_name).stdout.strip()

    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute background virsh command, return subprocess w/o waiting for exit()

        :param action: virsh command to run.
        :param vm_name: VM's name.
        :param file: virsh command's file option.
        :param remote_uri: remote URI used by the migrate action.
        """
        args = ""
        if action == "managedsave":
            file = ""
        elif action == "migrate":
            # Slow down migration for domjobabort
            virsh.migrate_setspeed(vm_name, "1")
            file = remote_uri
            args = "--unsafe"
        command = "virsh %s %s %s %s" % (action, vm_name, file, args)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    action = params.get("jobabort_action", "dump")
    dump_opt = params.get("dump_opt", None)
    status_error = params.get("status_error", "no")
    job = params.get("jobabort_job", "yes")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domjobabort.tmp")
    tmp_pipe = os.path.join(data_dir.get_tmp_dir(), "domjobabort.fifo")
    vm_ref = params.get("jobabort_vm_ref")
    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    saved_data = None

    # Build job action
    if dump_opt:
        action = "dump --crash"

    if action == "managedsave":
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        virsh.save(vm_name, tmp_file, ignore_status=True)

    if action == "migrate":
        if remote_host.count("EXAMPLE"):
            test.cancel("Remote host should be configured " "for migrate.")
        else:
            # Config ssh autologin for remote host
            ssh_key.setup_ssh_key(remote_host,
                                  remote_user,
                                  remote_pwd,
                                  port=22)

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # Get the subprocess of VM.
    # The command's effect is to abort the currently running domain job.
    # So before running "domjobabort", we must create a job on the domain.
    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        os.mkfifo(tmp_pipe)

        process = get_subprocess(action, vm_name, tmp_pipe, remote_uri)

        saved_data = None
        if action == "restore":
            with open(tmp_file, 'r') as tmp_f:
                saved_data = tmp_f.read(10 * 1024 * 1024)
            f = open(tmp_pipe, 'w')
            f.write(saved_data[:1024 * 1024])
        elif action == "migrate":
            f = None
        else:
            f = open(tmp_pipe, 'rb')
            dummy = f.read(1024 * 1024).decode(locale.getpreferredencoding(),
                                               'ignore')

    # Give the job enough time to start
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break
    ret = virsh.domjobabort(vm_ref, ignore_status=True, debug=True)
    status = ret.exit_status

    if process and f:
        if saved_data:
            f.write(saved_data[1024 * 1024:])
        else:
            dummy = f.read()
        f.close()

        try:
            os.unlink(tmp_pipe)
        except OSError as detail:
            logging.info("Can't remove %s: %s", tmp_pipe, detail)
        try:
            os.unlink(tmp_file)
        except OSError as detail:
            logging.info("Cant' remove %s: %s", tmp_file, detail)

    # Recover the environment.
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        if process.poll():
            try:
                process.kill()
            except OSError:
                pass

    if action == "migrate":
        # Recover migration speed
        virsh.migrate_setspeed(vm_name, original_speed)
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
Code Example #29
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi pool (for 'volume' type disks)
    3. Create an iscsi network disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocal = params.get("disk_source_protocal", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in"
                                    + " current libvirt version.")
    # Back up the VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocal
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s", open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                      is_login=False,
                                                      chap_user=chap_user,
                                                      chap_passwd=chap_passwd)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.hostname = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_host
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))[1][0]
            except IndexError:
                raise error.TestError("Fail to get volume name")

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocal,
                               'source_name': iscsi_target + "/1",
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'source_mode': disk_src_mode}
        else:
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocal,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        # Attach the iscsi network disk to domain
        logging.debug("Attach disk by XML: %s", open(disk_xml).read())
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstrs=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            vm.start()
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_info(vm_name, snapshot_name1,
                                             **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            sn_create_op = "%s --disk-only %s" % (snapshot_name2, disk_target)
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
Code Example #30
def run(test, params, env):
    """
    Test virsh {at|de}tach-disk command.

    The command can attach new disk/detach disk.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh attach/detach-disk operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    def check_vm_partition(vm, device, os_type, target_name, old_parts):
        """
        Check VM disk's partition.

        :param vm. VM guest.
        :param os_type. VM's operation system type.
        :param target_name. Device target type.
        :return: True if check successfully.
        """
        logging.info("Checking VM partittion...")
        if vm.is_dead():
            vm.start()
        try:
            attached = False
            if os_type == "linux":
                session = vm.wait_for_login()
                new_parts = utils_disk.get_parts_list(session)
                added_parts = list(set(new_parts).difference(set(old_parts)))
                logging.debug("Added parts: %s" % added_parts)
                for i in range(len(added_parts)):
                    if device == "disk":
                        if target_name.startswith("vd"):
                            if added_parts[i].startswith("vd"):
                                attached = True
                        elif target_name.startswith(
                                "hd") or target_name.startswith("sd"):
                            if added_parts[i].startswith("sd"):
                                attached = True
                    elif device == "cdrom":
                        if added_parts[i].startswith("sr"):
                            attached = True
                session.close()
            return attached
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def acpiphp_module_modprobe(vm, os_type):
        """
        Add acpiphp module if VM's os type is rhel5.*

        :param vm. VM guest.
        :param os_type. VM's operation system type.
        :return: True if operate successfully.
        """
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                s_rpm, _ = session.cmd_status_output("rpm --version")
                # If status is different from 0, this
                # guest OS doesn't support the rpm package
                # manager
                if s_rpm:
                    session.close()
                    return True
                _, o_vd = session.cmd_status_output(
                    "rpm -qa | grep redhat-release")
                if o_vd.find("5Server") != -1:
                    s_mod, o_mod = session.cmd_status_output(
                        "modprobe acpiphp")
                    del o_mod
                    if s_mod != 0:
                        session.close()
                        return False
                session.close()
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_shareable(at_with_shareable, test_twice):
        """
        check if current libvirt version support shareable option

        at_with_shareable: True or False. Whether attach disk with shareable option
        test_twice: True or False. Whether perform operations twice
        return: True or cancel the test
        """
        if at_with_shareable or test_twice:
            if libvirt_version.version_compare(3, 9, 0):
                return True
            else:
                test.cancel(
                    "Current libvirt version doesn't support shareable feature"
                )

    # Get test command.
    test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk")

    vm_ref = params.get("at_dt_disk_vm_ref", "name")
    at_options = params.get("at_dt_disk_at_options", "")
    dt_options = params.get("at_dt_disk_dt_options", "")
    at_with_shareable = "yes" == params.get("at_with_shareable", 'no')
    pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = params.get("at_dt_disk_no_attach", 'no')
    os_type = params.get("os_type", "linux")
    qemu_file_lock = params.get("qemu_file_lock", "")
    if qemu_file_lock:
        if utils_misc.compare_qemu_version(2, 9, 0):
            logging.info('From qemu-kvm-rhev 2.9.0: '
                         'QEMU image locking should prevent multiple '
                         'runs of QEMU or qemu-img when a VM is running.')
            if test_cmd == "detach-disk" or pre_vm_state == "shut off":
                test.cancel('This case is not supported.')
            else:
                logging.info(
                    'The expected result is failure as opposed to success')
                status_error = True

    # Disk specific attributes.
    device = params.get("at_dt_disk_device", "disk")
    device_source_name = params.get("at_dt_disk_device_source", "attach.img")
    device_source_format = params.get("at_dt_disk_device_source_format", "raw")
    device_target = params.get("at_dt_disk_device_target", "vdd")
    device_disk_bus = params.get("at_dt_disk_bus_type", "virtio")
    source_path = "yes" == params.get("at_dt_disk_device_source_path", "yes")
    create_img = "yes" == params.get("at_dt_disk_create_image", "yes")
    test_twice = "yes" == params.get("at_dt_disk_test_twice", "no")
    test_type = "yes" == params.get("at_dt_disk_check_type", "no")
    test_audit = "yes" == params.get("at_dt_disk_check_audit", "no")
    test_block_dev = "yes" == params.get("at_dt_disk_iscsi_device", "no")
    test_logcial_dev = "yes" == params.get("at_dt_disk_logical_device", "no")
    restart_libvirtd = "yes" == params.get("at_dt_disk_restart_libvirtd", "no")
    detach_disk_with_print_xml = "yes" == params.get(
        "detach_disk_with_print_xml", "no")
    vg_name = params.get("at_dt_disk_vg", "vg_test_0")
    lv_name = params.get("at_dt_disk_lv", "lv_test_0")
    serial = params.get("at_dt_disk_serial", "")
    address = params.get("at_dt_disk_address", "")
    address2 = params.get("at_dt_disk_address2", "")
    cache_options = params.get("cache_options", "")
    time_sleep = params.get("time_sleep", 3)
    if check_shareable(at_with_shareable, test_twice):
        at_options += " --mode shareable"
    if serial:
        at_options += (" --serial %s" % serial)
    if address2:
        at_options_twice = at_options + (" --address %s" % address2)
    if address:
        at_options += (" --address %s" % address)
    if cache_options:
        if cache_options.count("directsync"):
            if not libvirt_version.version_compare(1, 0, 0):
                test.cancel("The 'directsync' cache option is not "
                            "supported in the current libvirt version.")
        at_options += (" --cache %s" % cache_options)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start the VM and get all partitions in it.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Create virtual device file.
    device_source_path = os.path.join(data_dir.get_tmp_dir(),
                                      device_source_name)
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Cannot get iscsi device name on host")
        if test_logical_dev:
            lv_utils.vg_create(vg_name, device_source)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)
    else:
        if source_path and create_img:
            device_source = libvirt.create_local_disk(
                "file",
                path=device_source_path,
                size="1G",
                disk_format=device_source_format)
        else:
            device_source = device_source_name

    # If we are testing audit, we need to start the auditd service first.
    if test_audit:
        auditd_service = Factory.create_service("auditd")
        if not auditd_service.status():
            auditd_service.start()
        logging.info("Auditd service status: %s" % auditd_service.status())

    # If we are testing cdrom device, we need to detach hdc in VM first.
    if device == "cdrom":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        s_detach = virsh.detach_disk(vm_name, device_target, "--config")
        if s_detach.exit_status != 0:
            logging.error("Detach hdc failed before test.")

    # If we are testing detach-disk, we need to attach certain device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        s_at_options = "--driver qemu --config"
        # Since the locking feature was introduced in libvirt 3.9.0, the disk
        # shareable option must be set if the disk is attached multiple times.
        if check_shareable(at_with_shareable, test_twice):
            s_at_options += " --mode shareable"

        s_attach = virsh.attach_disk(vm_name,
                                     device_source,
                                     device_target,
                                     s_at_options,
                                     debug=True).exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing detach-disk")
        else:
            logging.debug(
                "Attaching device succeeded before testing detach-disk")
        if test_twice:
            device_target2 = params.get("at_dt_disk_device_target2",
                                        device_target)
            device_source = libvirt.create_local_disk(
                "file",
                path=device_source_path,
                size="1",
                disk_format=device_source_format)
            s_attach = virsh.attach_disk(vm_name, device_source,
                                         device_target2,
                                         s_at_options).exit_status
            if s_attach != 0:
                logging.error("Attaching device failed before testing "
                              "detach-disk test_twice")

    vm.start()
    vm.wait_for_login()

    # Add the acpiphp module before testing if the VM's OS type is rhel5.*
    if not acpiphp_module_modprobe(vm, os_type):
        test.error("Add acpiphp module failed before test.")

    # Turn VM into certain state.
    if pre_vm_state == "paused":
        logging.info("Suspending %s..." % vm_name)
        if vm.is_alive():
            vm.pause()
    elif pre_vm_state == "shut off":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)

    # Get disk count before test.
    disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    if test_cmd == "attach-disk":
        status = virsh.attach_disk(vm_ref,
                                   device_source,
                                   device_target,
                                   at_options,
                                   debug=True).exit_status
    elif test_cmd == "detach-disk":
        # With the --print-xml option, detach-disk only prints the device
        # XML and does not actually detach the disk.
        if detach_disk_with_print_xml and libvirt_version.version_compare(
                4, 5, 0):
            ret = virsh.detach_disk(vm_ref, device_target, at_options)
            libvirt.check_exit_status(ret)
            cmd = ("echo \"%s\" | grep -A 16 %s" %
                   (ret.stdout.strip(), device_source_name))
            if process.system(cmd, ignore_status=True, shell=True):
                test.error("Check disk with source image name failed")
        status = virsh.detach_disk(vm_ref,
                                   device_target,
                                   dt_options,
                                   debug=True).exit_status

    if restart_libvirtd:
        libvirtd_serv = utils_libvirtd.Libvirtd()
        libvirtd_serv.restart()

    if test_twice:
        device_target2 = params.get("at_dt_disk_device_target2", device_target)
        device_source = libvirt.create_local_disk(
            "file",
            path=device_source_path,
            size="1G",
            disk_format=device_source_format)
        if test_cmd == "attach-disk":
            if address2:
                at_options = at_options_twice
            status = virsh.attach_disk(vm_ref,
                                       device_source,
                                       device_target2,
                                       at_options,
                                       debug=True).exit_status
        elif test_cmd == "detach-disk":
            status = virsh.detach_disk(vm_ref,
                                       device_target2,
                                       dt_options,
                                       debug=True).exit_status

    # Resume the guest after the command. On newer libvirt this is fixed,
    # as it used to be a bug: the change in the XML file is only made after
    # the guest is resumed.
    if pre_vm_state == "paused":
        vm.resume()
        time.sleep(5)

    # Check audit log
    check_audit_after_cmd = True
    if test_audit:
        grep_audit = ('grep "%s" /var/log/audit/audit.log' %
                      test_cmd.split("-")[0])
        cmd = (grep_audit + ' | ' +
               'grep "%s" | tail -n1 | grep "res=success"' % device_source)
        if process.run(cmd, shell=True).exit_status:
            logging.error("Audit check failed")
            check_audit_after_cmd = False

    # Wait a while for the XML to sync
    time.sleep(float(time_sleep))
    # Check disk count after command.
    check_count_after_cmd = True
    disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_cmd == disk_count_before_cmd:
            check_count_after_cmd = False
    elif test_cmd == "detach-disk":
        if disk_count_after_cmd < disk_count_before_cmd:
            check_count_after_cmd = False

    # Recover VM state.
    if pre_vm_state == "shut off":
        vm.start()

    # Check in VM after command.
    check_vm_after_cmd = check_vm_partition(vm, device, os_type, device_target,
                                            old_parts)

    # Check disk type after attach.
    check_disk_type = True
    if test_type:
        if test_block_dev:
            check_disk_type = vm_xml.VMXML.check_disk_type(
                vm_name, device_source, "block")
        else:
            check_disk_type = vm_xml.VMXML.check_disk_type(
                vm_name, device_source, "file")
    # Check disk serial after attach.
    check_disk_serial = True
    if serial:
        disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target)
        if serial != disk_serial:
            check_disk_serial = False

    # Check disk address after attach.
    check_disk_address = True
    if address:
        disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target)
        if address != disk_address:
            check_disk_address = False

    # Check multifunction address after attach.
    check_disk_address2 = True
    if address2:
        disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2)
        if address2 != disk_address2:
            check_disk_address2 = False

    # Check disk cache option after attach.
    check_cache_after_cmd = True
    if cache_options:
        disk_cache = vm_xml.VMXML.get_disk_attr(vm_name, device_target,
                                                "driver", "cache")
        if cache_options == "default":
            if disk_cache is not None:
                check_cache_after_cmd = False
        elif disk_cache != cache_options:
            check_cache_after_cmd = False

    # Eject cdrom test
    eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no")
    save_vm = "yes" == params.get("at_dt_disk_save_vm", "no")
    save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
    try:
        if eject_cdrom:
            eject_params = {
                'type_name': "file",
                'device_type': "cdrom",
                'target_dev': device_target,
                'target_bus': device_disk_bus
            }
            eject_xml = libvirt.create_disk_xml(eject_params)
            with open(eject_xml) as eject_file:
                logging.debug("Eject CDROM by XML: %s", eject_file.read())
            # Run the command twice to make sure the cdrom tray opens first #BZ892289
            # Open tray
            virsh.attach_device(domainarg=vm_name,
                                filearg=eject_xml,
                                debug=True)
            # Add time sleep between two attach commands.
            if time_sleep:
                time.sleep(float(time_sleep))
            # Eject cdrom
            result = virsh.attach_device(domainarg=vm_name,
                                         filearg=eject_xml,
                                         debug=True)
            if result.exit_status != 0:
                test.fail("Eject CDROM failed")
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do eject" % device_source)
        # Save and restore VM
        if save_vm:
            result = virsh.save(vm_name, save_file, debug=True)
            libvirt.check_exit_status(result)
            result = virsh.restore(save_file, debug=True)
            libvirt.check_exit_status(result)
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do restore" % device_source)

        # Destroy VM.
        vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        inactive_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disk_count_after_shutdown = len(inactive_vmxml.get_disk_all())
        if test_cmd == "attach-disk":
            if disk_count_after_shutdown == disk_count_before_cmd:
                check_count_after_shutdown = False
        elif test_cmd == "detach-disk":
            if disk_count_after_shutdown < disk_count_before_cmd:
                check_count_after_shutdown = False

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.debug("Restore the VM XML")
        backup_xml.sync()
        if os.path.exists(save_file):
            os.remove(save_file)
        if test_block_dev:
            if test_logical_dev:
                libvirt.delete_local_disk("lvm",
                                          vgname=vg_name,
                                          lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                process.run("pvremove %s" % device_source,
                            shell=True,
                            ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(False)
        else:
            libvirt.delete_local_disk("file", device_source)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh %s exit with unexpected value." % test_cmd)
    else:
        if status:
            test.fail("virsh %s failed." % test_cmd)
        if test_cmd == "attach-disk":
            if at_options.count("config"):
                if not check_count_after_shutdown:
                    test.fail("Cannot see config attached device "
                              "in xml file after VM shutdown.")
                if not check_disk_serial:
                    test.fail("Serial set failed after attach")
                if not check_disk_address:
                    test.fail("Address set failed after attach")
                if not check_disk_address2:
                    test.fail("Address(multifunction) set failed"
                              " after attach")
            else:
                if not check_count_after_cmd:
                    test.fail("Cannot see device in xml file after attach.")
                if not check_vm_after_cmd:
                    test.fail("Cannot see device in VM after attach.")
                if not check_disk_type:
                    test.fail("Check disk type failed after attach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotplug failure after attach")
                if not check_cache_after_cmd:
                    test.fail("Check cache failure after attach")
                if at_options.count("persistent"):
                    if not check_count_after_shutdown:
                        test.fail("Cannot see device attached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if check_count_after_shutdown:
                        test.fail("See non-config attached device "
                                  "in xml file after VM shutdown.")
        elif test_cmd == "detach-disk":
            if dt_options.count("config"):
                if check_count_after_shutdown:
                    test.fail("See config detached device in "
                              "xml file after VM shutdown.")
            else:
                if check_count_after_cmd:
                    test.fail("See device in xml file after detach.")
                if check_vm_after_cmd:
                    test.fail("See device in VM after detach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotunplug failure after detach")

                if dt_options.count("persistent"):
                    if check_count_after_shutdown:
                        test.fail("See device deattached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if not check_count_after_shutdown:
                        test.fail("See non-config detached "
                                  "device in xml file after "
                                  "VM shutdown.")

        else:
            test.error("Unknown command %s." % test_cmd)
コード例 #35
0
ファイル: virsh_restore.py プロジェクト: QiuMike/virt-test
def run_virsh_restore(test, params, env):
    """
    Test command: virsh restore.

    Restore a domain from a saved state in a file
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh restore command with assigned option.
    4.Recover test environment.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    session = vm.wait_for_login()

    os_type = params.get("os_type")
    status_error = params.get("restore_status_error")
    libvirtd = params.get("restore_libvirtd")
    extra_param = params.get("restore_extra_param")
    pre_status = params.get("restore_pre_status")
    vm_ref = params.get("restore_vm_ref")

    # run test
    if vm_ref == "" or vm_ref == "xyz":
        status = virsh.restore(vm_ref, ignore_status=True).exit_status
    else:
        if os_type == "linux":
            cmd = "cat /proc/cpuinfo"
            status, output = session.cmd_status_output(cmd,
                                                       internal_timeout=10)
            session.close()
            if not re.search("processor", output):
                raise error.TestFail("Unable to read /proc/cpuinfo")
        tmp_file = vm_ref
        if vm_ref == "/tmp/save.file":
            virsh.save(vm_name, vm_ref)
            vm_ref = "%s %s" % (vm_ref, extra_param)
        elif  vm_ref == "/tmp/new.file":
            open(vm_ref, 'w').close()
        if vm.is_alive():
            vm.destroy()
        if pre_status == "start":
            virsh.start(vm_name)
        if libvirtd == "off":
            libvirt_vm.libvirtd_stop()
        status = virsh.restore(vm_ref, ignore_status=True).exit_status
        os.unlink(tmp_file)
    if status_error == "no":
        list_output = virsh.dom_list().stdout.strip()

    session.close()

    # recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()
    if vm.is_alive():
        vm.destroy()

    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if not re.search(vm_name, list_output):
                raise error.TestFail("Run failed with right command")
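Stripped of its negative-path branches, the happy path of this test is just a save/restore round trip. A minimal sketch, assuming the same avocado-vt helpers (the guest name "demo-vm" is illustrative):

import os

from virttest import data_dir, virsh

vm_name = "demo-vm"  # hypothetical guest name
save_file = os.path.join(data_dir.get_tmp_dir(), "demo.save")

virsh.save(vm_name, save_file, debug=True, ignore_status=False)   # guest stops
virsh.restore(save_file, debug=True, ignore_status=False)         # guest runs again
os.remove(save_file)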
コード例 #36
0
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm.wait_for_login()

    restore_state = params.get("restore_state", "running")
    vm_save = params.get("vm_save", "vm.save")

    try:
        # Get a tmp_dir.
        tmp_dir = data_dir.get_tmp_dir()
        if os.path.dirname(vm_save) is "":
            vm_save = os.path.join(tmp_dir, vm_save)

        # Save the RAM state of a running domain
        cmd_result = virsh.save(vm_name, vm_save, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to save running domain %s" % vm_name)

        # Edit the xml in the saved state file
        edit_image_xml()

        # Restore domain
        cmd_result = virsh.restore(vm_save, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to restore domain %s" % vm_name)
        os.remove(vm_save)

        vm_state_check()

    finally:
コード例 #37
0
ファイル: virsh_restore.py プロジェクト: Antique/tp-libvirt
def run(test, params, env):
    """
    Test command: virsh restore.

    Restore a domain from a saved state in a file
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh restore command with assigned option.
    4.Recover test environment.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    session = vm.wait_for_login()

    os_type = params.get("os_type")
    status_error = ("yes" == params.get("status_error"))
    libvirtd = params.get("libvirtd", "on")
    extra_param = params.get("restore_extra_param")
    pre_status = params.get("restore_pre_status")
    vm_ref = params.get("restore_vm_ref")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # run test
    if vm_ref == "" or vm_ref == "xyz":
        status = virsh.restore(vm_ref, extra_param, debug=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               ignore_status=True).exit_status
    else:
        if os_type == "linux":
            cmd = "cat /proc/cpuinfo"
            try:
                status, output = session.cmd_status_output(cmd, timeout=10)
            finally:
                session.close()
            if not re.search("processor", output):
                raise error.TestFail("Unable to read /proc/cpuinfo")
        tmp_file = os.path.join(test.tmpdir, "save.file")
        virsh.save(vm_name, tmp_file)
        if vm_ref == "saved_file":
            vm_ref = tmp_file
        elif vm_ref == "empty_new_file":
            tmp_file = os.path.join(test.tmpdir, "new.file")
            open(tmp_file, 'w').close()
            vm_ref = tmp_file
        if vm.is_alive():
            vm.destroy()
        if pre_status == "start":
            virsh.start(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()
        status = virsh.restore(vm_ref, extra_param, debug=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               ignore_status=True).exit_status
    if not status_error:
        list_output = virsh.dom_list().stdout.strip()

    session.close()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    try:
        if status_error:
            if not status:
                raise error.TestFail("Run successfully with wrong command!")
        else:
            if status:
                raise error.TestFail("Run failed with right command")
            if not re.search(vm_name, list_output):
                raise error.TestFail("Run failed with right command")
            if extra_param.count("paused"):
                if not vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " paused after restore"
                                         " due to the option --paused")
            if extra_param.count("running"):
                if vm.is_dead() or vm.is_paused():
                    raise error.TestFail("Guest state should be"
                                         " running after restore"
                                         " due to the option --running")
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
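The --paused/--running assertions above rely on passing the flag through restore's extra-argument slot, which is how the virsh wrapper is called in this example. A hedged sketch of that call shape ("demo-vm" is illustrative):

import os

from virttest import data_dir, virsh

save_file = os.path.join(data_dir.get_tmp_dir(), "demo.save")
virsh.save("demo-vm", save_file)
# The second positional argument is the extra option string.
result = virsh.restore(save_file, "--paused", debug=True, ignore_status=True)
print(result.exit_status)  # 0 => the guest should come back paused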
コード例 #38
0
ファイル: mtu.py プロジェクト: nasastry/tp-libvirt
def run(test, params, env):
    """
    Test mtu feature from virtual network
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    mtu_type = params.get('mtu_type')
    mtu_size = params.get('mtu_size', '')
    net = params.get('net', DEFAULT_NET)
    net_type = params.get('net_type', '')
    with_iface = 'yes' == params.get('with_iface', 'no')
    with_net = 'yes' == params.get('with_net', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    check = params.get('check', '')
    error_msg = params.get('error_msg', '')
    bridge_name = 'br_mtu' + utils_misc.generate_random_string(3)
    add_pkg = params.get('add_pkg', '')
    model = params.get('model', 'virtio')

    def set_network(size, net='default'):
        """
        Set mtu size to a certain network
        """
        logging.info('Set mtu size of network "%s" to %s', net, size)
        default_xml = NetworkXML.new_from_net_dumpxml(net)
        default_xml.mtu = size
        default_xml.sync()
        logging.debug(virsh.net_dumpxml(net))

    def set_interface(mtu_size='', source_network='default',
                      iface_type='network', iface_model='virtio'):
        """
        Set mtu size to a certain interface
        """
        interface_type = 'bridge' if iface_type in ('bridge', 'openvswitch') else iface_type
        iface_dict = {
            'type': interface_type,
            'source': "{'%s': '%s'}" % (interface_type, source_network),
            'model': iface_model
        }

        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})

        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})

        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)

    def get_default_if():
        """
        Get default interface that is using by vm
        """
        ifaces = utils_net.get_sorted_net_if()
        logging.debug('Interfaces on host: %s', ifaces)
        for iface in ifaces[0]:
            if 'Link detected: yes' in process.run('ethtool %s' % iface).stdout_text:
                logging.debug('Found host interface "%s"', iface)
                return iface

    def create_bridge():
        """
        Create a bridge on host for test
        """
        cmd_create_br = 'nmcli con add type bridge con-name %s ifname %s'
        con_name = 'con_' + utils_misc.generate_random_string(3)
        bridge_name = 'br_' + utils_misc.generate_random_string(3)
        process.run(cmd_create_br % (con_name, bridge_name), verbose=True)
        return con_name, bridge_name

    def create_network_xml(name, network_type, base_if='', **kwargs):
        """
        Create a network xml to be defined
        """
        m_net = NetworkXML(name)
        m_net.forward = {'mode': 'bridge'}
        if network_type in ('bridge', 'openvswitch'):
            m_net.bridge = {'name': kwargs['bridge_name']}
        elif network_type == 'macvtap':
            if base_if:
                m_net.forward_interface = [{'dev': base_if}]
        if network_type == 'openvswitch':
            m_net.virtualport_type = 'openvswitch'
        if 'mtu' in kwargs:
            m_net.mtu = kwargs['mtu']
        logging.debug(m_net)
        return m_net.xml

    def create_iface(iface_type, **kwargs):
        """
        Create a interface to be attached to vm
        """
        m_iface = Interface(iface_type)
        m_iface.mac_address = utils_net.generate_mac_address_simple()
        if 'base_if' in kwargs:
            m_iface.source = {'dev': kwargs['base_if'],
                              'mode': 'vepa'}
        if 'source_net' in kwargs:
            m_iface.source = {'network': kwargs['source_net']}
        if 'mtu' in kwargs:
            m_iface.mtu = {'size': kwargs['mtu']}
        if 'model_net' in kwargs:
            m_iface.model = kwargs['model_net']
        logging.debug(m_iface.get_xml())
        logging.debug(m_iface)
        return m_iface

    def check_mtu(mtu_size, qemu=False):
        """
        Check if mtu meets expectation on host
        """
        error = ''
        live_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = live_vmxml.get_devices('interface')[0]
        logging.debug(iface_xml.target)
        dev = iface_xml.target['dev']
        ifconfig_info = process.run('ifconfig|grep mtu|grep %s' % dev,
                                    shell=True, verbose=True).stdout_text
        if 'mtu %s' % mtu_size in ifconfig_info:
            logging.info('PASS on ifconfig check for vnet.')
        else:
            error += 'Fail on ifconfig check for vnet.'
        if qemu:
            qemu_mtu_info = process.run('ps aux|grep qemu-kvm',
                                        shell=True, verbose=True).stdout_text
            if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
                logging.info('PASS on qemu cmd line check.')
            else:
                error += 'Fail on qemu cmd line check.'
        if error:
            test.fail(error)

    def check_mtu_in_vm(fn_login, mtu_size):
        """
        Check if mtu meets expectations in vm
        """
        session = fn_login()
        check_cmd = 'ifconfig'
        output = session.cmd(check_cmd)
        session.close()
        logging.debug(output)
        if 'mtu %s' % mtu_size not in output:
            test.fail('MTU check inside vm failed.')
        else:
            logging.debug("MTU check inside vm passed.")

    try:
        bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)
        if add_pkg:
            add_pkg = add_pkg.split()
            utils_package.package_install(add_pkg)
        if 'openvswitch' in add_pkg:
            br = 'ovsbr0' + utils_misc.generate_random_string(3)
            process.run('systemctl start openvswitch.service', shell=True, verbose=True)
            process.run('ovs-vsctl add-br %s' % br, shell=True, verbose=True)
            process.run('ovs-vsctl show', shell=True, verbose=True)

        if not check or check in ['save', 'managedsave', 'hotplug_save']:
            # Create bridge or network and set mtu
            iface_type = 'network'
            if net_type in ('bridge', 'openvswitch'):
                if net_type == 'bridge':
                    params['con_name'], br = create_bridge()
                if mtu_type == 'network':
                    test_net = create_network_xml(
                        bridge_name, net_type,
                        bridge_name=br
                    )
                    virsh.net_create(test_net, debug=True)
                    virsh.net_dumpxml(bridge_name, debug=True)
                if mtu_type == 'interface':
                    iface_type = net_type
                    bridge_name = br
            elif net_type == 'network':
                if mtu_type == 'network':
                    set_network(mtu_size)

            iface_mtu = 0
            if mtu_type == 'interface':
                iface_mtu = mtu_size
            if mtu_type == 'network' and with_iface:
                mtu_size = str(int(mtu_size)//2)
                iface_mtu = mtu_size

            source_net = bridge_name if net_type in ('bridge', 'openvswitch') else 'default'

            # set mtu in vm interface
            set_interface(iface_mtu, source_network=source_net, iface_type=iface_type, iface_model=model)
            vm.start()
            vm_login = vm.wait_for_serial_login if net_type in ('bridge', 'openvswitch') else vm.wait_for_login
            vm_login().close()
            check_qemu = (mtu_type == 'interface')

            # Test mtu after save vm
            if check in ('save', 'hotplug_save'):
                if check == 'hotplug_save':
                    iface = create_iface('network', source_net='default',
                                         mtu=mtu_size, model_net=model)
                    params['mac'] = iface.mac_address
                    virsh.attach_device(vm_name, iface.xml, debug=True)
                    virsh.dumpxml(vm_name, debug=True)
                    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    if params['mac'] not in str(dom_xml):
                        test.fail('Failed to attach interface with mtu')
                save_path = os.path.join(data_dir.get_tmp_dir(), vm_name + '.save')
                virsh.save(vm_name, save_path, debug=True)
                virsh.restore(save_path, debug=True)
            if check == 'managedsave':
                virsh.managedsave(vm_name, debug=True)
                virsh.start(vm_name, debug=True)

            # Check in both host and vm
            check_mtu(mtu_size, check_qemu)
            check_mtu_in_vm(vm_login, mtu_size)
            vm_login(timeout=60).close()

            if check == 'hotplug_save':
                virsh.detach_interface(vm_name, 'network %s' % params['mac'], debug=True)
                time.sleep(5)
                dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if params['mac'] in str(dom_xml):
                    test.fail('Failed to detach interface with mtu after save-restore')

        else:
            hotplug = 'yes' == params.get('hotplug', 'no')
            if check == 'net_update':
                result = virsh.net_update(
                    DEFAULT_NET, 'modify', 'mtu',
                    '''"<mtu size='%s'/>"''' % mtu_size,
                    debug=True
                )
            if check in ('macvtap', 'bridge_net', 'ovswitch_net'):
                base_if = get_default_if()
                macv_name = 'direct-macvtap' + utils_misc.generate_random_string(3)

                # Test mtu in different type of network
                if mtu_type == 'network':
                    if check == 'macvtap':
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if, mtu=mtu_size)
                    if check == 'bridge_net':
                        params['con_name'], br = create_bridge()
                        test_net = create_network_xml(
                            bridge_name, 'bridge', mtu=mtu_size,
                            bridge_name=br
                        )
                    if check == 'ovswitch_net':
                        test_net = create_network_xml(
                            bridge_name, 'openvswitch', mtu=mtu_size,
                            bridge_name=br
                        )
                    if 'net_create' in params['id']:
                        result = virsh.net_create(test_net, debug=True)
                    if 'net_define' in params['id']:
                        result = virsh.net_define(test_net, debug=True)

                # Test mtu with or without a binding network
                elif mtu_type == 'interface':
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    if with_net:
                        test_net = create_network_xml(macv_name, 'macvtap', base_if)
                        virsh.net_create(test_net, debug=True)
                        iface = create_iface('network', source_net=macv_name, mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml, debug=True)
                        else:
                            vmxml.add_device(iface)
                            vmxml.sync()
                            result = virsh.start(vm_name)
                    else:
                        iface = create_iface('direct', base_if=base_if, mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml, debug=True)
                        else:
                            vmxml.add_device(iface)
                            result = virsh.define(vmxml.xml, debug=True)
            if check == 'invalid_val':
                iface = create_iface('network', source_net='default', mtu=mtu_size)
                result = virsh.attach_device(vm_name, iface.xml, debug=True)

            # Check result
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])

    finally:
        bk_xml.sync()
        bk_netxml.sync()
        if 'test_net' in locals():
            virsh.net_destroy(bridge_name, debug=True)
        if params.get('con_name'):
            process.run('nmcli con del %s' % params['con_name'], verbose=True)
        if add_pkg:
            process.run("ovs-vsctl del-br %s" % br, verbose=True)
            utils_package.package_remove(add_pkg)
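set_network() above is the core of the MTU-on-network path: dump the network XML, set the mtu element, and sync it back. A standalone sketch under the same assumptions (avocado-vt's NetworkXML on the 'default' network; the size is illustrative):

from virttest import virsh
from virttest.libvirt_xml.network_xml import NetworkXML

net_xml = NetworkXML.new_from_net_dumpxml('default')
net_xml.mtu = '9000'   # illustrative size
net_xml.sync()         # redefine the network with the new <mtu> element
print(virsh.net_dumpxml('default'))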
コード例 #39
0
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm.wait_for_login()

    restore_state = params.get("restore_state", "running")
    vm_save = params.get("vm_save", "vm.save")

    try:
        # Get a tmp_dir.
        tmp_dir = data_dir.get_tmp_dir()
        if os.path.dirname(vm_save) is "":
            vm_save = os.path.join(tmp_dir, vm_save)

        # Save the RAM state of a running domain
        cmd_result = virsh.save(vm_name, vm_save, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to save running domain %s" % vm_name)

        # Edit the xml in the saved state file
        edit_image_xml()

        # Restore domain
        cmd_result = virsh.restore(vm_save, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to restore domain %s" % vm_name)
        os.remove(vm_save)

        vm_state_check()

    finally:
コード例 #40
0
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    # check the source disk
    if not target:
        raise error.TestFail("Require target disk to copy")
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s.", target, vm_name)
    else:
        raise error.TestFail("Can't find %s in domain %s." % (target, vm_name))
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    default_timeout = params.get("default_timeout", "300")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    rerun_flag = 0

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format
    if not dest_path:
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if copy_to_nfs:
            tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
        dest_path = os.path.join(tmp_dir, tmp_file)

    # Prepare for --reuse-external option
    if reuse_external:
        options += " --reuse-external"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equal to original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += "--raw"
    if len(bandwidth):
        options += "--bandwidth %s" % bandwidth

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")
        if not copy_to_nfs:
            raise error.TestNAError("Bug will not fix:"
                                    " https://bugzilla.redhat.com/show_bug."
                                    "cgi?id=924151")

    extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user,
                  'debug': True, 'ignore_status': True, 'timeout': timeout}

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" %
                  libvirtd_conf)
    libvirtd_utl.restart()

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param expect: Expect image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here. Strip it as a suffix, since
        # str.strip() removes characters, not a trailing substring.
        if dest_extension and dest_path.endswith(dest_extension):
            path_noext = dest_path[:-len(dest_extension)]
        else:
            path_noext = dest_path
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s.", dest_path, expect)
        else:
            raise error.TestFail("%s format is not %s." % (dest_path, expect))

    def blockcopy_chk():
        """
        Raise TestFail when blockcopy hang with state change lock
        """
        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            raise error.TestFail("Hit on bug: %s" % bug_url)

    snap_path = ''
    save_path = ''
    try:
        # Domain disk replacement with desire type
        if replace_vm_disk:
            utl.set_vm_disk(vm, params, tmp_dir)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine()
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command
        if rerun_flag == 1:
            options1 = "--wait --raw --finish --verbose"
            cmd_result = virsh.blockcopy(vm_name, target,
                                         dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise error.TestFail("Run blockcopy command fail.")
            elif not os.path.exists(dest_path):
                raise error.TestFail("Cannot find the created copy.")

        cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                     options,
                                     **extra_dict)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise error.TestFail("Libvirtd service is dead.")

        if not status_error:
            blockcopy_chk()
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise error.TestFail("Domain xml not expected after"
                                         " blockcopy")
                if options.count("--bandwidth"):
                    utl.check_blockjob(vm_name, target,
                                       "bandwidth", bandwidth)
                    if check_state_lock:
                        # Run blockjob pivot in subprocess as it will hang
                        # for a while, run blockjob info again to check
                        # job state
                        command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                    target)
                        session = aexpect.ShellSession(command)
                        ret = virsh.blockjob(vm_name, target, "--info")
                        err_info = "cannot acquire state change lock"
                        if err_info in ret.stderr:
                            raise error.TestFail("Hit on bug: %s" % bug_url)
                        utl.check_exit_status(ret, status_error)
                        session.close()
                val = options.count("--pivot") + options.count("--finish")
                if val == 0:
                    try:
                        finish_job(vm_name, target, default_timeout)
                    except JobTimeout, excpt:
                        raise error.TestFail("Run command failed: %s" %
                                             excpt)
                if options.count("--raw"):
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name, save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
            else:
                err_msg = "internal error: unable to execute QEMU command"
                err_msg += " 'block-job-complete'"
                if err_msg in cmd_result.stderr:
                    raise error.TestFail("Hit on bug: %s" % bug_url)
                raise error.TestFail(cmd_result.stderr)
        else:
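For orientation, the positive path of this test boils down to one blockcopy call plus a result check. A minimal sketch with the same wrapper (the guest "demo-vm" and target "vda" are illustrative):

import os

from virttest import data_dir, virsh

dest_path = os.path.join(data_dir.get_tmp_dir(), "copy.img")
result = virsh.blockcopy("demo-vm", "vda", dest_path,
                         "--wait --verbose --pivot",
                         debug=True, ignore_status=True)
print(result.exit_status, result.stderr)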
コード例 #41
0
ファイル: virsh_event.py プロジェクト: OnePaaS/tp-libvirt
 def trigger_events(events_list=[]):
     """
     Trigger various events in events_list
     """
     expected_events_list = []
     tmpdir = data_dir.get_tmp_dir()
     save_path = os.path.join(tmpdir, "vm_event.save")
     new_disk = os.path.join(tmpdir, "new_disk.img")
     try:
         for event in events_list:
             if event in ["start", "restore"]:
                 if vm.is_alive():
                     vm.destroy()
             else:
                 if not vm.is_alive():
                     vm.start()
                     vm.wait_for_login().close()
             if event == "start":
                 virsh.start(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Started Booted")
                 vm.wait_for_login().close()
             elif event == "save":
                 virsh.save(vm_name, save_path, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Stopped Saved")
             elif event == "restore":
                 if not os.path.exists(save_path):
                     logging.error("%s not exist", save_path)
                 else:
                     virsh.restore(save_path, **virsh_dargs)
                     expected_events_list.append("'lifecycle' for %s:" " Started Restored")
             elif event == "destroy":
                 virsh.destroy(vm_name, **virsh_dargs)
                 expected_events_list.append("'lifecycle' for %s:" " Stopped Destroyed")
             elif event == "reset":
                 virsh.reset(vm_name, **virsh_dargs)
                 expected_events_list.append("'reboot' for %s")
             elif event == "vcpupin":
                 virsh.vcpupin(vm_name, "0", "0", **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:" "\n\tcputune.vcpupin0: 0")
             elif event == "emulatorpin":
                 virsh.emulatorpin(vm_name, "0", **virsh_dargs)
                 expected_events_list.append("'tunable' for %s:" "\n\tcputune.emulatorpin: 0")
             elif event == "setmem":
                 virsh.setmem(vm_name, 1048576, **virsh_dargs)
                 expected_events_list.append("'balloon-change' for %s:")
             elif event == "detach-disk":
                 if not os.path.exists(new_disk):
                     open(new_disk, "a").close()
                 # Attach the disk first; this event will not be caught
                 virsh.attach_disk(vm_name, new_disk, "vdb", **virsh_dargs)
                 virsh.detach_disk(vm_name, "vdb", **virsh_dargs)
                 expected_events_list.append("'device-removed' for %s:" " virtio-disk1")
             else:
                 raise error.TestError("Unsupported event: %s" % event)
             # Event may not received immediately
             time.sleep(3)
     finally:
         if os.path.exists(save_path):
             os.unlink(save_path)
         if os.path.exists(new_disk):
             os.unlink(new_disk)
         return expected_events_list
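Each branch of trigger_events() pairs one virsh call with the event text it should produce. One pair in isolation, under the same assumptions ("demo-vm" is illustrative and the virsh_dargs are omitted):

from virttest import virsh

vm_name = "demo-vm"  # hypothetical guest name
virsh.reset(vm_name)
expected_event = "'reboot' for %s" % vm_name
# expected_event would be matched against `virsh event --loop` output.
print(expected_event)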
コード例 #42
0
ファイル: virsh_domjobinfo.py プロジェクト: LeiCui/virt-test
def run_virsh_domjobinfo(test, params, env):
    """
    Test command: virsh domjobinfo.

    The command returns information about jobs running on a domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform virsh domjobinfo operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    pre_vm_state = params.get("domjobinfo_pre_vm_state", "null")
    vm_ref = params.get("domjobinfo_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    tmp_file = os.path.join(test.tmpdir, '%s.tmp' % vm_name)

    # prepare the state of vm
    if pre_vm_state == "dump":
        virsh.dump(vm_name, tmp_file)
    elif pre_vm_state == "save":
        virsh.save(vm_name, tmp_file)
    elif pre_vm_state == "restore":
        virsh.save(vm_name, tmp_file)
        virsh.restore(tmp_file)
    elif pre_vm_state == "managedsave":
        virsh.managedsave(vm_name)

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domjobinfo_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    status = virsh.domjobinfo(vm_ref, ignore_status=True).exit_status

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
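The call under test is a one-liner once the VM state is prepared. A hedged sketch with the same wrapper ("demo-vm" is illustrative):

from virttest import virsh

result = virsh.domjobinfo("demo-vm", ignore_status=True)
print(result.exit_status)
print(result.stdout)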
コード例 #43
0
        logging.info("Trying to suspend/resume vm %s times", paused_times)
        while paused_times > 0:
            paused_times -= 1
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                virsh.resume(vm.name, debug=True, ignore_status=False)
            except error.CmdError, detail:
                err_msg = "Suspend-Resume %s failed: %s" % (vm.name, detail)
    elif operation == "save_restore":
        save_times = int(params.get("save_times", 10))
        logging.info("Trying to save/restore vm %s times", save_times)
        while save_times > 0:
            save_times -= 1
            save_path = os.path.join(data_dir.get_tmp_dir(), "save.file")
            try:
                virsh.save(vm.name, save_path, debug=True,
                           ignore_status=False)
                virsh.restore(save_path, debug=True, ignore_status=False)
            except error.CmdError, detail:
                err_msg = "Save-Restore %s failed: %s" % (vm.name, detail)
            try:
                os.remove(save_path)
            except OSError:
                pass
    else:
        err_msg = "Unsupport operation in this function: %s" % operation
    return err_msg


def translate_timer_name(timer_name):
    """
    Translate timer name in XML to clock source name in VM.
コード例 #44
0
ファイル: guestpin.py プロジェクト: balamuruhans/tp-libvirt
    def set_condition(vm_name, condn, reset=False, guestbt=None):
        """
        Set domain to given state or reset it.
        """
        bt = None
        if not reset:
            if condn == "avocadotest":
                bt = utils_test.run_avocado_bg(vm, params, test)
                if not bt:
                    test.cancel("guest stress failed to start")
                # Allow stress to start
                time.sleep(condn_sleep_sec)
                return bt
            elif condn == "stress":
                utils_test.load_stress("stress_in_vms", params=params, vms=[vm])
            elif condn in ["save", "managedsave"]:
                # No action
                pass
            elif condn == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, max_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': max_vcpu,
                            'cur_config': current_vcpu, 'cur_live': max_vcpu,
                            'guest_live': max_vcpu}
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option="--live")
            elif condn == "host_smt":
                if cpu.get_cpu_arch() == 'power9':
                    result = process.run("ppc64_cpu --smt=4", shell=True)
                else:
                    test.cancel("Host SMT changes not allowed during guest live")
            else:
                logging.debug("No operation for the domain")

        else:
            if condn == "save":
                save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
                result = virsh.save(vm_name, save_file,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif condn == "managedsave":
                result = virsh.managedsave(vm_name,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                time.sleep(condn_sleep_sec)
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif condn == "avocadotest":
                guestbt.join(ignore_status=True)
            elif condn == "stress":
                utils_test.unload_stress("stress_in_vms", params=params, vms=[vm])
            elif condn == "hotplug":
                result = virsh.setvcpus(vm_name, current_vcpu, "--live",
                                        ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                exp_vcpu = {'max_config': max_vcpu, 'max_live': current_vcpu,
                            'cur_config': current_vcpu, 'cur_live': current_vcpu,
                            'guest_live': current_vcpu}
                result = utils_hotplug.check_vcpu_value(vm, exp_vcpu,
                                                        option="--live")
            elif condn == "host_smt":
                result = process.run("ppc64_cpu --smt=2", shell=True)
                # Change back the host smt
                result = process.run("ppc64_cpu --smt=4", shell=True)
                # Workaround for a known cgroup issue after a cpu hot(un)plug
                # sequence
                root_cpuset_path = utils_cgroup.get_cgroup_mountpoint("cpuset")
                machine_cpuset_paths = []
                if os.path.isdir(os.path.join(root_cpuset_path,
                                              "machine.slice")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine.slice"))
                if os.path.isdir(os.path.join(root_cpuset_path, "machine")):
                    machine_cpuset_paths.append(os.path.join(root_cpuset_path,
                                                             "machine"))
                if not machine_cpuset_paths:
                    logging.warning("cgroup cpuset might not recover properly "
                                    "for guests after host smt changes, "
                                    "restore it manually")
                root_cpuset_cpus = os.path.join(root_cpuset_path, "cpuset.cpus")
                for path in machine_cpuset_paths:
                    machine_cpuset_cpus = os.path.join(path, "cpuset.cpus")
                    # check if file content differs
                    cmd = "diff %s %s" % (root_cpuset_cpus,
                                          machine_cpuset_cpus)
                    if process.system(cmd, verbose=True, ignore_status=True):
                        cmd = "cp %s %s" % (root_cpuset_cpus,
                                            machine_cpuset_cpus)
                        process.system(cmd, verbose=True)

            else:
                logging.debug("No need recover the domain")
        return bt
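
A minimal standalone sketch of the same set/reset idea, using only the virsh CLI (only the 'save' and 'suspend' conditions are shown; the domain name in the usage lines is an illustrative assumption):

import os
import subprocess
import tempfile


def set_condition(vm_name, condn, reset=False):
    """Apply or revert a domain condition; only 'save' and 'suspend' shown."""
    if not reset:
        if condn == "suspend":
            subprocess.run(["virsh", "suspend", vm_name], check=True)
        # 'save' needs no setup action; the work happens on reset
    else:
        if condn == "save":
            save_file = os.path.join(tempfile.gettempdir(), vm_name + ".save")
            subprocess.run(["virsh", "save", vm_name, save_file], check=True)
            subprocess.run(["virsh", "restore", save_file], check=True)
            os.remove(save_file)
        elif condn == "suspend":
            subprocess.run(["virsh", "resume", vm_name], check=True)

set_condition("demo-vm", "save")              # setup: nothing to do for 'save'
set_condition("demo-vm", "save", reset=True)  # reset: save, then restore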
Code example #45
0
            # Run the command twice to make sure the cdrom tray opens first #BZ892289
            # Open tray
            virsh.attach_device(domainarg=vm_name, filearg=eject_xml, debug=True)
            # Add time sleep between two attach commands.
            if time_sleep:
                time.sleep(float(time_sleep))
            # Eject cdrom
            result = virsh.attach_device(domainarg=vm_name, filearg=eject_xml,
                                         debug=True)
            if result.exit_status != 0:
                raise error.TestFail("Eject CDROM failed")
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                raise error.TestFail("Find %s after do eject" % device_source)
        # Save and restore VM
        if save_vm:
            result = virsh.save(vm_name, save_file, debug=True)
            libvirt.check_exit_status(result)
            result = virsh.restore(save_file, debug=True)
            libvirt.check_exit_status(result)
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                raise error.TestFail("Find %s after do restore" % device_source)

        # Destroy VM.
        vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        inactive_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disk_count_after_shutdown = len(inactive_vmxml.get_disk_all())
        if test_cmd == "attach-disk":
            if disk_count_after_shutdown == disk_count_before_cmd:
Code example #46
0
File: virsh_restore.py  Project: leidwang/tp-libvirt
def run(test, params, env):
    """
    Test command: virsh restore.

    Restore a domain from a saved state in a file
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh restore command with assigned option.
    4.Recover test environment.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    session = vm.wait_for_login()

    os_type = params.get("os_type")
    status_error = ("yes" == params.get("status_error"))
    libvirtd = params.get("libvirtd", "on")
    extra_param = params.get("restore_extra_param")
    pre_status = params.get("restore_pre_status")
    vm_ref = params.get("restore_vm_ref")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    time_before_save = int(params.get('time_before_save', 0))

    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    if "--xml" in extra_param:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name, options="--migratable")
        backup_xml = vmxml.copy()
        # Grant more privileges on the file so that an unprivileged user
        # can access it.
        os.chmod(vmxml.xml, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
        extra_param = "--xml %s" % vmxml.xml
        dict_os_attrs = {}
        if "hd" in vmxml.os.boots:
            dict_os_attrs.update({"boots": ["cdrom"]})
            vmxml.set_os_attrs(**dict_os_attrs)
        else:
            test.cancel("Please add 'hd' in boots for --xml testing")
        logging.info("vmxml os is %s after update" % vmxml.os.xmltreefile)

    # run test
    if vm_ref == "" or vm_ref == "xyz":
        status = virsh.restore(vm_ref, extra_param, debug=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               ignore_status=True).exit_status
    else:
        if os_type == "linux":
            cmd = "cat /proc/cpuinfo"
            try:
                status, output = session.cmd_status_output(cmd, timeout=10)
            finally:
                session.close()
            if not re.search("processor", output):
                test.fail("Unable to read /proc/cpuinfo")
        tmp_file = os.path.join(data_dir.get_tmp_dir(), "save.file")
        time.sleep(time_before_save)
        ret = virsh.save(vm_name, tmp_file, debug=True)
        libvirt.check_exit_status(ret)
        if vm_ref == "saved_file":
            vm_ref = tmp_file
        elif vm_ref == "empty_new_file":
            tmp_file = os.path.join(data_dir.get_tmp_dir(), "new.file")
            with open(tmp_file, 'w') as tmp:
                pass
            vm_ref = tmp_file
        if vm.is_alive():
            vm.destroy()
        if pre_status == "start":
            virsh.start(vm_name)
        if libvirtd == "off":
            utils_libvirtd.libvirtd_stop()
        status = virsh.restore(vm_ref, extra_param, debug=True,
                               unprivileged_user=unprivileged_user,
                               uri=uri,
                               ignore_status=True).exit_status
    if not status_error:
        list_output = virsh.dom_list().stdout.strip()

    session.close()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    try:
        if status_error:
            if not status:
                if libvirtd == "off" and libvirt_version.version_compare(5, 6, 0):
                    logging.info("From libvirt version 5.6.0 libvirtd is restarted "
                                 "and command should succeed")
                else:
                    test.fail("Run successfully with wrong command!")
        else:
            if status:
                test.fail("Run failed with right command")
            if not re.search(vm_name, list_output):
                test.fail("Run failed with right command")
            if extra_param.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after restore"
                              " due to the option --paused")
            if (extra_param.count("running") or
                    extra_param.count("xml") or
                    not extra_param):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be"
                              " running after restore")
            if extra_param.count("xml"):
                aft_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                boots_list = aft_vmxml.os.boots
                if "hd" in boots_list or "cdrom" not in boots_list:
                    test.fail("Update xml with restore failed")
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
        if "--xml" in extra_param:
            backup_xml.sync()
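
The positive path of the test reduces to a short save-then-restore round trip; a hedged sketch with the plain virsh CLI (domain name and save path supplied by the caller):

import subprocess


def save_and_restore(vm_name, save_path):
    """Save a running domain, restore it, and confirm it is listed again."""
    subprocess.run(["virsh", "save", vm_name, save_path], check=True)
    subprocess.run(["virsh", "restore", save_path], check=True)
    names = subprocess.run(["virsh", "list", "--name"], capture_output=True,
                           text=True, check=True).stdout.split()
    if vm_name not in names:
        raise RuntimeError("%s missing from 'virsh list' after restore" % vm_name)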
Code example #47
0
def run_virsh_save(test, params, env):
    """
    Test command: virsh save.

    The command can save the RAM state of a running domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh save command with assigned options.
    4.Recover test environment. (If the libvirtd service is stopped, start
      the libvirtd service.)
    5.Confirm the test result.

    """
    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    domid = virsh.domid(vm_name).strip()
    domuuid = virsh.domuuid(vm_name).strip()

    savefile = params.get("save_file")
    pre_vm_state = params.get("save_pre_vm_state", "null")
    libvirtd = params.get("save_libvirtd")
    extra_param = params.get("save_extra_param")
    vm_ref = params.get("save_vm_ref")

    # prepare the environment
    if vm_ref == "name" and pre_vm_state == "paused":
        virsh.suspend(vm_name)
    elif vm_ref == "name" and pre_vm_state == "shut off":
        virsh.destroy(vm_name)

    # set the option
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref == "save_invalid_id" or vm_ref == "save_invalid_uuid":
        vm_ref = params.get(vm_ref)
    elif vm_ref.find("name") != -1 or vm_ref == "extra_param":
        savefile = "%s %s" % (savefile, extra_param)
        if vm_ref == "only_name":
            savefile = " "
        vm_ref = vm_name

    if libvirtd == "off":
        libvirt_vm.libvirtd_stop()
    status = virsh.save(vm_ref, savefile, ignore_status=True).exit_status

    # recover libvirtd service start
    if libvirtd == "off":
        libvirt_vm.libvirtd_start()

    # cleanup
    if os.path.exists(savefile):
        virsh.restore(savefile)
        os.remove(savefile)

    # check status_error
    status_error = params.get("save_status_error")
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
Code example #48
0
File: virsh_save.py  Project: yalzhang/tp-libvirt
def run(test, params, env):
    """
    Test command: virsh save.

    The command can save the RAM state of a running domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh save command with assigned options.
    4.Recover test environment. (If the libvirtd service is stopped, start
      the libvirtd service.)
    5.Confirm the test result.

    """
    savefile = params.get("save_file", "save.file")
    if savefile:
        savefile = os.path.join(data_dir.get_tmp_dir(), savefile)
    libvirtd = params.get("libvirtd", "on")
    extra_param = params.get("save_extra_param")
    vm_ref = params.get("save_vm_ref")
    progress = ("yes" == params.get("save_progress", "no"))
    options = params.get("save_option", "")
    status_error = ("yes" == params.get("save_status_error", "yes"))
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    uri = params.get("virsh_uri")
    readonly = ("yes" == params.get("save_readonly", "no"))
    expect_msg = params.get("save_err_msg", "")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # set the option
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.count("invalid"):
        vm_ref = params.get(vm_ref)
    elif vm_ref.count("name"):
        vm_ref = vm_name
    vm_ref += (" %s" % extra_param)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    if progress:
        options += " --verbose"
    result = virsh.save(vm_ref,
                        savefile,
                        options,
                        ignore_status=True,
                        unprivileged_user=unprivileged_user,
                        uri=uri,
                        debug=True,
                        readonly=readonly)
    status = result.exit_status
    err_msg = result.stderr.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    if savefile:
        virsh.restore(savefile, debug=True)

    # check status_error
    try:
        if status_error:
            if not status:
                if libvirtd == "off" and libvirt_version.version_compare(
                        5, 6, 0):
                    logging.info(
                        "From libvirt version 5.6.0 libvirtd is restarted "
                        "and command should succeed")
                else:
                    test.fail("virsh run succeeded with an "
                              "incorrect command")
            if readonly:
                if not re.search(expect_msg, err_msg):
                    test.fail("Fail to get expect err msg: %s" % expect_msg)
        else:
            if status:
                test.fail("virsh run failed with a " "correct command")
            if progress and not err_msg.count("Save:"):
                test.fail("No progress information outputted!")
            if options.count("running"):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be"
                              " running after restore"
                              " due to the option --running")
            elif options.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after restore"
                              " due to the option --paused")
            else:
                if vm.is_dead():
                    test.fail("Guest state should be"
                              " alive after restore"
                              " since no option was specified")
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
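
The progress branch relies on virsh printing "Save:" lines on stderr when --verbose is given; a hedged sketch of that check outside the framework:

import subprocess


def save_with_progress(vm_name, save_path):
    """Save with --verbose and confirm progress output appeared on stderr."""
    res = subprocess.run(["virsh", "save", vm_name, save_path, "--verbose"],
                         capture_output=True, text=True, check=True)
    if "Save:" not in res.stderr:
        raise RuntimeError("no 'Save:' progress lines in stderr")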
Code example #49
0
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Set up an iscsi target (and an iscsi pool for volume-type disks)
    3. Create an iscsi network disk XML
    4. Attach disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk doesn't support in"
                                    " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in"
                                    " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s",
                          open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size='1G',
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                raise error.TestError("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            disk_params_src = {
                'source_pool': disk_src_pool,
                'source_volume': vol_name,
                'driver_type': 'qcow2',
                'source_mode': disk_src_mode
            }
        else:
            raise error.TestNAError("Unsupported disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        with open(disk_xml) as disk_xml_f:
            disk_xml_content = disk_xml_f.read()
        logging.debug("Attach disk by XML: %s", disk_xml_content)
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to be stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                raise error.TestFail("Failed getting snapshots list for %s" %
                                     vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                raise error.TestFail("Failed getting snapshots info for %s" %
                                     vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(test.tmpdir, snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s" %
                            (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result.stdout:
                raise error.TestError("Snapshot %s not found" % snapshot_name2)

        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
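
The CHAP setup above hinges on storing a base64-encoded password into a libvirt secret; a minimal sketch of that one step, assuming the secret has already been defined and its uuid is known:

import base64
import subprocess


def set_iscsi_secret_value(secret_uuid, chap_passwd):
    """Feed the base64-encoded CHAP password to an existing libvirt secret."""
    value = base64.b64encode(chap_passwd.encode()).decode()
    subprocess.run(["virsh", "secret-set-value", secret_uuid, value],
                   check=True)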
Code example #50
0
def run(test, params, env):
    """
    Test command: virsh domcontrol.

    The command can show the state of a control interface to the domain.
    1.Prepare test environment, destroy or suspend a VM.
    2.Do an action to get a subprocess (dump, save, restore, managedsave) if
      domcontrol_job is set as yes.
    3.Perform virsh domcontrol to check state of a control interface to the
      domain.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "running")
    options = params.get("domcontrol_options", "")
    action = params.get("domcontrol_action", "dump")
    tmp_file = os.path.join(test.tmpdir, "domcontrol.tmp")
    vm_ref = params.get("domcontrol_vm_ref")
    job = params.get("domcontrol_job", "yes")
    readonly = "yes" == params.get("readonly", "no")
    status_error = params.get("status_error", "no")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    if action == "managedsave":
        tmp_file = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        virsh.save(vm_name, tmp_file, ignore_status=True)

    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        # Check domain control interface state with a job on the domain.
        process = get_subprocess(action, vm_name, tmp_file)
        while process.poll() is None:
            if vm.is_alive():
                ret = virsh.domcontrol(vm_ref, options, ignore_status=True,
                                       debug=True)
                status = ret.exit_status
                # check status_error
                if status != 0:
                    # Do not raise error if domain is not running, as save,
                    # managedsave and restore will change the domain state
                    # from running to shutoff or reverse, and the timing of
                    # the state change is not predictable, so skip the error
                    # of domain state change and focus on domcontrol command
                    # status while domain is running.
                    if vm.is_alive():
                        raise error.TestFail("Run failed with right command")
    else:
        # Check domain control interface state without a job on the domain.
        ret = virsh.domcontrol(vm_ref, options, readonly=readonly,
                               ignore_status=True, debug=True)
        status = ret.exit_status

        # check status_error
        if status_error == "yes":
            if status == 0:
                raise error.TestFail("Run successfully with wrong command!")
        elif status_error == "no":
            if status != 0:
                raise error.TestFail("Run failed with right command")

    # Recover the environment.
    if action == "managedsave":
        virsh.managedsave_remove(vm_name, ignore_status=True)
    if os.path.exists(tmp_file):
        os.unlink(tmp_file)
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        if process.poll() is None:
            process.kill()
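
A standalone sketch of the same idea, polling the control interface while a background save job runs (plain virsh CLI; the half-second poll interval is an arbitrary choice):

import subprocess
import time


def domcontrol_during_save(vm_name, save_path):
    """Poll 'virsh domcontrol' while a save job is in flight."""
    job = subprocess.Popen(["virsh", "save", vm_name, save_path])
    states = []
    while job.poll() is None:
        res = subprocess.run(["virsh", "domcontrol", vm_name],
                             capture_output=True, text=True)
        states.append(res.stdout.strip())
        time.sleep(0.5)
    return states  # e.g. 'ok' and/or 'occupied' entries while the job runs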
Code example #51
0
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Set up an iscsi target (and an iscsi pool for volume-type disks)
    3. Create an iscsi network disk XML
    4. Attach disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to be stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(data_dir.get_tmp_dir(), snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result.stdout:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
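
For reference, the disk_params for the network case describe roughly this XML shape (a hand-written sketch, not the exact output of libvirt.create_disk_xml):

def iscsi_disk_xml(target_dev, host, port, source_name):
    """Render a minimal iscsi network disk element like the test attaches."""
    return (
        "<disk type='network' device='disk'>\n"
        "  <driver name='qemu' type='raw'/>\n"
        "  <source protocol='iscsi' name='%s'>\n"
        "    <host name='%s' port='%s'/>\n"
        "  </source>\n"
        "  <target dev='%s' bus='virtio'/>\n"
        "</disk>\n" % (source_name, host, port, target_dev)
    )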
Code example #52
0
def run(test, params, env):
    """
    Test command: virsh save-image-dumpxml <file>
                  virsh save-image-define <file> [--xml <string>]

    1) Prepare test environment.
    2) Execute save-image-dumpxml to get VM xml description
    3) Edit the xml string and call virsh save-image-define to redefine it
    4) Restore VM
    5) Check the new xml of the VM
    """

    def get_image_xml():
        # Invoke save-image-dumpxml
        cmd_result = virsh.save_image_dumpxml(vm_save, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to dump xml from "
                                 "saved state file:%s" % vm_save)

        xml = cmd_result.stdout.strip()

        match_string = "<name>%s</name>" % vm_name
        if not re.search(match_string, xml):
            raise error.TestFail("The xml from saved state file "
                                 "is invalid")
        return xml

    def redefine_new_xml():
        if restore_state == "running":
            option = "--running"
        elif restore_state == "paused":
            option = "--paused"
        else:
            raise error.TestFail("Unknown save-image-define option")

        cmd_result = virsh.save_image_define(vm_save, xmlfile, option,
                                             debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to redefine new xml %s for %s" %
                                 (xmlfile, vm_save))

    def vm_state_check():
        cmd_result = virsh.dumpxml(vm_name, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to dump xml of domain %s" % vm_name)

        # The xml should contain the match_string
        xml = cmd_result.stdout.strip()
        match_string = "<boot dev='cdrom'/>"
        if not re.search(match_string, xml):
            raise error.TestFail("After domain restore, "
                                 "the xml is not expected")

        domstate = virsh.domstate(vm_name, debug=True).stdout.strip()
        if restore_state != domstate:
            raise error.TestFail("The domain state is not expected")

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")

    restore_state = params.get("restore_state", "running")
    vm_save = params.get("vm_save", "vm.save")

    try:
        # Get a tmp_dir.
        tmp_dir = data_dir.get_tmp_dir()

        if os.path.dirname(vm_save) is "":
            vm_save = os.path.join(tmp_dir, vm_save)

        # Save the RAM state of a running domain
        cmd_result = virsh.save(vm_name, vm_save, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to save running domain %s" % vm_name)

        xml = get_image_xml()

        # Replace <boot dev='hd'/> to <boot dev='cdrom'/>
        newxml = xml.replace("<boot dev='hd'/>", "<boot dev='cdrom'/>")
        logging.debug("After string replacement, the new xml is %s", newxml)

        # Write new xml into a tempfile
        tmp_file = tempfile.NamedTemporaryFile(prefix=("new_vm_xml_"),
                                               dir=tmp_dir)
        xmlfile = tmp_file.name
        tmp_file.close()

        fd = open(xmlfile, 'w')
        fd.write(newxml)
        fd.close()

        # Redefine new xml for domain's saved state file
        redefine_new_xml()

        # Restore domain
        cmd_result = virsh.restore(vm_save, debug=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to restore domain %s" % vm_name)
        os.remove(vm_save)

        vm_state_check()

    finally:
        # cleanup
        if restore_state == "paused":
            virsh.resume(vm_name)

        if os.path.exists(vm_save):
            virsh.restore(vm_save)
            os.remove(vm_save)

        if os.path.exists(xmlfile):
            os.remove(xmlfile)
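
The dump/edit/define/restore round trip can be reproduced with the virsh CLI alone; a hedged sketch (the boot-device substitution mirrors the test, the paths are the caller's):

import subprocess


def edit_saved_image(save_path, xml_path):
    """Dump the XML inside a save file, tweak it, and redefine it as running."""
    xml = subprocess.run(["virsh", "save-image-dumpxml", save_path],
                         capture_output=True, text=True, check=True).stdout
    with open(xml_path, "w") as f:
        f.write(xml.replace("<boot dev='hd'/>", "<boot dev='cdrom'/>"))
    subprocess.run(["virsh", "save-image-define", save_path, xml_path,
                    "--running"], check=True)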
Code example #53
0
def run(test, params, env):
    """
    Test command: virsh restore.

    Restore a domain from a saved state in a file
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Run virsh restore command with assigned option.
    4.Recover test environment.
    5.Confirm the test result.
    """
    def check_file_own(file_path, exp_uid, exp_gid):
        """
        Check the uid and gid of file_path

        :param file_path: The file path
        :param exp_uid: The expected uid
        :param exp_gid: The expected gid
        :raise: test.fail if the uid and gid of file are not expected
        """
        fstat_res = os.stat(file_path)
        if fstat_res.st_uid != exp_uid or fstat_res.st_gid != exp_gid:
            test.fail("The uid.gid {}.{} is not expected, it should be {}.{}.".
                      format(fstat_res.st_uid, fstat_res.st_gid, exp_uid,
                             exp_gid))

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    os_type = params.get("os_type")
    status_error = ("yes" == params.get("status_error"))
    libvirtd = params.get("libvirtd", "on")
    extra_param = params.get("restore_extra_param")
    pre_status = params.get("restore_pre_status")
    vm_ref = params.get("restore_vm_ref")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    time_before_save = int(params.get('time_before_save', 0))
    setup_nfs = "yes" == params.get("setup_nfs", "no")
    setup_iscsi = "yes" == params.get("setup_iscsi", "no")
    check_log = params.get("check_log")
    check_str_not_in_log = params.get("check_str_not_in_log")
    qemu_conf_dict = eval(params.get("qemu_conf_dict", "{}"))

    vm_ref_uid = None
    vm_ref_gid = None
    qemu_conf = None

    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    try:
        if "--xml" in extra_param:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name,
                                                  options="--migratable")
            backup_xml = vmxml.copy()
            # Grant more privileges on the file so that an unprivileged user
            # can access it.
            os.chmod(vmxml.xml, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)
            if not setup_nfs:
                extra_param = "--xml %s" % vmxml.xml
                dict_os_attrs = {}
                if "hd" in vmxml.os.boots:
                    dict_os_attrs.update({"boots": ["cdrom"]})
                    vmxml.set_os_attrs(**dict_os_attrs)
                else:
                    test.cancel("Please add 'hd' in boots for --xml testing")
                logging.info("vmxml os is %s after update" %
                             vmxml.os.xmltreefile)
            else:
                params["mnt_path_name"] = params.get("nfs_mount_dir")
                vm_ref_uid = params["change_file_uid"] = pwd.getpwnam(
                    "qemu").pw_uid
                vm_ref_gid = params["change_file_gid"] = grp.getgrnam(
                    "qemu").gr_gid
                libvirt.set_vm_disk(vm, params)

        session = vm.wait_for_login()
        # Clear log file
        if check_log:
            cmd = "> %s" % check_log
            process.run(cmd, shell=True, verbose=True)
        if qemu_conf_dict:
            logging.debug("Update qemu configuration file.")
            qemu_conf = libvirt.customize_libvirt_config(
                qemu_conf_dict, "qemu")
            process.run("cat /etc/libvirt/qemu.conf", shell=True, verbose=True)

        # run test
        if vm_ref == "" or vm_ref == "xyz":
            status = virsh.restore(vm_ref,
                                   extra_param,
                                   debug=True,
                                   unprivileged_user=unprivileged_user,
                                   uri=uri,
                                   ignore_status=True).exit_status
        else:
            if os_type == "linux":
                cmd = "cat /proc/cpuinfo"
                try:
                    status, output = session.cmd_status_output(cmd, timeout=10)
                finally:
                    session.close()
                if not re.search("processor", output):
                    test.fail("Unable to read /proc/cpuinfo")
            tmp_file = os.path.join(data_dir.get_tmp_dir(), "save.file")
            if setup_iscsi:
                tmp_file = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                          is_login=True,
                                                          image_size='1G')
            time.sleep(time_before_save)
            ret = virsh.save(vm_name, tmp_file, debug=True)
            libvirt.check_exit_status(ret)
            if vm_ref == "saved_file" or setup_iscsi:
                vm_ref = tmp_file
            elif vm_ref == "empty_new_file":
                tmp_file = os.path.join(data_dir.get_tmp_dir(), "new.file")
                with open(tmp_file, 'w') as tmp:
                    pass
                vm_ref = tmp_file

            # Change the ownership of the saved file
            if vm_ref_uid and vm_ref_gid:
                os.chown(vm_ref, vm_ref_uid, vm_ref_gid)
                tmpdir = data_dir.get_tmp_dir()
                dump_xml = os.path.join(tmpdir, "test.xml")
                virsh.save_image_dumpxml(vm_ref, "> %s" % dump_xml)
                extra_param = "--xml %s" % dump_xml
                check_file_own(vm_ref, vm_ref_uid, vm_ref_gid)

            if vm.is_alive():
                vm.destroy()
            if pre_status == "start":
                virsh.start(vm_name)
            if libvirtd == "off":
                utils_libvirtd.libvirtd_stop()
            status = virsh.restore(vm_ref,
                                   extra_param,
                                   debug=True,
                                   unprivileged_user=unprivileged_user,
                                   uri=uri,
                                   ignore_status=True).exit_status
        if not status_error:
            list_output = virsh.dom_list().stdout.strip()

        session.close()

        # recover libvirtd service start
        if libvirtd == "off":
            utils_libvirtd.libvirtd_start()

        if status_error:
            if not status:
                if libvirtd == "off" and libvirt_version.version_compare(
                        5, 6, 0):
                    logging.info(
                        "From libvirt version 5.6.0 libvirtd is restarted "
                        "and command should succeed")
                else:
                    test.fail("Run successfully with wrong command!")
        else:
            if status:
                test.fail("Run failed with right command")
            if not re.search(vm_name, list_output):
                test.fail("Run failed with right command")
            if extra_param.count("paused"):
                if not vm.is_paused():
                    test.fail("Guest state should be"
                              " paused after restore"
                              " due to the option --paused")
            if (extra_param.count("running") or extra_param.count("xml")
                    or not extra_param):
                if vm.is_dead() or vm.is_paused():
                    test.fail("Guest state should be" " running after restore")
            if extra_param.count("xml"):
                if not setup_nfs:
                    aft_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    boots_list = aft_vmxml.os.boots
                    if "hd" in boots_list or "cdrom" not in boots_list:
                        test.fail("Update xml with restore failed")
                else:
                    if vm_ref_uid and vm_ref_gid:
                        check_file_own(vm_ref, vm_ref_uid, vm_ref_gid)
                        vm.destroy()
                        check_file_own(vm_ref, vm_ref_uid, vm_ref_gid)
            if check_str_not_in_log and check_log:
                libvirt.check_logfile(check_str_not_in_log, check_log, False)
    finally:
        if vm.is_paused():
            virsh.resume(vm_name)
        if "--xml" in extra_param:
            backup_xml.sync()
        if setup_nfs:
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         mount_dir=params.get("mnt_path_name"),
                                         export_dir=params.get("export_dir"),
                                         rm_export_dir=False)
        if setup_iscsi:
            libvirt.setup_or_cleanup_iscsi(False)
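
For orientation, the save/restore flow that the test above exercises boils down to a short round trip. The sketch below is a minimal, hypothetical reduction using the same avocado-vt helpers (virsh, data_dir, libvirt from virttest); it assumes a running VM object `vm` and is illustrative, not part of the original test.

import os

from virttest import data_dir, virsh
from virttest.utils_test import libvirt


def save_restore_roundtrip(vm):
    """Save a running domain to a file, then restore it."""
    save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm.name)
    ret = virsh.save(vm.name, save_file, debug=True)
    libvirt.check_exit_status(ret)
    # A successful save leaves the domain shut off; restore boots it again
    ret = virsh.restore(save_file, debug=True)
    libvirt.check_exit_status(ret)
    os.remove(save_file)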
Code example #54
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    check_finish_job = "yes" == params.get("check_finish_job", "yes")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    relative_path = params.get("relative_path")
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    snapshots_take = int(params.get("snapshots_take", '0'))
    external_disk_only_snapshot = "yes" == params.get(
        "external_disk_only_snapshot", "no")
    enable_iscsi_auth = "yes" == params.get("enable_iscsi_auth", "no")

    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")
    if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0):
        test.cancel(
            "Forbidding relative paths or bare file names was introduced "
            "in libvirt-3.0.0")

    if "--transient-job" in options and not libvirt_version.version_compare(
            4, 5, 0):
        test.cancel(
            "--transient-job option is supported until libvirt 4.5.0 version")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" %
                                  (target, vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equal to original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True,
        'ignore_status': True,
        'timeout': timeout
    }

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(),
                                     "libvirt_daemons.log")
    libvirtd_conf_dict = {
        "log_filter": '"3:json 1:libvirt 1:qemu"',
        "log_outputs": '"1:file:%s"' % libvirtd_log_path
    }
    logging.debug("the libvirtd conf file content is :\n %s" %
                  libvirtd_conf_dict)
    libvirtd_conf = utl.customize_libvirt_config(libvirtd_conf_dict)

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param dest_extension: Extension that QemuImg re-adds, stripped here
        :param expect: Expected image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here.
        path_noext = dest_path.strip(dest_extension)
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" %
                                      (dest_path, expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fails with a block-job-complete error
        or hangs with a state change lock.
        This verifies a specific bug, so status_error is ignored here.
        """
        failure_msg = ""
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            failure_msg += "Virsh cmd error happened: %s\n" % err_msg
        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            failure_msg += "Libvirtd log error happened: %s\n" % err_pattern
        if failure_msg:
            if not libvirt_version.version_compare(1, 3, 2):
                bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
                failure_msg += "Hit on bug: %s " % bug_url_
            test.fail(failure_msg)

    def _make_snapshot(snapshot_numbers_take):
        """
        Make external disk snapshot

        :param snapshot_numbers_take: number of snapshots to take.
        """
        for count in range(0, snapshot_numbers_take):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "blockcopy_snap"
            snap_xml.snap_name = snapshot_name + "_%s" % count
            snap_xml.description = "blockcopy snapshot"

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            # Remove non-storage disks such as 'cdrom'; iterate over a copy
            # because the list is mutated while looping
            for disk in disks[:]:
                if disk.device != 'disk':
                    disks.remove(disk)
            new_disks = []
            src_disk_xml = disks[0]
            disk_xml = snap_xml.SnapDiskXML()
            disk_xml.xmltreefile = src_disk_xml.xmltreefile
            del disk_xml.device
            del disk_xml.address
            disk_xml.snapshot = "external"
            disk_xml.disk_name = disk_xml.target['dev']

            # Only qcow2 works as external snapshot file format, update it
            # here
            driver_attr = disk_xml.driver
            driver_attr.update({'type': 'qcow2'})
            disk_xml.driver = driver_attr

            # Initialize hosts so the branches below may leave it unset safely
            hosts = None
            new_attrs = disk_xml.source.attrs
            if 'file' in disk_xml.source.attrs:
                new_file = os.path.join(tmp_dir,
                                        "blockcopy_shallow_%s.snap" % count)
                snapshot_external_disks.append(new_file)
                new_attrs.update({'file': new_file})
                hosts = None
            elif ('dev' in disk_xml.source.attrs
                  or 'name' in disk_xml.source.attrs
                  or 'pool' in disk_xml.source.attrs):
                if (disk_xml.type_name == 'block'
                        or disk_source_protocol == 'iscsi'):
                    disk_xml.type_name = 'block'
                    if 'name' in new_attrs:
                        del new_attrs['name']
                        del new_attrs['protocol']
                    elif 'pool' in new_attrs:
                        del new_attrs['pool']
                        del new_attrs['volume']
                        del new_attrs['mode']
                    back_path = utl.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=True,
                        image_size="1G",
                        emulated_image=back_n)
                    emulated_iscsi.append(back_n)
                    cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                    process.run(cmd, shell=True)
                    new_attrs.update({'dev': back_path})
                    hosts = None

            new_src_dict = {"attrs": new_attrs}
            if hosts:
                new_src_dict.update({"hosts": hosts})
            disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

            new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)

            if snapshot_result.exit_status != 0:
                raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if enable_iscsi_auth:
                utils_secret.clean_up_secrets()
                setup_auth_enabled_iscsi_disk(vm, params)
                dest_path = os.path.join(tmp_dir, tmp_file)
            elif with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk shows up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desired type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is a bad idea as it leaves lots of cleanup
            # jobs after the test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow or external_disk_only_snapshot or enable_iscsi_auth:
            _make_snapshot(snapshots_take)

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine()
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command to create destination file
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target, dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail: %s" %
                                          cmd_result.stdout.strip() +
                                          cmd_result.stderr)
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

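        # --transient-job path: start the blockcopy in a worker thread, kill
        # the copy process, then verify that no block job remains.
        # (blockcopy_thread and kill_blockcopy_process are helpers defined
        # elsewhere in this module.)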
        if "--transient-job" in options:
            pool = ThreadPool(processes=1)
            async_result = pool.apply_async(
                blockcopy_thread, (vm_name, target, dest_path, options))
            kill_blockcopy_process()
            utl.check_blockjob(vm_name, target)
            return

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path, options,
                                     **extra_dict)

        # check BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not (bandwidth
                            in ['0B', '0M']) and not utl.check_blockjob(
                                vm_name, target, "bandwidth", bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for job finish when using --bytes option
                val += options.count('--bytes')
                if val == 0 and check_finish_job:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name,
                                                   snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name,
                                     save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run blockjob pivot in subprocess as it will hang
                    # for a while, run blockjob info again to check
                    # job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout.strip() +
                                          cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when
                # unexpectedly failing due to timeout from a fail (1)
                # to a success (0), so we need to look for a different
                # marker to indicate the copy aborted. "Now in mirroring
                # phase" may appear in stdout and wrongly fail that check,
                # so also check the libvirtd log to confirm.
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout.strip())
                            or chk_libvirtd_log(libvirtd_log_path, log_pattern,
                                                "debug")):
                        logging.debug("Found success a timed out block copy")
                else:
                    raise exceptions.TestFail("Expect fail, but run "
                                              "successfully: %s" % bug_url)
    finally:
        # Recovering the VM may fail unexpectedly, so use try/except to make
        # sure the following cleanup steps still run
        try:
            # Abort any existing blockjob to avoid possible lock errors
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shut down a VM that has a
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name, ignore_status=True).
                exit_status, 180)
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up libvirt pool, which may be created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False,
                                           emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a break before next loop
                time.sleep(5)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart virtlogd service to release VM log file lock
        try:
            path.find_command('virtlogd')
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd')
        except path.CmdNotFoundError:
            pass
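
Stripped of its configuration matrix, the positive path above reduces to one wrapper call. A minimal sketch under assumed, illustrative values (domain "avocado-vt-vm1", disk target "vda"); "--wait --verbose --finish" waits for completion and keeps the copy in the destination file instead of pivoting the domain onto it:

import os

from virttest import data_dir, virsh

dest_path = os.path.join(data_dir.get_tmp_dir(), "vda_copy.img")
result = virsh.blockcopy("avocado-vt-vm1", "vda", dest_path,
                         "--wait --verbose --finish", debug=True)
if result.exit_status:
    raise RuntimeError("blockcopy failed: %s" % result.stderr)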
Code example #55
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_suspend_resume = "yes" == params.get("test_suspend_resume", "no")
    pmsuspend_error = 'yes' == params.get("pmsuspend_error", 'no')

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # A backup of original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible fail patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs_copy = virsh_dargs.copy()
        virsh_dargs_copy['uri'] = uri
        virsh_dargs_copy['unprivileged_user'] = unprivileged_user
        if pmsuspend_error:
            fail_pat.append('access denied')

    # Setup possible failure patterns
    if pm_enabled == 'not_set':
        fail_pat.append('not supported')
    if pm_enabled == 'no':
        fail_pat.append('disabled')

    if vm_state == 'paused':
        fail_pat.append('not responding')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if pm_enabled == 'not_set':
            try:
                if vmxml.pm:
                    del vmxml.pm
            except xcepts.LibvirtXMLNotFoundError:
                pass
        else:
            pm_xml = vm_xml.VMPMXML()
            if suspend_target == 'mem':
                pm_xml.mem_enabled = pm_enabled
            elif suspend_target == 'disk':
                pm_xml.disk_enabled = pm_enabled
            elif suspend_target == 'hybrid':
                pm_xml.mem_enabled = pm_enabled
                pm_xml.disk_enabled = pm_enabled
            vmxml.pm = pm_xml
        vmxml.sync()

        vm.prepare_guest_agent()

        # Create swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(test.tmpdir, "%s.save" % vm_name)
            session = vm.wait_for_login()
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, debug=True,
                                        uri=uri,
                                        unprivileged_user=unprivileged_user)
            if result.exit_status == 0:
                if fail_pat:
                    raise error.TestFail("Expected failed with %s, but run succeed"
                                         ":\n%s" % (fail_pat, result))
            else:
                if not fail_pat:
                    raise error.TestFail("Expected success, but run failed:\n%s"
                                         % result)
                if not any(p in result.stderr for p in fail_pat):
                    raise error.TestFail("Expected failed with one of %s, but "
                                         "failed with:\n%s" % (fail_pat, result))
            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # dompmwakeup should fail here since the domain is saved
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail("Vm status is not paused before pm wakeup")
                if params.get('setup_libvirt_polkit') == 'yes':
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs_copy)
                else:
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail("Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    raise error.TestFail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait until the vm has started
                vm.wait_for_login()
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait until the vm has started
                vm.wait_for_login()
                # Run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    raise error.TestFail("libvirtd crashed")
            if test_suspend_resume:
                ret = virsh.suspend(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    raise error.TestFail("VM state should be pmsuspended")
                ret = virsh.resume(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    raise error.TestFail("VM state should be pmsuspended")
        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            # Cleanup
            session.close()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
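
The pmsuspend/wakeup cycle at the heart of this test reduces to two wrapper calls. A minimal sketch with an illustrative domain name, assuming a running guest whose XML enables suspend-to-mem and which runs the qemu guest agent:

from virttest import virsh
from virttest.utils_test import libvirt

ret = virsh.dompmsuspend("avocado-vt-vm1", "mem", debug=True)
libvirt.check_exit_status(ret)
# The domain should now report the "pmsuspended" state; wake it up again
ret = virsh.dompmwakeup("avocado-vt-vm1", debug=True)
libvirt.check_exit_status(ret)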
Code example #56
File: virsh_event.py  Project: lento-sun/tp-libvirt
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: list of events to trigger, e.g. ['save', 'restore']
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        tmpdir = data_dir.get_tmp_dir()
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
        print(dom.name)
        try:
            for event in events_list:
                if event in ['start', 'restore']:
                    if dom.is_alive():
                        dom.destroy()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                if event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "detach-disk":
                    if not os.path.exists(new_disk):
                        open(new_disk, 'a').close()
                    # Attach the disk first; this event will not be caught
                    virsh.attach_disk(dom.name, new_disk, 'vdb', **virsh_dargs)
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                else:
                    raise error.TestError("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
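
A hypothetical caller sketch: the returned pairs carry "%s" placeholders for the domain name, so a caller would typically expand them before matching against the output of a concurrently running `virsh event --loop` session:

import logging

expected = trigger_events(vm, ['save', 'restore', 'destroy'])
for name, template in expected:
    logging.debug("expecting event output: %s", template % name)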
Code example #57
def run(test, params, env):
    """
    Different cpu compat mode scenario tests

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def check_feature(vm, feature="", vcpu=0):
        """
        Checks the given feature is present
        :param vm: VM Name
        :param feature: feature to be verified
        :param vcpu: vcpu number to pin the guest test to
        :return: true on success, test fail on failure
        """
        session = vm.wait_for_login()
        if 'power8' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power8"'
        elif 'xive' in feature:
            # remove -v once guest xive support is available
            # right now power9 guest supports only xics
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'xics' in feature:
            cmd = "grep -v xive /sys/firmware/devicetree/base/interrupt-*/compatible"
        elif 'power9' in feature:
            cmd = 'lscpu|grep -i "Model name:.*power9"'
        elif 'hpt' in feature:
            cmd = 'grep "MMU.*: Hash" /proc/cpuinfo'
        elif 'rpt' in feature:
            cmd = 'grep "MMU.*: Radix" /proc/cpuinfo'
        elif 'isa' in feature:
            utils_package.package_install('gcc', session)
            cmd = "echo 'int main(){asm volatile (\".long 0x7c0005e6\");"
            cmd += "return 0;}' > ~/a.c;cc ~/a.c;taskset -c %s ./a.out" % vcpu
        status, output = session.cmd_status_output(cmd)
        logging.debug(output)
        session.close()
        if feature != "isa2.7":
            if status != 0:
                test.fail("Feature: %s check failed inside "
                          "%s guest on %s host" % (feature,
                                                   guest_version,
                                                   host_version))
        else:
            if status == 0:
                test.fail("isa3.0 instruction succeeds in "
                          "%s guest on %s host" % (guest_version,
                                                   host_version))
        return True

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pin_vcpu = 0
    host_version = params.get("host_version")
    guest_version = params.get("guest_version")
    max_vcpu = params.get("cpucompat_vcpu_max", "")
    cur_vcpu = int(params.get("cpucompat_vcpu_cur", "1"))
    cores = int(params.get("topology_cores", '1'))
    sockets = int(params.get("topology_sockets", '1'))
    threads = int(params.get("topology_threads", '1'))
    status_error = "yes" == params.get("status_error", "no")
    condn = params.get("condn", "")
    guest_features = params.get("guest_features", "")
    if guest_features:
        guest_features = guest_features.split(',')
        if guest_version:
            guest_features.append(guest_version)
    if host_version not in cpu.get_cpu_arch():
        test.cancel("Unsupported Host cpu version")

    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    org_xml = vmxml.copy()
    # Destroy the vm
    vm.destroy()
    try:
        # Set cpu model
        if max_vcpu:
            pin_vcpu = int(max_vcpu) - 1
            libvirt_xml.VMXML.set_vm_vcpus(vm_name, int(max_vcpu), cur_vcpu,
                                           sockets=sockets, cores=cores,
                                           threads=threads, add_topology=True)
        libvirt_xml.VMXML.set_cpu_mode(vm_name, model=guest_version)
        logging.debug(virsh.dumpxml(vm_name))
        try:
            vm.start()
        except virt_vm.VMStartError as detail:
            if not status_error:
                test.fail("%s" % detail)
            else:
                pass
        if max_vcpu:
            virsh.setvcpus(vm_name, int(max_vcpu), "--live",
                           ignore_status=False, debug=True)
            if not utils_misc.check_if_vm_vcpu_match(int(max_vcpu), vm):
                test.fail("Vcpu hotplug failed")
        if not status_error:
            for feature in guest_features:
                check_feature(vm, feature, vcpu=pin_vcpu)
        if condn == "filetrans":
            utils_test.run_file_transfer(test, params, env)
        elif condn == "stress":
            bt = utils_test.run_avocado_bg(vm, params, test)
            if not bt:
                test.cancel("guest stress failed to start")
        elif condn == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
            result = virsh.save(vm_name, save_file, ignore_status=True,
                                debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Just sleep a few secs before guest recovery
            time.sleep(2)
            if os.path.exists(save_file):
                result = virsh.restore(save_file, ignore_status=True,
                                       debug=True)
                utils_test.libvirt.check_exit_status(result)
                os.remove(save_file)
        elif condn == "suspend":
            result = virsh.suspend(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
            # Just sleep a few secs before guest recovery
            time.sleep(2)
            result = virsh.resume(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result)
        else:
            pass
    finally:
        org_xml.sync()
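
The compat-mode setup itself boils down to the XML helper calls made before boot. A minimal sketch reusing the helpers above, with an illustrative model value:

import logging

from virttest import libvirt_xml, virsh

libvirt_xml.VMXML.set_cpu_mode(vm_name, model="power8")
logging.debug(virsh.dumpxml(vm_name))
vm.start()
vm.wait_for_login().close()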
Code example #58
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-exist directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    default_timeout = int(params.get("default_timeout", "300"))
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" % (target,
                                                                  vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_tmp_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice, and the first time created
        # file can be reused in the second time if no dest_path given
        # This will make sure the image size equal to original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if len(bandwidth):
        options += " --bandwidth %s" % bandwidth
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user,
                  'debug': True, 'ignore_status': True, 'timeout': timeout}

    libvirtd_utl = utils_libvirtd.Libvirtd()
    libvirtd_conf = utils_config.LibvirtdConfig()
    libvirtd_conf["log_filters"] = '"3:json 1:libvirt 1:qemu"'
    libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
    libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
    logging.debug("the libvirtd config file content is:\n %s" %
                  libvirtd_conf)
    libvirtd_utl.restart()

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param dest_extension: Extension that QemuImg re-adds, stripped here
        :param expect: Expected image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here.
        path_noext = dest_path.strip(dest_extension)
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" % (dest_path,
                                                               expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fails with a block-job-complete error
        or hangs with a state change lock.
        """
        bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            raise exceptions.TestFail("Hit on bug: %s" % bug_url_)

    def _blockcopy_cmd():
        """
        Run blockcopy command
        """
        cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                     options, **extra_dict)
        _blockjob_and_libvirtd_chk(cmd_result)
        if cmd_result.exit_status:
            return False
        elif "Copy aborted" in cmd_result.stdout:
            return False
        else:
            return cmd_result

    def _make_snapshot():
        """
        Make external disk snapshot
        """
        snap_xml = snapshot_xml.SnapshotXML()
        snapshot_name = "blockcopy_snap"
        snap_xml.snap_name = snapshot_name
        snap_xml.description = "blockcopy snapshot"

        # Add all disks into xml file.
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        new_disks = []
        src_disk_xml = disks[0]
        disk_xml = snap_xml.SnapDiskXML()
        disk_xml.xmltreefile = src_disk_xml.xmltreefile
        del disk_xml.device
        del disk_xml.address
        disk_xml.snapshot = "external"
        disk_xml.disk_name = disk_xml.target['dev']

        # Only qcow2 works as external snapshot file format, update it
        # here
        driver_attr = disk_xml.driver
        driver_attr.update({'type': 'qcow2'})
        disk_xml.driver = driver_attr

        # Initialize hosts so the branches below may leave it unset safely
        hosts = None
        new_attrs = disk_xml.source.attrs
        if 'file' in disk_xml.source.attrs:
            new_file = os.path.join(tmp_dir, "blockcopy_shallow.snap")
            snapshot_external_disks.append(new_file)
            new_attrs.update({'file': new_file})
            hosts = None
        elif ('dev' in disk_xml.source.attrs or
              'name' in disk_xml.source.attrs or
              'pool' in disk_xml.source.attrs):
            if (disk_xml.type_name == 'block' or
                    disk_source_protocol == 'iscsi'):
                disk_xml.type_name = 'block'
                if 'name' in new_attrs:
                    del new_attrs['name']
                    del new_attrs['protocol']
                elif 'pool' in new_attrs:
                    del new_attrs['pool']
                    del new_attrs['volume']
                    del new_attrs['mode']
                back_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size="1G",
                                                       emulated_image=back_n)
                emulated_iscsi.append(back_n)
                cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                process.run(cmd, shell=True)
                new_attrs.update({'dev': back_path})
                hosts = None

        new_src_dict = {"attrs": new_attrs}
        if hosts:
            new_src_dict.update({"hosts": hosts})
        disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

        new_disks.append(disk_xml)

        snap_xml.set_disks(new_disks)
        snapshot_xml_path = snap_xml.xml
        logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

        options = "--disk-only --xmlfile %s " % snapshot_xml_path

        snapshot_result = virsh.snapshot_create(
            vm_name, options, debug=True)

        if snapshot_result.exit_status != 0:
            raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desired type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is a bad idea as it leaves lots of cleanup
            # jobs after the test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            utl.set_vm_disk(vm, params, tmp_dir, test)
            emulated_iscsi.append(emu_image)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow:
            _make_snapshot()

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine()
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target,
                                         dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail")
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")
            cmd_result = utils_misc.wait_for(_blockcopy_cmd, 10)
            if not cmd_result:
                raise exceptions.TestFail("Run blockcopy command fail")
            status = 0
        else:
            cmd_result = virsh.blockcopy(vm_name, target, dest_path,
                                         options, **extra_dict)
            _blockjob_and_libvirtd_chk(cmd_result)
            status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    utl.check_blockjob(vm_name, target, "bandwidth", bandwidth)
                    if check_state_lock:
                        # Run blockjob pivot in subprocess as it will hang
                        # for a while, run blockjob info again to check
                        # job state
                        command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                    target)
                        session = aexpect.ShellSession(command)
                        ret = virsh.blockjob(vm_name, target, "--info")
                        err_info = "cannot acquire state change lock"
                        if err_info in ret.stderr:
                            raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                        utl.check_exit_status(ret, status_error)
                        session.close()
                val = options.count("--pivot") + options.count("--finish")
                if val == 0:
                    try:
                        finish_job(vm_name, target, default_timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name, save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
            else:
                raise exceptions.TestFail(cmd_result.stderr)
        else:
            # Negative case (the original snippet is truncated here); the
            # usual check is simply that the command did not succeed:
            if status == 0:
                raise exceptions.TestFail("Expect blockcopy to fail, but it "
                                          "ran successfully")
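The state-lock check in the example above leans on one trick: blockjob --pivot can block while holding the domain's job lock, so it is launched in a background shell session while the job is queried again from the foreground. A condensed, standalone sketch of that pattern, assuming a running domain "vm1" with disk target "vda" (both placeholders) and the same virttest/aexpect modules used above:

import aexpect
from virttest import virsh

vm_name, target = "vm1", "vda"    # placeholder domain and disk target
# Launch the pivot in the background; it may hang holding the job lock.
session = aexpect.ShellSession("virsh blockjob %s %s --pivot"
                               % (vm_name, target))
try:
    # Query the job while the pivot is still in flight.
    ret = virsh.blockjob(vm_name, target, "--info")
    if "cannot acquire state change lock" in ret.stderr:
        print("Hit the state change lock issue")
finally:
    session.close()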
Code example #59
0
    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :param vm_name: Name of the VM domain
        :param vm_operation: Operation to be performed on the VM domain,
                             e.g. save, managedsave, suspend
        :param recover: whether to recover the domain from vm_operation
                        instead of performing it
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait for the domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")
Code example #60
0
File: virsh_domjobabort.py Project: zcyes/tp-libvirt
def run(test, params, env):
    """
    Test command: virsh domjobabort.

    The command can abort the currently running domain job.
    1. Prepare the test environment; destroy or suspend the VM.
    2. Do an action to get a subprocess (dump, save, managedsave).
    3. Perform the virsh domjobabort operation to abort the VM's job.
    4. Recover the VM's status and wait for the subprocess to finish.
    5. Confirm the test result.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    original_speed = virsh.migrate_getspeed(vm_name).stdout.strip()

    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute a virsh command in the background and return the
        subprocess without waiting for it to exit.

        :param action: virsh command to run (e.g. dump, save, migrate)
        :param vm_name: VM's name
        :param file: the virsh command's file option
        :param remote_uri: destination URI when action is "migrate"
        """
        if action == "managedsave":
            file = ""
        elif action == "migrate":
            # Slow down migration for domjobabort
            virsh.migrate_setspeed(vm_name, "1")
            file = remote_uri
        command = "virsh %s %s %s --unsafe" % (action, vm_name, file)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    action = params.get("jobabort_action", "dump")
    status_error = params.get("status_error", "no")
    job = params.get("jobabort_job", "yes")
    tmp_file = os.path.join(test.tmpdir, "domjobabort.tmp")
    tmp_pipe = os.path.join(test.tmpdir, "domjobabort.fifo")
    vm_ref = params.get("jobabort_vm_ref")
    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    saved_data = None

    if action == "managedsave":
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        virsh.save(vm_name, tmp_file, ignore_status=True)

    if action == "migrate":
        if remote_host.count("EXAMPLE"):
            raise error.TestNAError("Remote host should be configured "
                                    "for migrate.")
        else:
            # Config ssh autologin for remote host
            ssh_key.setup_ssh_key(remote_host, remote_user,
                                  remote_pwd, port=22)

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # Get the subprocess of the VM.
    # "domjobabort" aborts the currently running domain job,
    # so before invoking it we must create a job on the domain.
    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        os.mkfifo(tmp_pipe)

        process = get_subprocess(action, vm_name, tmp_pipe, remote_uri)

        saved_data = None
        if action == "restore":
            saved_data = open(tmp_file, 'r').read(10 * 1024 * 1024)
            f = open(tmp_pipe, 'w')
            f.write(saved_data[:1024 * 1024])
        elif action == "migrate":
            f = None
        else:
            f = open(tmp_pipe, 'r')
            dummy = f.read(1024 * 1024)

    # Give the job enough time to start
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break
    ret = virsh.domjobabort(vm_ref, ignore_status=True, debug=True)
    status = ret.exit_status

    if process and f:
        if saved_data:
            f.write(saved_data[1024 * 1024:])
        else:
            dummy = f.read()
        f.close()

        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        if os.path.exists(tmp_file):
            os.unlink(tmp_file)

    # Recover the environment.
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        if process.poll():
            try:
                process.kill()
            except OSError:
                pass

    if action == "migrate":
        # Recover migration speed
        virsh.migrate_setspeed(vm_name, original_speed)
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")