Example #1
    def test_pmsuspend(vm_name):
        """
        Test pmsuspend command.
        """
        if vm.is_dead():
            vm.start()
            vm.wait_for_login()
        # Create a swap partition if necessary.
        if not vm.has_swap():
            vm.create_swap_partition()
        ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
        libvirt.check_exit_status(ret)
        # Wait for the VM to shut down.
        if not utils_misc.wait_for(lambda: vm.state() == "shut off", 30):
            raise error.TestFail("vm is still alive after S4 operation")

        # Start the VM again and check that qemu-ga is running.
        vm.start()
        session = vm.wait_for_login()
        check_vm_guestagent(session)
        session.close()

        # TODO: this step may hang for a RHEL 6 guest
        ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if not vm.is_alive():
            raise error.TestFail("vm is not alive after dompmwakeup")
Example #2
def manipulate_domain(vm_name, action, recover=False):
    """
    Save/managedsave/S3/S4 the domain, or recover it.
    """
    tmp_dir = data_dir.get_tmp_dir()
    save_file = os.path.join(tmp_dir, vm_name + ".save")
    if not recover:
        if action == "save":
            save_option = ""
            result = virsh.save(vm_name,
                                save_file,
                                save_option,
                                ignore_status=True,
                                debug=True)
            libvirt.check_exit_status(result)
        elif action == "managedsave":
            managedsave_option = ""
            result = virsh.managedsave(vm_name,
                                       managedsave_option,
                                       ignore_status=True,
                                       debug=True)
            libvirt.check_exit_status(result)
        elif action == "s3":
            suspend_target = "mem"
            result = virsh.dompmsuspend(vm_name,
                                        suspend_target,
                                        ignore_status=True,
                                        debug=True)
            libvirt.check_exit_status(result)
        elif action == "s4":
            suspend_target = "disk"
            result = virsh.dompmsuspend(vm_name,
                                        suspend_target,
                                        ignore_status=True,
                                        debug=True)
            libvirt.check_exit_status(result)
            # Wait for the domain state change: 'in shutdown' -> 'shut off'
            utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
        else:
            logging.debug("No operation for the domain")

    else:
        if action == "save":
            if os.path.exists(save_file):
                result = virsh.restore(save_file,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                os.remove(save_file)
            else:
                raise error.TestError("No save file for domain restore")
        elif action in ["managedsave", "s4"]:
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        elif action == "s3":
            suspend_target = "mem"
            result = virsh.dompmwakeup(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        else:
            logging.debug("No need recover the domain")
Example #4
def run_pm_test(params, libvirtd, vm):
    """
    Destroy the VM after executing a series of S3 and save/restore operations.
    """

    vm_name = vm.name
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    save_path = os.path.join(data_dir.get_tmp_dir(), 'tmp.save')
    try:
        pm_xml = VMPMXML()
        pm_xml.mem_enabled = 'yes'
        vm_xml.pm = pm_xml
        vm_xml.sync()
        vm.prepare_guest_agent()
        virsh.dompmsuspend(vm.name, 'mem')
        virsh.dompmwakeup(vm.name)
        virsh.save(vm.name, save_path)
        virsh.restore(save_path)
        virsh.dompmsuspend(vm.name, 'mem')
        virsh.save(vm.name, save_path)
        virsh.destroy(vm.name)
    finally:
        vm_xml_backup.sync()
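The example above enables only suspend-to-mem. A hedged sketch of enabling the
other targets as well, using the same VMPMXML attributes that appear in the
later examples (mem_enabled/disk_enabled); hybrid suspend requires both:

    # Illustrative: allow S3 ("mem"), S4 ("disk") and hybrid suspend.
    # mem_enabled maps to <suspend-to-mem enabled='yes'/> in the domain XML,
    # disk_enabled to <suspend-to-disk enabled='yes'/>.
    pm_xml = VMPMXML()
    pm_xml.mem_enabled = 'yes'
    pm_xml.disk_enabled = 'yes'
    vm_xml.pm = pm_xml
    vm_xml.sync()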
Example #6
    def test_pmsuspend(vm_name):
        """
        Test pmsuspend command.
        """
        if vm.is_dead():
            vm.start()
            vm.wait_for_login()
        # Create a swap partition if necessary.
        if not vm.has_swap():
            swap_path = os.path.join(data_dir.get_tmp_dir(), 'swap.img')
            vm.create_swap_partition(swap_path)
        ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
        libvirt.check_exit_status(ret)
        # Wait for the VM to shut down.
        if not utils_misc.wait_for(lambda: vm.state() == "shut off", 60):
            test.fail("vm is still alive after S4 operation")

        # Start the VM again and prepare the guest agent.
        vm.start()
        try:
            vm.prepare_guest_agent()
        except (remote.LoginError, virt_vm.VMError) as detail:
            test.fail("failed to prepare agent:\n%s" % detail)

        # TODO: this step may hang for a RHEL 6 guest
        ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Check vm state
        if not utils_misc.wait_for(lambda: vm.state() == "pmsuspended", 60):
            test.fail("vm isn't suspended after S3 operation")

        ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if not vm.is_alive():
            test.fail("vm is not alive after dompmwakeup")
Example #7
    def test_pmsuspend(vm_name):
        """
        Test pmsuspend command.
        """
        if vm.is_dead():
            vm.start()
            vm.wait_for_login()
        # Create swap partition if necessary.
        if not vm.has_swap():
            swap_path = os.path.join(data_dir.get_data_dir(), 'swap.img')
            vm.create_swap_partition(swap_path)
        ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
        libvirt.check_exit_status(ret)
        # Wait for the VM to shut down.
        if not utils_misc.wait_for(lambda: vm.state() == "shut off", 60):
            test.fail("vm is still alive after S4 operation")

        # Start the VM again and prepare the guest agent.
        vm.start()
        try:
            vm.prepare_guest_agent()
        except (remote.LoginError, virt_vm.VMError) as detail:
            test.fail("failed to prepare agent:\n%s" % detail)

        # TODO: this step may hang for a RHEL 6 guest
        ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Check vm state
        if not utils_misc.wait_for(lambda: vm.state() == "pmsuspended", 60):
            test.fail("vm isn't suspended after S3 operation")

        ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if not vm.is_alive():
            test.fail("vm is not alive after dompmwakeup")
Example #8
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using the guest OS's power management.
    """
    def check_vm_guestagent(session):
        # Install qemu-guest-agent if it is not already present
        cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
        stat_install = session.cmd_status(cmd, 300)
        if stat_install != 0:
            raise error.TestFail("Fail to install qemu-guest-agent, make"
                                 "sure that you have usable repo in guest")

        # Check if qemu-ga already started
        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
        if stat_ps != 0:
            session.cmd("qemu-ga -d")
            # Check if the qemu-ga really started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                raise error.TestFail("Fail to run qemu-ga in guest")

    def check_pm_utils(session):
        # Check if pm-utils is present in vm
        cmd = "rpm -q pm-utils || yum install -y pm-utils"
        stat_install = session.cmd_status(cmd, 300)
        if stat_install != 0:
            raise error.TestFail("Fail to install pm-utils, make"
                                 "sure that you have usable repo in guest")

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    status_error = "yes" == params.get("status_error", "yes")
    virsh_dargs = {'debug': True}

    # A backup of the original VM
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    try:
        if vm.is_alive():
            vm.destroy()
        libvirt_xml.VMXML.set_agent_channel(vm_name)
        vm.start()
        session = vm.wait_for_login()

        check_vm_guestagent(session)
        check_pm_utils(session)

        session.close()

        # Set vm state
        if vm_state == "paused":
            vm.pause()
        elif vm_state == "shutoff":
            vm.destroy()

        # Run test case
        result = virsh.dompmsuspend(vm_name, suspend_target, **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()

        # Check status_error
        if status_error:
            if status == 0 or err == "":
                raise error.TestFail("Expect fail, but run successfully!")
        else:
            if status != 0 or err != "":
                raise error.TestFail("Run failed with right command")
    finally:
        # cleanup
        if vm_state == "paused":
            vm.resume()

        if suspend_target == "mem" or suspend_target == "hybrid":
            if vm.state() == "pmsuspended":
                virsh.dompmwakeup(vm_name)

        # Recover xml of vm.
        vmxml_backup.sync()
Example #9
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using the guest OS's power management.
    """

    def check_vm_guestagent(session):
        # Install qemu-guest-agent if it is not already present
        cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
        stat_install, output = session.cmd_status_output(cmd, 300)
        logging.debug(output)
        if stat_install != 0:
            raise error.TestError("Fail to install qemu-guest-agent, make"
                                  "sure that you have usable repo in guest")

        # Check if qemu-ga already started
        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
        if stat_ps != 0:
            session.cmd("qemu-ga -d")
            # Check if the qemu-ga really started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                raise error.TestError("Fail to run qemu-ga in guest")

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")

    # A backup of original vm
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Expected possible failure patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []

    # Setup possible failure patterns
    if pm_enabled == 'not_set':
        fail_pat.append('not supported')
    if pm_enabled == 'no':
        fail_pat.append('disabled')

    if vm_state == 'paused':
        fail_pat.append('not responding')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if pm_enabled == 'not_set':
            if 'pm' in vm_xml:
                del vm_xml.pm
        else:
            pm_xml = VMPM()
            if suspend_target == 'mem':
                pm_xml.mem_enabled = pm_enabled
            elif suspend_target == 'disk':
                pm_xml.disk_enabled = pm_enabled
            elif suspend_target == 'hybrid':
                if 'hybrid_enabled' in dir(pm_xml):
                    pm_xml.hybrid_enabled = pm_enabled
                else:
                    raise error.TestNAError("PM suspend type 'hybrid' is not "
                                            "supported yet.")
            vm_xml.pm = pm_xml
            vm_xml.sync()

        VMXML.set_agent_channel(vm_name)
        vm.start()

        # Create a swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        session = vm.wait_for_login()
        try:
            check_vm_guestagent(session)

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, debug=True)
        finally:
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            # Cleanup
            session.close()

            if need_mkswap:
                vm.cleanup_swap()

        if result.exit_status == 0:
            if fail_pat:
                raise error.TestFail("Expected failed with %s, but run succeed"
                                     ":\n%s" % (fail_pat, result))
        else:
            if not fail_pat:
                raise error.TestFail("Expected success, but run failed:\n%s"
                                     % result)
            #if not any_pattern_match(fail_pat, result.stderr):
            if not any(p in result.stderr for p in fail_pat):
                raise error.TestFail("Expected failed with one of %s, but "
                                     "failed with:\n%s" % (fail_pat, result))
    finally:
        # Recover xml of vm.
        vm_xml_backup.sync()
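The commented-out any_pattern_match() call above hints at a small helper that
the inline any() expression replaces; an equivalent sketch:

    def any_pattern_match(patterns, text):
        """Return True if any of the expected substrings occurs in text."""
        return any(p in text for p in patterns)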
Example #10
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_suspend_resume = "yes" == params.get("test_suspend_resume", "no")
    pmsuspend_error = 'yes' == params.get("pmsuspend_error", 'no')

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # A backup of the original VM
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible failure patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs_copy = virsh_dargs.copy()
        virsh_dargs_copy['uri'] = uri
        virsh_dargs_copy['unprivileged_user'] = unprivileged_user
        if pmsuspend_error:
            fail_pat.append('access denied')

    # Setup possible failure patterns
    if pm_enabled == 'not_set':
        fail_pat.append('not supported')
    if pm_enabled == 'no':
        fail_pat.append('disabled')

    if vm_state == 'paused':
        fail_pat.append('not responding')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if pm_enabled == 'not_set':
            try:
                if vmxml.pm:
                    del vmxml.pm
            except xcepts.LibvirtXMLNotFoundError:
                pass
        else:
            pm_xml = vm_xml.VMPMXML()
            if suspend_target == 'mem':
                pm_xml.mem_enabled = pm_enabled
            elif suspend_target == 'disk':
                pm_xml.disk_enabled = pm_enabled
            elif suspend_target == 'hybrid':
                pm_xml.mem_enabled = pm_enabled
                pm_xml.disk_enabled = pm_enabled
            vmxml.pm = pm_xml
        vmxml.sync()

        vm.prepare_guest_agent()

        # Create a swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(test.tmpdir, "%s.save" % vm_name)
            session = vm.wait_for_login()
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, debug=True,
                                        uri=uri,
                                        unprivileged_user=unprivileged_user)
            if result.exit_status == 0:
                if fail_pat:
                    raise error.TestFail("Expected failed with %s, but run succeed"
                                         ":\n%s" % (fail_pat, result))
            else:
                if not fail_pat:
                    raise error.TestFail("Expected success, but run failed:\n%s"
                                         % result)
                #if not any_pattern_match(fail_pat, result.stderr):
                if not any(p in result.stderr for p in fail_pat):
                    raise error.TestFail("Expected failed with one of %s, but "
                                         "failed with:\n%s" % (fail_pat, result))
            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # dompmwakeup is expected to fail while the domain is managed-saved
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail("Vm status is not paused before pm wakeup")
                if params.get('setup_libvirt_polkit') == 'yes':
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs_copy)
                else:
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail("Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    raise error.TestFail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait until the VM has started
                vm.wait_for_login()
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait until the VM has started
                vm.wait_for_login()
                # run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    raise error.TestFail("libvirtd crashed")
            if test_suspend_resume:
                ret = virsh.suspend(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    raise error.TestFail("VM state should be pmsuspended")
                ret = virsh.resume(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    raise error.TestFail("VM state should be pmsuspended")
        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            # Cleanup
            session.close()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
Example #11
def run(test, params, env):
    """
    Test multiple disks attachment.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare disk image.
    3. Edit disks xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def prepare_gluster_disk(disk_img, disk_format):
        """
        Setup glusterfs and prepare disk image.
        """
        # Get the image path and name from parameters
        data_path = data_dir.get_data_dir()
        image_name = params.get("image_name")
        image_format = params.get("image_format")
        image_source = os.path.join(data_path,
                                    image_name + '.' + image_format)

        # Setup gluster.
        host_ip = libvirt.setup_or_cleanup_gluster(True, vol_name,
                                                   brick_path, pool_name)
        logging.debug("host ip: %s ", host_ip)
        image_info = utils_misc.get_image_info(image_source)
        if image_info["format"] == disk_format:
            disk_cmd = ("cp -f %s /mnt/%s" % (image_source, disk_img))
        else:
            # Convert the disk format
            disk_cmd = ("qemu-img convert -f %s -O %s %s /mnt/%s" %
                        (image_info["format"], disk_format, image_source, disk_img))

        # Mount the gluster disk and create the image.
        utils.run("mount -t glusterfs %s:%s /mnt;"
                  " %s; chmod a+rw /mnt/%s; umount /mnt"
                  % (host_ip, vol_name, disk_cmd, disk_img))

        return host_ip

    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Try to rebuild disk xml
        """
        # Delete existing disks first.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks_dev = vmxml.get_devices(device_type="disk")
        for disk in disks_dev:
            vmxml.del_device(disk)

        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu",
                       "type": disk_format,
                       "cache": "none"}
        if driver_iothread:
            driver_dict.update({"iothread": driver_iothread})
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vda", "bus": "virtio"}
        if default_pool:
            utils.run("mount -t glusterfs %s:%s %s; setsebool virt_use_fusefs on" %
                      (host_ip, vol_name, default_pool))
            virsh.pool_refresh("default")
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {"protocol": "gluster",
                           "name": "%s/%s" % (vol_name, disk_img)}
            host_dict = {"name": host_ip, "port": "24007"}
            if transport:
                host_dict.update({"transport": transport})
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict, "hosts": [host_dict]})
        # set domain options
        if dom_iothreads:
            try:
                vmxml.iothreads = int(dom_iothreads)
            except ValueError:
                # 'iothreads' may be an invalid number in negative tests
                logging.debug("Can't convert '%s' to integer type"
                              % dom_iothreads)

        # Add the new disk xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()

    def test_pmsuspend(vm_name):
        """
        Test pmsuspend command.
        """
        if vm.is_dead():
            vm.start()
            vm.wait_for_login()
        # Create a swap partition if necessary.
        if not vm.has_swap():
            vm.create_swap_partition()
        ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
        libvirt.check_exit_status(ret)
        # Wait for the VM to shut down.
        if not utils_misc.wait_for(lambda: vm.state() == "shut off", 30):
            raise error.TestFail("vm is still alive after S4 operation")

        # Start the VM again and prepare the guest agent.
        vm.start()
        try:
            vm.prepare_guest_agent()
        except (remote.LoginError, virt_vm.VMError) as e:
            raise error.TestFail("failed to prepare agent: %s" % e)

        # TODO: this step may hang for a RHEL 6 guest
        ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if not vm.is_alive():
            raise error.TestFail("vm is not alive after dompmwakeup")
Example #12
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using the guest OS's power management.
    """
    def check_vm_guestagent(session):
        # Install qemu-guest-agent if it is not already present
        cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
        stat_install, output = session.cmd_status_output(cmd, 300)
        logging.debug(output)
        if stat_install != 0:
            raise error.TestError("Fail to install qemu-guest-agent, make"
                                  "sure that you have usable repo in guest")

        # Check if qemu-ga already started
        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga | grep -v grep")
        if stat_ps != 0:
            session.cmd("service qemu-ga start")
            # Check if the qemu-ga really started
            stat_ps = session.cmd_status(
                "ps aux |grep [q]emu-ga | grep -v grep")
            if stat_ps != 0:
                raise error.TestError("Fail to run qemu-ga in guest")

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")

    # A backup of the original VM
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible failure patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}

    # Setup possible failure patterns
    if pm_enabled == 'not_set':
        fail_pat.append('not supported')
    if pm_enabled == 'no':
        fail_pat.append('disabled')

    if vm_state == 'paused':
        fail_pat.append('not responding')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if pm_enabled == 'not_set':
            try:
                if vmxml.pm:
                    del vmxml.pm
            except xcepts.LibvirtXMLNotFoundError:
                pass
        else:
            pm_xml = vm_xml.VMPMXML()
            if suspend_target == 'mem':
                pm_xml.mem_enabled = pm_enabled
            elif suspend_target == 'disk':
                pm_xml.disk_enabled = pm_enabled
            elif suspend_target == 'hybrid':
                if 'hybrid_enabled' in dir(pm_xml):
                    pm_xml.hybrid_enabled = pm_enabled
                else:
                    raise error.TestNAError("PM suspend type 'hybrid' is not "
                                            "supported yet.")
            vmxml.pm = pm_xml
        vmxml.sync()

        vm_xml.VMXML.set_agent_channel(vm_name)
        vm.start()

        # Create a swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(test.tmpdir, "%s.save" % vm_name)
            session = vm.wait_for_login()
            check_vm_guestagent(session)
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, debug=True)
            if result.exit_status == 0:
                if fail_pat:
                    raise error.TestFail(
                        "Expected failed with %s, but run succeed"
                        ":\n%s" % (fail_pat, result))
            else:
                if not fail_pat:
                    raise error.TestFail(
                        "Expected success, but run failed:\n%s" % result)
                #if not any_pattern_match(fail_pat, result.stderr):
                if not any(p in result.stderr for p in fail_pat):
                    raise error.TestFail("Expected failed with one of %s, but "
                                         "failed with:\n%s" %
                                         (fail_pat, result))
            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # dompmwakeup is expected to fail while the domain is managed-saved
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail(
                        "Vm status is not paused before pm wakeup")
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    raise error.TestFail(
                        "Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    raise error.TestFail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target,
                                         **virsh_dargs)
                libvirt.check_exit_status(ret)
                # save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    raise error.TestFail("libvirtd crashed")

        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            # Cleanup
            session.close()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
Example #13
    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :param vm_name: Name of the VM domain
        :param vm_operation: Operation to be performed on VM domain
                             like save, managedsave, suspend
        :param recover: flag to inform whether to set or reset
                        vm_operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait for the domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
                vm_uptime_init = vm.uptime()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")
def run(test, params, env):
    """
    Test multiple disks attachment.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare disk image.
    3. Edit disks xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def prepare_gluster_disk(disk_img, disk_format):
        """
        Setup glusterfs and prepare disk image.
        """
        # Get the image path and name from parameters
        data_path = data_dir.get_data_dir()
        image_name = params.get("image_name")
        image_format = params.get("image_format")
        image_source = os.path.join(data_path, image_name + '.' + image_format)

        # Setup gluster.
        host_ip = libvirt.setup_or_cleanup_gluster(True, vol_name, brick_path,
                                                   pool_name)
        logging.debug("host ip: %s ", host_ip)
        image_info = utils_misc.get_image_info(image_source)
        if image_info["format"] == disk_format:
            disk_cmd = ("cp -f %s /mnt/%s" % (image_source, disk_img))
        else:
            # Convert the disk format
            disk_cmd = (
                "qemu-img convert -f %s -O %s %s /mnt/%s" %
                (image_info["format"], disk_format, image_source, disk_img))

        # Mount the gluster disk and create the image.
        utils.run("mount -t glusterfs %s:%s /mnt;"
                  " %s; chmod a+rw /mnt/%s; umount /mnt" %
                  (host_ip, vol_name, disk_cmd, disk_img))

        return host_ip

    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Try to rebuild disk xml
        """
        # Delete existing disks first.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks_dev = vmxml.get_devices(device_type="disk")
        for disk in disks_dev:
            vmxml.del_device(disk)

        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"}
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vda", "bus": "virtio"}
        if default_pool:
            utils.run(
                "mount -t glusterfs %s:%s %s; setsebool virt_use_fusefs on" %
                (host_ip, vol_name, default_pool))
            virsh.pool_refresh("default")
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {
                "protocol": "gluster",
                "name": "%s/%s" % (vol_name, disk_img)
            }
            host_dict = {"name": host_ip, "port": "24007"}
            if transport:
                host_dict.update({"transport": transport})
            disk_xml.source = disk_xml.new_disk_source(**{
                "attrs": source_dict,
                "hosts": [host_dict]
            })
        # Add the new disk xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()

    def test_pmsuspend(vm_name):
        """
        Test pmsuspend command.
        """
        if vm.is_dead():
            vm.start()
            vm.wait_for_login()
        # Create a swap partition if necessary.
        if not vm.has_swap():
            vm.create_swap_partition()
        ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
        libvirt.check_exit_status(ret)
        # Wait for the VM to shut down.
        if not utils_misc.wait_for(lambda: vm.state() == "shut off", 30):
            raise error.TestFail("vm is still alive after S4 operation")

        # Start the VM again and prepare the guest agent.
        vm.start()
        try:
            vm.prepare_guest_agent()
        except (remote.LoginError, virt_vm.VMError) as e:
            raise error.TestFail("failed to prepare agent: %s" % e)

        # TODO: this step may hang for a RHEL 6 guest
        ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if not vm.is_alive():
            raise error.TestFail("vm is not alive after dompmwakeup")
Example #15
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using the guest OS's power management.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    pm_enabled_disk = params.get("pm_enabled_disk", "no")
    pm_enabled_mem = params.get("pm_enabled_mem", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_suspend_resume = "yes" == params.get("test_suspend_resume", "no")
    pmsuspend_error = 'yes' == params.get("pmsuspend_error", 'no')
    pmsuspend_error_msg = params.get("pmsuspend_error_msg")
    agent_error_test = 'yes' == params.get("agent_error_test", 'no')
    arch = platform.processor()
    duration_value = int(params.get("duration", "0"))

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # A backup of the original VM
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible failure patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs_copy = virsh_dargs.copy()
        virsh_dargs_copy['uri'] = uri
        virsh_dargs_copy['unprivileged_user'] = unprivileged_user
        if pmsuspend_error:
            fail_pat.append('access denied')

    # Setup possible failure patterns excluding ppc
    if "ppc64" not in arch:
        if pm_enabled == 'not_set':
            fail_pat.append('not supported')
        if pm_enabled == 'no':
            fail_pat.append('disabled')

    if vm_state == 'paused':
        # For older version
        fail_pat.append('not responding')
        # For newer version
        fail_pat.append('not running')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')
    if agent_error_test:
        fail_pat.append('not running')
        fail_pat.append('agent not available')
    if pmsuspend_error_msg:
        fail_pat.append(pmsuspend_error_msg)

    # RHEL6 or older releases
    unsupported_guest_err = 'suspend mode is not supported by the guest'

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if "ppc64" not in arch:
            if pm_enabled == 'not_set':
                try:
                    if vmxml.pm:
                        del vmxml.pm
                except xcepts.LibvirtXMLNotFoundError:
                    pass
            else:
                pm_xml = vm_xml.VMPMXML()
                pm_xml.mem_enabled = pm_enabled_mem
                pm_xml.disk_enabled = pm_enabled_disk
                vmxml.pm = pm_xml
            vmxml.sync()

        try:
            vm.prepare_guest_agent()
        except virt_vm.VMStartError as info:
            if "not supported" in str(info).lower():
                test.cancel(info)
            else:
                test.error(info)
        # SELinux should be enforcing
        vm.setenforce(1)

        # Create a swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
            session = vm.wait_for_login()
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")
            session.close()

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, duration=duration_value, debug=True,
                                        uri=uri,
                                        unprivileged_user=unprivileged_user)
            if result.exit_status == 0:
                if fail_pat:
                    test.fail("Expected failed with %s, but run succeed:\n%s" %
                              (fail_pat, result.stdout))
            else:
                if unsupported_guest_err in result.stderr:
                    test.cancel("Unsupported suspend mode:\n%s" % result.stderr)
                if not fail_pat:
                    test.fail("Expected success, but run failed:\n%s" % result.stderr)
                #if not any_pattern_match(fail_pat, result.stderr):
                if not any(p in result.stderr for p in fail_pat):
                    test.fail("Expected failed with one of %s, but "
                              "failed with:\n%s" % (fail_pat, result.stderr))

            # If pmsuspend_error is True, skip the checks below and return directly.
            if pmsuspend_error:
                return
            # check whether the state changes to pmsuspended
            if not utils_misc.wait_for(lambda: vm.state() == 'pmsuspended', 30):
                test.fail("VM failed to change its state, expected state: "
                          "pmsuspended, but actual state: %s" % vm.state())

            if agent_error_test:
                ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
                libvirt.check_result(ret, fail_pat)
                ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
                libvirt.check_result(ret, fail_pat)
                ret = virsh.domtime(vm_name, **virsh_dargs)
                libvirt.check_result(ret, fail_pat)
            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # dompmwakeup is expected to fail while the domain is managed-saved
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    test.fail("Vm status is not paused before pm wakeup")
                if params.get('setup_libvirt_polkit') == 'yes':
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs_copy)
                else:
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    test.fail("Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    test.fail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait until the VM has started
                vm.wait_for_login()
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait until the VM has started
                vm.wait_for_login()
                # run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    test.fail("libvirtd crashed")
            if test_suspend_resume:
                ret = virsh.suspend(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    test.fail("VM state should be pmsuspended")
                ret = virsh.resume(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    test.fail("VM state should be pmsuspended")
        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
Exemplo n.º 16
0
def run(test, params, env):
    """
    Test multiple disk attachment.

    1. Prepare the test environment: destroy or suspend a VM.
    2. Prepare a disk image.
    3. Edit the disks XML and start the domain.
    4. Perform the test operation.
    5. Recover the test environment.
    6. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def prepare_gluster_disk(disk_img, disk_format):
        """
        Setup glusterfs and prepare disk image.
        """
        # Get the image path
        image_source = vm.get_first_disk_devices()['source']

        # Setup gluster
        host_ip = libvirt.setup_or_cleanup_gluster(True, vol_name, brick_path,
                                                   pool_name)
        logging.debug("host ip: %s ", host_ip)
        image_info = utils_misc.get_image_info(image_source)
        image_dest = "/mnt/%s" % disk_img

        if image_info["format"] == disk_format:
            disk_cmd = ("cp -f %s %s" % (image_source, image_dest))
        else:
            # Convert the disk format
            disk_cmd = (
                "qemu-img convert -f %s -O %s %s %s" %
                (image_info["format"], disk_format, image_source, image_dest))

        # Mount the gluster disk and create the image.
        process.run("mount -t glusterfs %s:%s /mnt && "
                    "%s && chmod a+rw /mnt/%s && umount /mnt" %
                    (host_ip, vol_name, disk_cmd, disk_img),
                    shell=True)

        return host_ip

    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Rebuild the disk XML for the domain.
        """
        # Delete existing disks first.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks_dev = vmxml.get_devices(device_type="disk")
        for disk in disks_dev:
            vmxml.del_device(disk)

        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"}
        if driver_iothread:
            driver_dict.update({"iothread": driver_iothread})
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vda", "bus": "virtio"}
        if default_pool:
            utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool,
                             "glusterfs")
            process.run("setsebool virt_use_fusefs on", shell=True)
            virsh.pool_refresh("default")
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {
                "protocol": "gluster",
                "name": "%s/%s" % (vol_name, disk_img)
            }
            host_dict = [{"name": host_ip, "port": "24007"}]
            # If multiple_hosts is True, add multiple hosts.
            if multiple_hosts:
                host_dict.append({
                    "name": params.get("dummy_host1"),
                    "port": "24007"
                })
                host_dict.append({
                    "name": params.get("dummy_host2"),
                    "port": "24007"
                })
            if transport:
                host_dict[0]['transport'] = transport
            disk_xml.source = disk_xml.new_disk_source(**{
                "attrs": source_dict,
                "hosts": host_dict
            })
        # set domain options
        if dom_iothreads:
            try:
                vmxml.iothreads = int(dom_iothreads)
            except ValueError:
                # 'iothreads' may not be a valid number in negative tests
                logging.debug("Can't convert '%s' to integer type",
                              dom_iothreads)

        # Add the new disk xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()

    def test_pmsuspend(vm_name):
        """
        Test pmsuspend command.
        """
        if vm.is_dead():
            vm.start()
            vm.wait_for_login()
        # Create a swap partition if necessary.
        if not vm.has_swap():
            swap_path = os.path.join(test.tmpdir, 'swap.img')
            vm.create_swap_partition(swap_path)
        ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
        libvirt.check_exit_status(ret)
        # Wait for the VM to shut down
        if not utils_misc.wait_for(lambda: vm.state() == "shut off", 60):
            test.fail("vm is still alive after S4 operation")

        # Wait for vm and qemu-ga service to start
        vm.start()
        # Prepare guest agent and start guest
        try:
            vm.prepare_guest_agent()
        except (remote.LoginError, virt_vm.VMError) as detail:
            test.fail("Failed to prepare guest agent:\n%s" % detail)

        # TODO: This step may hang for a RHEL6 guest
        ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Check vm state
        if not utils_misc.wait_for(lambda: vm.state() == "pmsuspended", 60):
            test.fail("vm isn't suspended after S3 operation")

        ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if not vm.is_alive():
            test.fail("vm is not alive after dompmwakeup")
Exemplo n.º 17
0
def run(test, params, env):
    """
    Test multiple disk attachment.

    1. Prepare the test environment: destroy or suspend a VM.
    2. Prepare a disk image.
    3. Edit the disks XML and start the domain.
    4. Perform the test operation.
    5. Recover the test environment.
    6. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def prepare_gluster_disk(disk_img, disk_format):
        """
        Setup glusterfs and prepare disk image.
        """
        # Get the image path
        image_source = vm.get_first_disk_devices()['source']

        # Setup gluster
        host_ip = libvirt.setup_or_cleanup_gluster(True, vol_name,
                                                   brick_path, pool_name)
        logging.debug("host ip: %s ", host_ip)
        image_info = utils_misc.get_image_info(image_source)
        image_dest = "/mnt/%s" % disk_img

        if image_info["format"] == disk_format:
            disk_cmd = ("cp -f %s %s" % (image_source, image_dest))
        else:
            # Convert the disk format
            disk_cmd = ("qemu-img convert -f %s -O %s %s %s" %
                        (image_info["format"], disk_format,
                         image_source, image_dest))

        # Mount the gluster disk and create the image.
        process.run("mount -t glusterfs %s:%s /mnt && "
                    "%s && chmod a+rw /mnt/%s && umount /mnt"
                    % (host_ip, vol_name, disk_cmd, disk_img),
                    shell=True)

        return host_ip

    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Rebuild the disk XML for the domain.
        """
        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu",
                       "type": disk_format,
                       "cache": "none"}
        if driver_iothread:
            driver_dict.update({"iothread": driver_iothread})
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vdb", "bus": "virtio"}
        if default_pool:
            utils_misc.mount("%s:%s" % (host_ip, vol_name),
                             default_pool, "glusterfs")
            process.run("setsebool virt_use_fusefs on", shell=True)
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {"protocol": "gluster",
                           "name": "%s/%s" % (vol_name, disk_img)}
            host_dict = [{"name": host_ip, "port": "24007"}]
            # If multiple_hosts is True, add multiple hosts.
            if multiple_hosts:
                host_dict.append({"name": params.get("dummy_host1"), "port": "24007"})
                host_dict.append({"name": params.get("dummy_host2"), "port": "24007"})
            if transport:
                host_dict[0]['transport'] = transport
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict, "hosts": host_dict})
        return disk_xml

    def test_pmsuspend(vm_name):
        """
        Test pmsuspend command.
        """
        if vm.is_dead():
            vm.start()
            vm.wait_for_login()
        # Create a swap partition if necessary.
        if not vm.has_swap():
            swap_path = os.path.join(test.tmpdir, 'swap.img')
            vm.create_swap_partition(swap_path)
        ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
        libvirt.check_exit_status(ret)
        # Wait for the VM to shut down
        if not utils_misc.wait_for(lambda: vm.state() == "shut off", 60):
            test.fail("vm is still alive after S4 operation")

        # Wait for vm and qemu-ga service to start
        vm.start()
        # Prepare guest agent and start guest
        try:
            vm.prepare_guest_agent()
        except (remote.LoginError, virt_vm.VMError) as detail:
            test.fail("Failed to prepare guest agent:\n%s" % detail)

        # TODO: This step may hang for a RHEL6 guest
        ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Check vm state
        if not utils_misc.wait_for(lambda: vm.state() == "pmsuspended", 60):
            test.fail("vm isn't suspended after S3 operation")

        ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if not vm.is_alive():
            test.fail("vm is not alive after dompmwakeup")
Exemplo n.º 18
0
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """

    def check_vm_guestagent(session):
        # Check if qemu-ga already started automatically
        cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
        stat_install, output = session.cmd_status_output(cmd, 300)
        logging.debug(output)
        if stat_install != 0:
            raise error.TestError("Fail to install qemu-guest-agent, make"
                                  "sure that you have usable repo in guest")

        # Check if qemu-ga already started
        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
        if stat_ps != 0:
            session.cmd("qemu-ga -d")
            # Check if the qemu-ga really started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                raise error.TestError("Fail to run qemu-ga in guest")

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")

    # A backup of original vm
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Expected possible fail patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []

    # Set up possible failure patterns
    if pm_enabled == 'not_set':
        fail_pat.append('not supported')
    if pm_enabled == 'no':
        fail_pat.append('disabled')

    if vm_state == 'paused':
        fail_pat.append('not responding')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if pm_enabled == 'not_set':
            if 'pm' in vm_xml:
                del vm_xml.pm
        else:
            pm_xml = VMPMXML()
            if suspend_target == 'mem':
                pm_xml.mem_enabled = pm_enabled
            elif suspend_target == 'disk':
                pm_xml.disk_enabled = pm_enabled
            elif suspend_target == 'hybrid':
                if 'hybrid_enabled' in dir(pm_xml):
                    pm_xml.hybrid_enabled = pm_enabled
                else:
                    raise error.TestNAError("PM suspend type 'hybrid' is not "
                                            "supported yet.")
            vm_xml.pm = pm_xml
            vm_xml.sync()

        VMXML.set_agent_channel(vm_name)
        vm.start()

        # Create a swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        session = vm.wait_for_login()
        try:
            check_vm_guestagent(session)

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name, suspend_target, debug=True)
        finally:
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            # Cleanup
            session.close()

            if need_mkswap:
                vm.cleanup_swap()

        if result.exit_status == 0:
            if fail_pat:
                raise error.TestFail("Expected failure matching %s, but the "
                                     "run succeeded:\n%s" % (fail_pat, result))
        else:
            if not fail_pat:
                raise error.TestFail("Expected success, but the run failed:"
                                     "\n%s" % result)
            if not any(p in result.stderr for p in fail_pat):
                raise error.TestFail("Expected failure matching one of %s, "
                                     "but failed with:\n%s" % (fail_pat, result))
    finally:
        # Recover xml of vm.
        vm_xml_backup.sync()
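
The pm_enabled handling above amounts to editing the domain's <pm> element. Here is a minimal sketch of just that step, assuming the usual virttest import path; the helper name enable_pm is hypothetical:

from virttest.libvirt_xml.vm_xml import VMXML, VMPMXML


def enable_pm(vm_name, mem_enabled='yes', disk_enabled='no'):
    # Produces <pm><suspend-to-mem enabled='yes'/>
    #              <suspend-to-disk enabled='no'/></pm> in the domain XML.
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    pm_xml = VMPMXML()
    pm_xml.mem_enabled = mem_enabled
    pm_xml.disk_enabled = disk_enabled
    vmxml.pm = pm_xml
    vmxml.sync()  # Write the edited XML back to the inactive domain
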
Exemplo n.º 19
0
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    pm_enabled = params.get("pm_enabled", "not_set")
    pm_enabled_disk = params.get("pm_enabled_disk", "no")
    pm_enabled_mem = params.get("pm_enabled_mem", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_suspend_resume = "yes" == params.get("test_suspend_resume", "no")
    pmsuspend_error = 'yes' == params.get("pmsuspend_error", 'no')
    pmsuspend_error_msg = params.get("pmsuspend_error_msg")
    agent_error_test = 'yes' == params.get("agent_error_test", 'no')
    arch = platform.processor()

    # Libvirt acl test related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # A backup of original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Expected possible fail patterns.
    # Error output should match one of these patterns.
    # An empty list means the test should succeed.
    fail_pat = []
    virsh_dargs = {'debug': True, 'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs_copy = virsh_dargs.copy()
        virsh_dargs_copy['uri'] = uri
        virsh_dargs_copy['unprivileged_user'] = unprivileged_user
        if pmsuspend_error:
            fail_pat.append('access denied')

    # Set up possible failure patterns (not applicable on ppc)
    if "ppc64" not in arch:
        if pm_enabled == 'not_set':
            fail_pat.append('not supported')
        if pm_enabled == 'no':
            fail_pat.append('disabled')

    if vm_state == 'paused':
        # For older versions
        fail_pat.append('not responding')
        # For newer versions
        fail_pat.append('not running')
    elif vm_state == 'shutoff':
        fail_pat.append('not running')
    if agent_error_test:
        fail_pat.append('not running')
        fail_pat.append('agent not available')
    if pmsuspend_error_msg:
        fail_pat.append(pmsuspend_error_msg)

    # RHEL6 or older releases
    unsupported_guest_err = 'suspend mode is not supported by the guest'

    try:
        if vm.is_alive():
            vm.destroy()

        # Set pm tag in domain's XML if needed.
        if "ppc64" not in arch:
            if pm_enabled == 'not_set':
                try:
                    if vmxml.pm:
                        del vmxml.pm
                except xcepts.LibvirtXMLNotFoundError:
                    pass
            else:
                pm_xml = vm_xml.VMPMXML()
                pm_xml.mem_enabled = pm_enabled_mem
                pm_xml.disk_enabled = pm_enabled_disk
                vmxml.pm = pm_xml
            vmxml.sync()

        try:
            vm.prepare_guest_agent()
        except virt_vm.VMStartError as info:
            if "not supported" in str(info).lower():
                test.cancel(info)
            else:
                test.error(info)
        # SELinux should be enforcing
        vm.setenforce(1)

        # Create a swap partition/file if necessary.
        need_mkswap = False
        if suspend_target in ['disk', 'hybrid']:
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition.")
            vm.create_swap_partition()

        try:
            libvirtd = utils_libvirtd.Libvirtd()
            savefile = os.path.join(data_dir.get_tmp_dir(),
                                    "%s.save" % vm_name)
            session = vm.wait_for_login()
            # Touch a file on guest to test managed save command.
            if test_managedsave:
                session.cmd_status("touch pmtest")
            session.close()

            # Set vm state
            if vm_state == "paused":
                vm.pause()
            elif vm_state == "shutoff":
                vm.destroy()

            # Run test case
            result = virsh.dompmsuspend(vm_name,
                                        suspend_target,
                                        debug=True,
                                        uri=uri,
                                        unprivileged_user=unprivileged_user)
            if result.exit_status == 0:
                if fail_pat:
                    test.fail("Expected failure matching %s, but the run "
                              "succeeded:\n%s" % (fail_pat, result.stdout))
            else:
                if unsupported_guest_err in result.stderr:
                    test.cancel("Unsupported suspend mode:\n%s" %
                                result.stderr)
                if not fail_pat:
                    test.fail("Expected success, but the run failed:\n%s" %
                              result.stderr)
                if not any(p in result.stderr for p in fail_pat):
                    test.fail("Expected failure matching one of %s, but "
                              "failed with:\n%s" % (fail_pat, result.stderr))

            # check whether the state changes to pmsuspended
            if not utils_misc.wait_for(lambda: vm.state() == 'pmsuspended',
                                       30):
                test.fail("VM failed to change its state, expected state: "
                          "pmsuspended, but actual state: %s" % vm.state())

            if agent_error_test:
                ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
                libvirt.check_result(ret, fail_pat)
                ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
                libvirt.check_result(ret, fail_pat)
                ret = virsh.domtime(vm_name, **virsh_dargs)
                libvirt.check_result(ret, fail_pat)
            if test_managedsave:
                ret = virsh.managedsave(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # dompmwakeup should fail here since the domain was managed-saved
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret, True)
                ret = virsh.start(vm_name)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    test.fail("Vm status is not paused before pm wakeup")
                if params.get('setup_libvirt_polkit') == 'yes':
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs_copy)
                else:
                    ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not vm.is_paused():
                    test.fail("Vm status is not paused after pm wakeup")
                ret = virsh.resume(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                sess = vm.wait_for_login()
                if sess.cmd_status("ls pmtest && rm -f pmtest"):
                    test.fail("Check managed save failed on guest")
                sess.close()
            if test_save_restore:
                # Run a series of operations to check libvirtd status.
                ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait for the VM to start
                vm.wait_for_login()
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Wait for the VM to start
                vm.wait_for_login()
                # Run pmsuspend again
                ret = virsh.dompmsuspend(vm_name, suspend_target,
                                         **virsh_dargs)
                libvirt.check_exit_status(ret)
                # Save and restore the guest again.
                ret = virsh.save(vm_name, savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.restore(savefile, **virsh_dargs)
                libvirt.check_exit_status(ret)
                ret = virsh.destroy(vm_name, **virsh_dargs)
                libvirt.check_exit_status(ret)
                if not libvirtd.is_running():
                    test.fail("libvirtd crashed")
            if test_suspend_resume:
                ret = virsh.suspend(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    test.fail("VM state should be pmsuspended")
                ret = virsh.resume(vm_name)
                libvirt.check_exit_status(ret, expect_error=True)
                if vm.state() != 'pmsuspended':
                    test.fail("VM state should be pmsuspended")
        finally:
            libvirtd.restart()
            # Remove the tmp file
            if os.path.exists(savefile):
                os.remove(savefile)
            # Restore VM state
            if vm_state == "paused":
                vm.resume()

            if suspend_target in ['mem', 'hybrid']:
                if vm.state() == "pmsuspended":
                    virsh.dompmwakeup(vm_name)
            else:
                if vm.state() == "in shutdown":
                    vm.wait_for_shutdown()
                if vm.is_dead():
                    vm.start()

            if need_mkswap:
                vm.cleanup_swap()

    finally:
        # Destroy the vm.
        if vm.is_alive():
            vm.destroy()
        # Recover xml of vm.
        vmxml_backup.sync()
    def manipulate_domain(vm_name, vm_operation, recover=False):
        """
        Operate domain to given state or recover it.

        :param vm_name: Name of the VM domain
        :param vm_operation: Operation to be performed on the VM domain,
                             e.g. save, managedsave, suspend
        :param recover: Whether to recover from vm_operation rather than
                        perform it
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), vm_name + ".save")
        if not recover:
            if vm_operation == "save":
                save_option = ""
                result = virsh.save(vm_name, save_file, save_option,
                                    ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "managedsave":
                managedsave_option = ""
                result = virsh.managedsave(vm_name, managedsave_option,
                                           ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s4":
                suspend_target = "disk"
                result = virsh.dompmsuspend(vm_name, suspend_target,
                                            ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
                # Wait for the domain state change: 'in shutdown' -> 'shut off'
                utils_misc.wait_for(lambda: virsh.is_dead(vm_name), 5)
            elif vm_operation == "suspend":
                result = virsh.suspend(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                vm.reboot()
            else:
                logging.debug("No operation for the domain")

        else:
            if vm_operation == "save":
                if os.path.exists(save_file):
                    result = virsh.restore(save_file, ignore_status=True,
                                           debug=True)
                    libvirt.check_exit_status(result)
                    os.remove(save_file)
                else:
                    test.error("No save file for domain restore")
            elif vm_operation in ["managedsave", "s4"]:
                result = virsh.start(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "s3":
                suspend_target = "mem"
                result = virsh.dompmwakeup(vm_name, ignore_status=True,
                                           debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "suspend":
                result = virsh.resume(vm_name, ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            elif vm_operation == "reboot":
                pass
            else:
                logging.debug("No need recover the domain")
Exemplo n.º 21
0
def run(test, params, env):
    """
    Test command: virsh dompmsuspend <domain> <target>
    The command suspends a running domain using guest OS's power management.
    """

    def check_vm_guestagent(session):
        # Check if qemu-ga already started automatically
        cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
        stat_install = session.cmd_status(cmd, 300)
        if stat_install != 0:
            raise error.TestFail("Fail to install qemu-guest-agent, make"
                                 "sure that you have usable repo in guest")

        # Check if qemu-ga already started
        stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
        if stat_ps != 0:
            session.cmd("qemu-ga -d")
            # Check if the qemu-ga really started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                raise error.TestFail("Fail to run qemu-ga in guest")

    def check_pm_utils(session):
        # Check if pm-utils is present in vm
        cmd = "rpm -q pm-utils || yum install -y pm-utils"
        stat_install = session.cmd_status(cmd, 300)
        if stat_install != 0:
            raise error.TestFail("Fail to install pm-utils, make"
                                 "sure that you have usable repo in guest")

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_state = params.get("vm_state", "running")
    suspend_target = params.get("pm_suspend_target", "mem")
    status_error = "yes" == params.get("status_error", "yes")
    virsh_dargs = {'debug': True}

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    try:
        if vm.is_alive():
            vm.destroy()
        libvirt_xml.VMXML.set_agent_channel(vm_name)
        vm.start()
        session = vm.wait_for_login()

        check_vm_guestagent(session)
        check_pm_utils(session)

        session.close()

        # Set vm state
        if vm_state == "paused":
            vm.pause()
        elif vm_state == "shutoff":
            vm.destroy()

        # Run test case
        result = virsh.dompmsuspend(vm_name, suspend_target, **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()

        # Check status_error
        if status_error:
            if status == 0 or err == "":
                raise error.TestFail("Expect fail, but run successfully!")
        else:
            if status != 0 or err != "":
                raise error.TestFail("Run failed with right command")
    finally:
        # cleanup
        if vm_state == "paused":
            vm.resume()

        if suspend_target == "mem" or suspend_target == "hybrid":
            if vm.state() == "pmsuspended":
                virsh.dompmwakeup(vm_name)

        # Recover xml of vm.
        vmxml_backup.sync()
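
Finally, the check_exit_status() contract relied on throughout these examples can be restated in a few lines. This is a sketch of the expected behavior only, not necessarily the real utils_test.libvirt implementation:

def check_exit_status(result, expect_error=False):
    # Fail when the command outcome does not match the expectation
    # encoded by expect_error.
    if expect_error:
        if result.exit_status == 0:
            raise AssertionError("Command succeeded but failure was expected")
    elif result.exit_status != 0:
        raise AssertionError("Command failed unexpectedly: %s"
                             % result.stderr.strip())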