Example 1
def check_domjobinfo(vm, params, option="", remote_virsh_dargs=None):
    """
    Check given item in domjobinfo of the guest is as expected

    :param vm: the vm object
    :param params: the parameters used
    :param option: options for domjobinfo
    :param remote_virsh_dargs: the parameters for remote host
    :raise: exceptions.TestFail if the value of given item is unexpected
    """
    def _search_jobinfo(jobinfo, ignore_status=False):
        """
        Find value of given item in domjobinfo

        :param jobinfo: cmdResult object
        :param ignore_status: False to raise Error, True to ignore
        :raise: exceptions.TestFail if not found
        """
        for item in jobinfo.stdout.splitlines():
            if item.count(jobinfo_item):
                groups = re.findall(r'[0-9.]+', item.strip())
                logging.debug("In '%s' search '%s'\n", item, groups[0])
                if (math.fabs(float(groups[0]) - float(compare_to_value)) /
                        float(compare_to_value) > diff_rate):
                    err_msg = ("{} {} has too much difference from "
                               "{}".format(jobinfo_item,
                                           groups[0],
                                           compare_to_value))
                    if ignore_status:
                        logging.error(err_msg)
                    else:
                        raise exceptions.TestFail(err_msg)
                break

    jobinfo_item = params.get("jobinfo_item")
    compare_to_value = params.get("compare_to_value")
    ignore_status = params.get("domjob_ignore_status", False)
    logging.debug("compare_to_value:%s", compare_to_value)
    diff_rate = float(params.get("diff_rate", "0"))
    if not jobinfo_item or not compare_to_value:
        return
    jobinfo = virsh.domjobinfo(vm.name, option, debug=True)
    _search_jobinfo(jobinfo, ignore_status)

    check_domjobinfo_remote = params.get("check_domjobinfo_remote")
    if check_domjobinfo_remote and remote_virsh_dargs:
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        jobinfo = remote_virsh_session.domjobinfo(vm.name, option, debug=True)
        _search_jobinfo(jobinfo, ignore_status)
        remote_virsh_session.close_session()
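For orientation, the relative-difference check that _search_jobinfo applies can be exercised on its own. The following is a minimal standalone sketch; the helper name, the sample domjobinfo line and the 20% tolerance are illustrative, not part of the example above.

import math
import re

def within_diff_rate(jobinfo_line, expected, diff_rate):
    """Return True if the first number on a domjobinfo line is within diff_rate of expected."""
    found = float(re.findall(r'[0-9.]+', jobinfo_line.strip())[0])
    # Relative difference, as computed in _search_jobinfo above (using true division)
    return math.fabs(found - float(expected)) / float(expected) <= diff_rate

print(within_diff_rate("Memory total:     2097152 KiB", 2097152, 0.2))  # True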
Example 2
def is_backup_canceled(vm_name):
    """
    Check if a backup job was canceled.

    :param vm_name: vm's name
    :return: True if a backup job was canceled, False otherwise.
    """
    virsh_output = virsh.domjobinfo(vm_name, extra="--completed",
                                    debug=True).stdout_text
    if virsh_output:
        virsh_output = virsh_output.lower()
        if "backup" in virsh_output and "cancel" in virsh_output:
            return True
    return False
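A small polling wrapper can turn this check into a wait. The sketch below is generic and standalone; the wrapper name is an assumption, and the VM name in the commented usage is illustrative.

import time

def wait_for(predicate, timeout=30, interval=2):
    """Poll predicate() until it returns True or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# Hypothetical usage with the helper above (VM name is illustrative):
# if not wait_for(lambda: is_backup_canceled("avocado-vt-vm1")):
#     test.fail("Backup job was not canceled in time")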
Example 3
    def check_domjobinfo(params, option=""):
        """
        Check given item in domjobinfo of the guest is as expected

        :param params: the parameters used
        :param option: options for domjobinfo
        :raise: test.fail if the value of given item is unexpected
        """
        def search_jobinfo(jobinfo):
            """
            Find value of given item in domjobinfo

            :param jobinfo: cmdResult object
            :raise: test.fail if not found
            """
            for item in jobinfo.stdout.splitlines():
                if item.count(jobinfo_item):
                    groups = re.findall(r'[0-9.]+', item.strip())
                    logging.debug("In '%s' search '%s'\n", item, groups[0])
                    if (math.fabs(float(groups[0]) - float(compare_to_value)) /
                            float(compare_to_value) > diff_rate):
                        test.fail("{} {} has too much difference from "
                                  "{}".format(jobinfo_item,
                                              groups[0],
                                              compare_to_value))
                    break

        jobinfo_item = params.get("jobinfo_item")
        compare_to_value = params.get("compare_to_value")
        logging.debug("compare_to_value:%s", compare_to_value)
        diff_rate = float(params.get("diff_rate", "0"))
        if not jobinfo_item or not compare_to_value:
            return
        vm_ref = '{}{}'.format(vm_name, option)
        jobinfo = virsh.domjobinfo(vm_ref, **virsh_args)
        search_jobinfo(jobinfo)

        check_domjobinfo_remote = params.get("check_domjobinfo_remote")
        if check_domjobinfo_remote:
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            jobinfo = remote_virsh_session.domjobinfo(vm_ref, **virsh_args)
            search_jobinfo(jobinfo)
            remote_virsh_session.close_session()
Example 5
def run(test, params, env):
    """
    Test command: migrate-compcache <domain> [--size <number>]

    1) Run migrate-compcache command and check return code.
    """
    vm_ref = params.get("vm_ref", "name")
    vm_name = params.get('main_vm')
    start_vm = 'yes' == params.get('start_vm', 'yes')
    pause_vm = 'yes' == params.get('pause_after_start_vm', 'no')
    expect_succeed = 'yes' == params.get('expect_succeed', 'yes')
    size_option = params.get('size_option', 'valid')
    action = params.get('compcache_action', 'get')
    vm = env.get_vm(vm_name)

    # Check if the virsh command migrate-compcache is available
    if not virsh.has_help_command('migrate-compcache'):
        raise error.TestNAError("This version of libvirt does not support "
                                "virsh command migrate-compcache")

    # Prepare the VM state if it's not correct.
    if start_vm and not vm.is_alive():
        vm.start()
    elif not start_vm and vm.is_alive():
        vm.destroy()
    if pause_vm and not vm.is_paused():
        vm.pause()

    # Setup domain reference
    if vm_ref == 'domname':
        vm_ref = vm_name

    # Setup size according to size_option:
    # minimal: Same as memory page size
    # maximal: Same as guest memory
    # empty: An empty string
    # small: One byte less than page size
    # large: Larger than guest memory
    # huge : Largest int64
    page_size = get_page_size()
    if size_option == 'minimal':
        size = str(page_size)
    elif size_option == 'maximal':
        size = str(vm.get_max_mem() * 1024)
    elif size_option == 'empty':
        size = '""'
    elif size_option == 'small':
        size = str(page_size - 1)
    elif size_option == 'large':
        # Add 50MB to the guest's max memory so that the cache size
        # is guaranteed to exceed guest memory.
        size = str(vm.get_max_mem() * 1024 + 50000000)
    elif size_option == 'huge':
        size = str(2**64 - 1)
    else:
        size = size_option

    # If we need to get, just omit the size option
    if action == 'get':
        size = None

    # Run testing command
    result = virsh.migrate_compcache(vm_ref, size=size)
    logging.debug(result)

    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    check_job_compcache = False
    if not remote_host.count(
            "EXAMPLE") and size is not None and expect_succeed:
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_pwd, port=22)
        if vm.is_dead():
            vm.start()
        if vm.is_paused():
            vm.resume()
        vm.wait_for_login()
        # Do actual migration to verify compression cache of migrate jobs
        command = "virsh migrate %s %s --compressed" % (vm_name, remote_uri)
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # Give enough time for starting job
        t = 0
        while t < 5:
            jobtype = vm.get_job_type()
            if "None" == jobtype:
                t += 1
                time.sleep(1)
                continue
            elif jobtype is False:
                logging.error("Get job type failed.")
                break
            else:
                logging.debug("Job started: %s", jobtype)
                break

        jobinfo = virsh.domjobinfo(vm_ref, debug=True,
                                   ignore_status=True).stdout
        check_job_compcache = True
        if p.poll():
            try:
                p.kill()
            except OSError:
                pass

        # Cleanup in case of successful migration
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # Shut down the VM to make sure the compcache setting cleared
    if vm.is_alive():
        vm.destroy()

    # Check test result
    if expect_succeed:
        if result.exit_status != 0:
            raise error.TestFail(
                'Expected success, but failed with result:\n%s' % result)
        if check_job_compcache:
            for line in jobinfo.splitlines():
                detail = line.split(":")
                if detail[0].count("Compression cache"):
                    value = detail[-1].split()[0].strip()
                    value = int(float(value))
                    unit = detail[-1].split()[-1].strip()
                    if unit == "KiB":
                        size = int(int(size) / 1024)
                    elif unit == "MiB":
                        size = int(int(size) / 1048576)
                    elif unit == "GiB":
                        size = int(int(size) / 1073741824)
                    if value != size:
                        raise error.TestFail("Compression cache is not match"
                                             " with setted")
                    else:
                        return
            raise error.TestFail("Get compression cahce in job failed.")
    elif not expect_succeed:
        if result.exit_status == 0:
            raise error.TestFail(
                'Expected failure, but succeeded with result:\n%s' % result)
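The unit handling used when comparing the "Compression cache" line of domjobinfo with the configured size (in bytes) can be factored out as in the sketch below; the helper name and the sample values are illustrative, not part of the test above.

def compcache_matches(reported, set_size_bytes):
    """Compare a domjobinfo 'Compression cache' value such as '64.000 MiB' with a size set in bytes."""
    value, unit = reported.split()[0], reported.split()[-1]
    value = int(float(value))
    divisor = {"KiB": 1024, "MiB": 1048576, "GiB": 1073741824}.get(unit, 1)
    return value == int(int(set_size_bytes) / divisor)

print(compcache_matches("64.000 MiB", 67108864))  # True: 67108864 bytes == 64 MiB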
Example 6
def run(test, params, env):
    """
    Test command: virsh domjobinfo.

    The command returns information about jobs running on a domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform job action on a domain.
    4.Get running and completed job info by virsh domjobinfo.
    5.Recover test environment.
    6.Confirm the test result.
    """
    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute background virsh command, return subprocess w/o waiting for exit()

        :param action : virsh command and its option.
        :param vm_name : VM's name
        :param file : virsh command's file option, could be vm.dump, vm.save, etc.
        """
        command = "virsh %s %s %s" % (action, vm_name, file)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    def cmp_jobinfo(result, info_list, job_type, actions):
        """
        Compare the output jobinfo with expected one

        :param result : the return from domjobinfo cmd
        :param info_list : an expected domjobinfo list
        :param job_type : an expected value for 'Job Type'
        :param actions : the job operation
        """
        logging.debug(result.stdout)
        out_list = result.stdout.strip().splitlines()
        out_dict = dict([x.split(':') for x in out_list])
        ret_cmp = set(out_dict.keys()) == set(info_list)
        if not ret_cmp:
            if set(info_list) - set(out_dict.keys()):
                test.fail("Missing expected items in domjobinfo output: %s" %
                          (set(info_list) - set(out_dict.keys())))
            else:
                new_cmp = set(out_dict.keys()) - set(info_list)
                known_item = {'Memory bandwidth'}
                # For running domjobinfo, 'Memory bandwidth' appears sometimes.
                if new_cmp != known_item or job_type == "Completed":
                    test.fail("New items appear: %s, pls modify script!",
                              new_cmp)
        else:
            if out_dict["Job type"].strip() != job_type:
                test.fail("Expect %s Job type but got %s" %
                          (job_type, out_dict["Job type"].strip()))
            if job_type != "None" and out_dict["Operation"].strip(
            ) != actions.capitalize():
                test.fail(
                    "Expect %s Operation but got %s" %
                    (actions.capitalize(), out_dict["Operation"].strip()))

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After starting the VM, wait for some time to make sure a job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    actions = params.get("domjobinfo_action", "dump")
    act_opt = params.get("dump_opt", "")
    vm_ref = params.get("domjobinfo_vm_ref")
    status_error = params.get("status_error", "no")
    keep_complete = "yes" == params.get("keep_complete", "no")
    libvirtd = params.get("libvirtd", "on")
    # Use tmp_pipe to act as target file for job operation in subprocess,
    # such as vm.dump, vm.save, etc.
    tmp_pipe = os.path.join(data_dir.get_tmp_dir(), "domjobinfo.fifo")

    # Build job action
    action = ' '.join([actions, act_opt])
    if actions == "managedsave":
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    # Expected domjobinfo list
    info_list = [
        "Job type", "Time elapsed", "Data processed", "Data remaining",
        "Data total", "Memory processed", "Memory remaining", "Memory total",
        "Dirty rate", "Iteration", "Constant pages", "Normal pages",
        "Normal data", "Expected downtime", "Setup time"
    ]
    if libvirt_version.version_compare(3, 2, 0):
        info_list.insert(1, "Operation")
        if libvirt_version.version_compare(3, 9, 0):
            info_list.insert(info_list.index("Dirty rate") + 1, "Page size")
            if libvirt_version.version_compare(5, 0, 0):
                info_list.insert(
                    info_list.index("Iteration") + 1, "Postcopy requests")
    logging.debug("The expected info_list for running job is %s", info_list)

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domjobinfo_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif 'invalid' in vm_ref:
        vm_ref = params.get(vm_ref)

    # Get the subprocess of VM.
    # The goal is to get domjobinfo for a running domain job,
    # so before running "domjobinfo" we must create a job on the domain.
    process = None
    if start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        os.mkfifo(tmp_pipe)

        # Target file param is not needed for managedsave operation
        if action == "managedsave ":
            process = get_subprocess(action, vm_name, "", None)
        else:
            process = get_subprocess(action, vm_name, tmp_pipe, None)

        f = open(tmp_pipe, 'rb')
        dummy = f.read(1024 * 1024).decode(locale.getpreferredencoding(),
                                           'ignore')

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Give enough time for starting job
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break

    # Get domjobinfo while job is running
    ret = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True)
    status = ret.exit_status

    # Clear process env
    if process and f:
        dummy = f.read()
        f.close()

        try:
            os.unlink(tmp_pipe)
        except OSError as detail:
            logging.info("Can't remove %s: %s", tmp_pipe, detail)

    if process:
        if process.poll():
            try:
                process.kill()
            except OSError:
                pass

    # Get completed domjobinfo with the --keep-completed option; a later
    # "domjobinfo --completed" call will still return statistics.
    if keep_complete:
        time.sleep(5)
        vm_ref_tmp = "%s --completed --keep-completed" % vm_ref
        virsh.domjobinfo(vm_ref_tmp, ignore_status=False, debug=True)

    # Get completed domjobinfo. (Without the --keep-completed option, a later
    # "domjobinfo --completed" call will return a "None" job.)
    if status_error == "no":
        time.sleep(5)
        if act_opt != "--live" and vm_ref == domid:
            # use vm_name but not id since domain shutoff
            vm_ref = vm_name
        vm_ref = "%s --completed" % vm_ref
        ret_cmplt = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True)
        status_cmplt = ret_cmplt.exit_status

    # Get completed domjobinfo again; it should now report a "None" job.
    if keep_complete:
        ret_cmplt_later = virsh.domjobinfo(vm_ref,
                                           ignore_status=True,
                                           debug=True)

    # Recover the environment.
    if actions == "managedsave":
        virsh.managedsave_remove(vm_name, ignore_status=True)
    if pre_vm_state == "suspend":
        vm.resume()
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off" and libvirt_version.version_compare(5, 6, 0):
                logging.info("From libvirt 5.6.0 libvirtd is restarted "
                             "and command should succeed")
            else:
                test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or status_cmplt != 0:
            test.fail("Run failed with right command")

    # if libvirtd wasn't running the jobinfo is expected to be empty
    if status_error == "no" and not libvirtd == "off":
        # The 'managedsave' Operation will be shown as 'Save' in domjobinfo
        if actions == "managedsave":
            actions = "save"
        # Check output of "virsh domjobinfo"
        cmp_jobinfo(ret, info_list, "Unbounded", actions)
        # Check output of "virsh domjobinfo --completed"
        info_list.insert(
            info_list.index("Memory total") + 1, "Memory bandwidth")
        info_list[info_list.index("Expected downtime")] = "Total downtime"
        logging.debug("The expected info_list for completed job is %s",
                      info_list)
        cmp_jobinfo(ret_cmplt, info_list, "Completed", actions)
        # Check output of later "virsh domjobinfo --completed"
        if keep_complete:
            info_list = ["Job type"]
            cmp_jobinfo(ret_cmplt_later, info_list, "None", actions)
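The key-set comparison done by cmp_jobinfo can be illustrated standalone. The abbreviated sample output below is illustrative; real domjobinfo output contains the full set of items built in info_list above.

sample_output = """Job type:         Unbounded
Operation:        Dump
Time elapsed:     1234         ms
Data processed:   10.000 MiB"""

# maxsplit=1 keeps values containing ':' intact
out_dict = dict(line.split(':', 1) for line in sample_output.strip().splitlines())
expected = ["Job type", "Operation", "Time elapsed", "Data processed"]

print(set(expected) - set(out_dict.keys()))  # missing items -> set()
print(set(out_dict.keys()) - set(expected))  # unexpected items -> set()
print(out_dict["Job type"].strip())          # Unbounded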
Example 7
def copied_migration(test, vm, params, blockjob_type=None, block_target="vda"):
    """
    Migrate vms with storage copied under some stress.
    And during it, some qemu-monitor-command will be sent.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("remote_user")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    options = "--live %s --unsafe" % copy_option

    # Get vm ip for remote checking
    if vm.is_dead():
        vm.start()
    vm.wait_for_login()
    vms_ip = {}
    vms_ip[vm.name] = vm.get_address()
    logging.debug("VM %s IP: %s", vm.name, vms_ip[vm.name])

    # Start to load stress
    stress_type = params.get("migrate_stress_type")
    if stress_type == "cpu":
        params['stress_args'] = "--cpu 2 --quiet --timeout 60"
    elif stress_type == "memory":
        params['stress_args'] = "--vm 2 --vm-bytes 256M --vm-keep --timeout 60"
    if stress_type is not None:
        utils_test.load_stress("stress_in_vms", params=params, vms=[vm])

    cp_mig = migration.MigrationTest()
    migration_thread = threading.Thread(target=cp_mig.thread_func_migration,
                                        args=(vm, dest_uri, options))
    migration_thread.start()
    # Wait for migration launched
    time.sleep(5)
    job_ret = virsh.domjobinfo(vm.name, debug=True)
    if job_ret.exit_status:
        test.error("Prepare migration for blockjob failed.")

    # Execute some qemu monitor commands
    pause_cmd = "block-job-pause %s" % block_target
    resume_cmd = "block-job-resume %s" % block_target
    cancel_cmd = "block-job-cancel %s" % block_target
    complete_cmd = "block-job-complete %s" % block_target

    blockjob_failures = []
    try:
        if blockjob_type == "cancel":
            virsh.qemu_monitor_command(vm.name,
                                       cancel_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "pause_resume":
            virsh.qemu_monitor_command(vm.name,
                                       pause_cmd,
                                       debug=True,
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name,
                                       resume_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name,
                                       complete_cmd,
                                       debug=True,
                                       ignore_status=False)
    except process.CmdError as detail:
        blockjob_failures.append(str(detail))

    # Job info FYI
    virsh.domjobinfo(vm.name, debug=True)

    if len(blockjob_failures):
        timeout = 30

    migration_thread.join(timeout)
    if migration_thread.is_alive():
        logging.error("Migrate %s timeout.", migration_thread)
        cp_mig.RET_LOCK.acquire()
        cp_mig.RET_MIGRATION = False
        cp_mig.RET_LOCK.release()

    if len(blockjob_failures):
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        test.fail("Run qemu monitor command failed %s" % blockjob_failures)

    check_ip_failures = []
    if cp_mig.RET_MIGRATION:
        try:
            utils_test.check_dest_vm_network(vm, vms_ip[vm.name], remote_host,
                                             username, password)
        except exceptions.TestFail as detail:
            check_ip_failures.append(str(detail))
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        if blockjob_type in ["cancel", "complete"]:
            test.fail("Storage migration passed even after " "cancellation.")
    else:
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        if blockjob_type in ["cancel", "complete"]:
            logging.error("Expected Migration Error for %s", blockjob_type)
            return
        else:
            test.fail("Command blockjob does not work well under "
                      "storage copied migration.")

    if len(check_ip_failures):
        test.fail("Check IP failed:%s" % check_ip_failures)
Example 8
def copied_migration(test, vm, params, blockjob_type=None, block_target="vda"):
    """
    Migrate vms with storage copied under some stress.
    And during it, some qemu-monitor-command will be sent.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("remote_user")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    options = "--live %s --unsafe" % copy_option

    # Get vm ip for remote checking
    if vm.is_dead():
        vm.start()
    vm.wait_for_login()
    vms_ip = {}
    vms_ip[vm.name] = vm.get_address()
    logging.debug("VM %s IP: %s", vm.name, vms_ip[vm.name])

    # Start to load stress
    stress_type = params.get("migrate_stress_type")
    if stress_type == "cpu":
        params['stress_args'] = "--cpu 2 --quiet --timeout 60"
    elif stress_type == "memory":
        params['stress_args'] = "--vm 2 --vm-bytes 256M --vm-keep --timeout 60"
    if stress_type is not None:
        utils_test.load_stress("stress_in_vms", params=params, vms=[vm])

    cp_mig = utlv.MigrationTest()
    migration_thread = threading.Thread(target=cp_mig.thread_func_migration,
                                        args=(vm, dest_uri, options))
    migration_thread.start()
    # Wait for migration launched
    time.sleep(5)
    job_ret = virsh.domjobinfo(vm.name, debug=True)
    if job_ret.exit_status:
        test.error("Prepare migration for blockjob failed.")

    # Execute some qemu monitor commands
    pause_cmd = "block-job-pause %s" % block_target
    resume_cmd = "block-job-resume %s" % block_target
    cancel_cmd = "block-job-cancel %s" % block_target
    complete_cmd = "block-job-complete %s" % block_target

    blockjob_failures = []
    try:
        if blockjob_type == "cancel":
            virsh.qemu_monitor_command(vm.name, cancel_cmd, debug=True,
                                       ignore_status=False)
        elif blockjob_type == "pause_resume":
            virsh.qemu_monitor_command(vm.name, pause_cmd, debug=True,
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name, resume_cmd, debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name, complete_cmd, debug=True,
                                       ignore_status=False)
    except process.CmdError as detail:
        blockjob_failures.append(str(detail))

    # Job info FYI
    virsh.domjobinfo(vm.name, debug=True)

    if len(blockjob_failures):
        timeout = 30

    migration_thread.join(timeout)
    if migration_thread.is_alive():
        logging.error("Migrate %s timeout.", migration_thread)
        cp_mig.RET_LOCK.acquire()
        cp_mig.RET_MIGRATION = False
        cp_mig.RET_LOCK.release()

    if len(blockjob_failures):
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        test.fail("Run qemu monitor command failed %s"
                  % blockjob_failures)

    check_ip_failures = []
    if cp_mig.RET_MIGRATION:
        try:
            utils_test.check_dest_vm_network(vm, vms_ip[vm.name],
                                             remote_host, username,
                                             password)
        except exceptions.TestFail as detail:
            check_ip_failures.append(str(detail))
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        if blockjob_type in ["cancel", "complete"]:
            test.fail("Storage migration passed even after "
                      "cancellation.")
    else:
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        if blockjob_type in ["cancel", "complete"]:
            logging.error("Expected Migration Error for %s", blockjob_type)
            return
        else:
            test.fail("Command blockjob does not work well under "
                      "storage copied migration.")

    if len(check_ip_failures):
        test.fail("Check IP failed:%s" % check_ip_failures)
Example 9
            virsh.qemu_monitor_command(vm.name, cancel_cmd, debug=True,
                                       ignore_status=False)
        elif blockjob_type == "pause_resume":
            virsh.qemu_monitor_command(vm.name, pause_cmd, debug=True,
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name, resume_cmd, debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name, complete_cmd, debug=True,
                                       ignore_status=False)
    except error.CmdError as detail:
        blockjob_failures.append(str(detail))

    # Job info FYI
    virsh.domjobinfo(vm.name, debug=True)

    if len(blockjob_failures):
        timeout = 30

    migration_thread.join(timeout)
    if migration_thread.is_alive():
        logging.error("Migrate %s timeout.", migration_thread)
        cp_mig.RET_LOCK.acquire()
        cp_mig.RET_MIGRATION = False
        cp_mig.RET_LOCK.release()

    if len(blockjob_failures):
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        raise error.TestFail("Run qemu monitor command failed %s"
                             % blockjob_failures)
Example 10
def copied_migration(vm, params, blockjob_type=None, block_target="vda"):
    """
    Migrate vms with storage copied under some stress.
    And during it, some qemu-monitor-command will be sent.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("remote_user")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    options = "--live %s --unsafe" % copy_option

    # Get vm ip for remote checking
    if vm.is_dead():
        vm.start()
    vm.wait_for_login()
    vms_ip = {}
    vms_ip[vm.name] = vm.get_address()
    logging.debug("VM %s IP: %s", vm.name, vms_ip[vm.name])

    # Start to load stress
    stress_type = params.get("migrate_stress_type")
    if stress_type == "cpu":
        params['stress_args'] = "--cpu 2 --quiet --timeout 60"
    elif stress_type == "memory":
        params['stress_args'] = "--vm 2 --vm-bytes 256M --vm-keep --timeout 60"
    if stress_type is not None:
        utils_test.load_stress("stress_in_vms", [vm], params)

    cp_mig = utlv.MigrationTest()
    migration_thread = threading.Thread(target=cp_mig.thread_func_migration,
                                        args=(vm, dest_uri, options))
    migration_thread.start()
    # Wait for migration launched
    time.sleep(5)
    job_ret = virsh.domjobinfo(vm.name, debug=True)
    if job_ret.exit_status:
        raise error.TestError("Prepare migration for blockjob failed.")

    # Execute some qemu monitor commands
    pause_cmd = "block-job-pause %s" % block_target
    resume_cmd = "block-job-resume %s" % block_target
    cancel_cmd = "block-job-cancel %s" % block_target
    complete_cmd = "block-job-complete %s" % block_target

    blockjob_failures = []
    try:
        if blockjob_type == "cancel":
            virsh.qemu_monitor_command(vm.name,
                                       cancel_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "pause_resume":
            virsh.qemu_monitor_command(vm.name,
                                       pause_cmd,
                                       debug=True,
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name,
                                       resume_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name,
                                       complete_cmd,
                                       debug=True,
                                       ignore_status=False)
    except error.CmdError as detail:
        blockjob_failures.append(str(detail))
Example 11
    def check_converge(params):
        """
        Handle option '--auto-converge --auto-converge-initial
        --auto-converge-increment '.
        'Auto converge throttle' in domjobinfo should start with
        the initial value and increase with correct increment
        and max value is 99.

        :param params: The parameters used
        :raise: exceptions.TestFail when unexpected or no throttle
                       is found
        """
        initial = int(params.get("initial", 20))
        increment = int(params.get("increment", 10))
        max_converge = int(params.get("max_converge", 99))
        allow_throttle_list = [initial + count * increment
                               for count in range(0, (100 - initial) // increment + 1)
                               if (initial + count * increment) < 100]
        allow_throttle_list.append(max_converge)
        logging.debug("The allowed 'Auto converge throttle' value "
                      "is %s", allow_throttle_list)

        throttle = 0
        jobtype = "None"

        while throttle < 100:
            cmd_result = virsh.domjobinfo(vm_name, debug=True,
                                          ignore_status=True)
            if cmd_result.exit_status:
                logging.debug(cmd_result.stderr)
                # Check if migration is completed
                if "domain is not running" in cmd_result.stderr:
                    args = vm_name + " --completed"
                    cmd_result = virsh.domjobinfo(args, debug=True,
                                                  ignore_status=True)
                    if cmd_result.exit_status:
                        test.error("Failed to get domjobinfo and domjobinfo "
                                   "--completed: %s" % cmd_result.stderr)
                else:
                    test.error("Failed to get domjobinfo: %s" % cmd_result.stderr)
            jobinfo = cmd_result.stdout
            for line in jobinfo.splitlines():
                key = line.split(':')[0]
                if key.count("Job type"):
                    jobtype = line.split(':')[-1].strip()
                elif key.count("Auto converge throttle"):
                    throttle = int(line.split(':')[-1].strip())
                    logging.debug("Auto converge throttle:%s", str(throttle))
            if throttle and throttle not in allow_throttle_list:
                test.fail("Invalid auto converge throttle "
                          "value '%s'" % throttle)
            if throttle == 99:
                logging.debug("'Auto converge throttle' reaches maximum "
                              "allowed value 99")
                break
            if jobtype == "None" or jobtype == "Completed":
                logging.debug("Jobtype:%s", jobtype)
                if not throttle:
                    test.fail("'Auto converge throttle' is "
                              "not found in the domjobinfo")
                break
            time.sleep(5)
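The sequence of throttle values that check_converge accepts can be computed on its own. The helper below is a standalone sketch; the defaults (initial 20, increment 10, maximum 99) are the ones used in the code above.

def allowed_throttle_values(initial=20, increment=10, max_converge=99):
    """Auto converge throttle values expected in domjobinfo: initial, initial+increment, ... (below 100), plus the maximum."""
    values = [initial + count * increment
              for count in range(0, (100 - initial) // increment + 1)
              if (initial + count * increment) < 100]
    values.append(max_converge)
    return values

print(allowed_throttle_values())  # [20, 30, 40, 50, 60, 70, 80, 90, 99]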
Example 12
    def check_domjobinfo_output(option="", is_mig_compelete=False):
        """
        Check all items in domjobinfo of the guest on both remote and local

        :param option: options for domjobinfo
        :param is_mig_compelete: False for domjobinfo checking during migration,
                            True for domjobinfo checking after migration
        :raise: test.fail if the value of given item is unexpected
        """
        expected_list_during_mig = ["Job type", "Operation", "Time elapsed",
                                    "Data processed", "Data remaining",
                                    "Data total", "Memory processed",
                                    "Memory remaining", "Memory total",
                                    "Memory bandwidth", "Dirty rate", "Page size",
                                    "Iteration", "Constant pages", "Normal pages",
                                    "Normal data", "Expected downtime", "Setup time"]
        if libvirt_version.version_compare(4, 10, 0):
            expected_list_during_mig.insert(13, "Postcopy requests")

        expected_list_after_mig_src = copy.deepcopy(expected_list_during_mig)
        expected_list_after_mig_src[-2] = 'Total downtime'
        expected_list_after_mig_dest = copy.deepcopy(expected_list_after_mig_src)

        # Check version in remote
        if not expected_list_after_mig_dest.count("Postcopy requests"):
            remote_session = remote.remote_login("ssh", server_ip, "22", server_user,
                                                 server_pwd, "#")
            if libvirt_version.version_compare(4, 10, 0, session=remote_session):
                expected_list_after_mig_dest.insert(14, "Postcopy requests")
            remote_session.close()

        expect_dict = {"src_notdone": {"Job type": "Unbounded",
                                       "Operation": "Outgoing migration",
                                       "all_items": expected_list_during_mig},
                       "dest_notdone": {"error": "Operation not supported: mig"
                                                 "ration statistics are availab"
                                                 "le only on the source host"},
                       "src_done": {"Job type": "Completed",
                                    "Operation": "Outgoing migration",
                                    "all_items": expected_list_after_mig_src},
                       "dest_done": {"Job type": "Completed",
                                     "Operation": "Incoming migration",
                                     "all_items": expected_list_after_mig_dest}}
        pc_opt = False
        if postcopy_options:
            pc_opt = True
            if is_mig_compelete:
                expect_dict["dest_done"].clear()
                expect_dict["dest_done"]["Job type"] = "None"
            else:
                set_migratepostcopy()

        vm_ref = '{}{}'.format(vm_name, option)
        src_jobinfo = virsh.domjobinfo(vm_ref, **virsh_args)
        cmd = "virsh domjobinfo {} {}".format(vm_name, option)
        dest_jobinfo = remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

        if not is_mig_compelete:
            search_jobinfo_output(src_jobinfo.stdout, expect_dict["src_notdone"],
                                  postcopy_req=pc_opt)
            search_jobinfo_output(dest_jobinfo.stderr, expect_dict["dest_notdone"])
        else:
            search_jobinfo_output(src_jobinfo.stdout, expect_dict["src_done"])
            search_jobinfo_output(dest_jobinfo.stdout, expect_dict["dest_done"])
Example 13
def run(test, params, env):
    """
    Test command: migrate-compcache <domain> [--size <number>]

    1) Run migrate-compcache command and check return code.
    """
    vm_ref = params.get("vm_ref", "name")
    vm_name = params.get("migrate_main_vm")
    start_vm = "yes" == params.get("start_vm", "yes")
    pause_vm = "yes" == params.get("pause_after_start_vm", "no")
    expect_succeed = "yes" == params.get("expect_succeed", "yes")
    size_option = params.get("size_option", "valid")
    action = params.get("compcache_action", "get")
    vm = env.get_vm(vm_name)

    # Check if the virsh command migrate-compcache is available
    if not virsh.has_help_command("migrate-compcache"):
        raise error.TestNAError("This version of libvirt does not support " "virsh command migrate-compcache")

    # Prepare the VM state if it's not correct.
    if start_vm and not vm.is_alive():
        vm.start()
    elif not start_vm and vm.is_alive():
        vm.destroy()
    if pause_vm and not vm.is_paused():
        vm.pause()

    # Setup domain reference
    if vm_ref == "domname":
        vm_ref = vm_name

    # Setup size according to size_option:
    # minimal: Same as memory page size
    # maximal: Same as guest memory
    # empty: An empty string
    # small: One byte less than page size
    # large: Larger than guest memory
    # huge : Largest int64
    page_size = get_page_size()
    if size_option == "minimal":
        size = str(page_size)
    elif size_option == "maximal":
        size = str(vm.get_max_mem() * 1024)
    elif size_option == "empty":
        size = '""'
    elif size_option == "small":
        size = str(page_size - 1)
    elif size_option == "large":
        # Add 50MB to the guest's max memory so that the cache size
        # is guaranteed to exceed guest memory.
        size = str(vm.get_max_mem() * 1024 + 50000000)
    elif size_option == "huge":
        size = str(2 ** 64 - 1)
    else:
        size = size_option

    # If we need to get, just omit the size option
    if action == "get":
        size = None

    # Run testing command
    result = virsh.migrate_compcache(vm_ref, size=size)
    logging.debug(result)

    remote_uri = params.get("compcache_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    check_job_compcache = False
    compressed_size = None
    if not remote_host.count("EXAMPLE") and size is not None and expect_succeed:
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_pwd, port=22)
        if vm.is_dead():
            vm.start()
        if vm.is_paused():
            vm.resume()
        vm.wait_for_login()
        # Do actual migration to verify compression cache of migrate jobs
        command = "virsh migrate %s %s --compressed --unsafe --verbose" % (vm_name, remote_uri)
        logging.debug("Start migrating: %s", command)
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        # Give enough time for starting job
        t = 0
        while t < 5:
            jobinfo = virsh.domjobinfo(vm_ref, debug=True, ignore_status=True).stdout
            jobtype = "None"
            for line in jobinfo.splitlines():
                key = line.split(":")[0]
                if key.count("type"):
                    jobtype = line.split(":")[-1].strip()
                elif key.strip() == "Compression cache":
                    compressed_size = line.split(":")[-1].strip()
            if "None" == jobtype or compressed_size is None:
                t += 1
                time.sleep(1)
                continue
            else:
                check_job_compcache = True
                logging.debug("Job started: %s", jobtype)
                break

        if p.poll():
            try:
                p.kill()
            except OSError:
                pass

        # Cleanup in case of successful migration
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # Shut down the VM to make sure the compcache setting cleared
    if vm.is_alive():
        vm.destroy()

    # Check test result
    if expect_succeed:
        if result.exit_status != 0:
            raise error.TestFail("Expected succeed, but failed with result:\n%s" % result)
        if check_job_compcache:
            value = compressed_size.split()[0].strip()
            unit = compressed_size.split()[-1].strip()
            value = int(float(value))
            if unit == "KiB":
                size = int(int(size) / 1024)
            elif unit == "MiB":
                size = int(int(size) / 1048576)
            elif unit == "GiB":
                size = int(int(size) / 1073741824)
            if value != size:
                raise error.TestFail("Compression cache does not match "
                                     "the value that was set")
        else:
            logging.warning("The compressed size was not verified during migration.")
    elif not expect_succeed:
        if result.exit_status == 0:
            raise error.TestFail("Expected fail, but succeed with result:\n%s" % result)
Example 14
def run(test, params, env):
    """
    Test command: virsh domjobinfo.

    The command returns information about jobs running on a domain.
    1.Prepare test environment.
    2.When the libvirtd == "off", stop the libvirtd service.
    3.Perform job action on a domain.
    4.Get running and completed job info by virsh domjobinfo.
    5.Recover test environment.
    6.Confirm the test result.
    """

    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute background virsh command, return subprocess w/o waiting for exit()

        :param action : virsh command and its option.
        :param vm_name : VM's name
        :param file : virsh command's file option, could be vm.dump, vm.save, etc.
        """
        command = "virsh %s %s %s" % (action, vm_name, file)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    def cmp_jobinfo(result, info_list, job_type, actions):
        """
        Compare the output jobinfo with expected one

        :param result : the return from domjobinfo cmd
        :param info_list : an expected domjobinfo list
        :param job_type : an expected value for 'Job Type'
        :param actions : the job operation
        """
        logging.debug(result.stdout)
        out_list = result.stdout.strip().splitlines()
        out_dict = dict([x.split(':') for x in out_list])
        ret_cmp = set(out_dict.keys()) == set(info_list)
        if not ret_cmp:
            test.fail("Not all output jobinfo items are as expected: Expect:%s, but get %s"
                      % (set(info_list), set(out_dict.keys())))
        else:
            if out_dict["Job type"].strip() != job_type:
                test.fail("Expect %s Job type but got %s" %
                          (job_type, out_dict["Job type"].strip()))
            if out_dict["Operation"].strip() != actions.capitalize():
                test.fail("Expect %s Operation but got %s" %
                          (actions.capitalize(), out_dict["Operation"].strip()))

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After starting the VM, wait for some time to make sure a job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    actions = params.get("domjobinfo_action", "dump")
    act_opt = params.get("dump_opt", "")
    vm_ref = params.get("domjobinfo_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    # Use tmp_pipe to act as target file for job operation in subprocess,
    # such as vm.dump, vm.save, etc.
    tmp_pipe = os.path.join(data_dir.get_tmp_dir(), "domjobinfo.fifo")

    # Build job action
    action = ' '.join([actions, act_opt])
    if actions == "managedsave":
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    # Expected domjobinfo list
    info_list = ["Job type", "Time elapsed",
                 "Data processed", "Data remaining", "Data total",
                 "Memory processed", "Memory remaining",
                 "Memory total", "Dirty rate",
                 "Iteration", "Constant pages", "Normal pages",
                 "Normal data", "Expected downtime", "Setup time"]
    if libvirt_version.version_compare(3, 2, 0):
        info_list.insert(1, "Operation")
        if libvirt_version.version_compare(3, 9, 0):
            info_list.insert(info_list.index("Dirty rate")+1, "Page size")
            if libvirt_version.version_compare(5, 0, 0):
                info_list.insert(info_list.index("Iteration")+1, "Postcopy requests")
    logging.debug("The expected info_list for running job is %s", info_list)

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domjobinfo_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif 'invalid' in vm_ref:
        vm_ref = params.get(vm_ref)

    # Get the subprocess of VM.
    # The goal is to get domjobinfo for a running domain job,
    # so before running "domjobinfo" we must create a job on the domain.
    process = None
    if start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        os.mkfifo(tmp_pipe)

        # Target file param is not needed for managedsave operation
        if action == "managedsave ":
            process = get_subprocess(action, vm_name, "", None)
        else:
            process = get_subprocess(action, vm_name, tmp_pipe, None)

        f = open(tmp_pipe, 'rb')
        dummy = f.read(1024 * 1024).decode(locale.getpreferredencoding(), 'ignore')

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Give enough time for starting job
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break

    # Get domjobinfo while job is running
    ret = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True)
    status = ret.exit_status

    # Clear process env
    if process and f:
        dummy = f.read()
        f.close()

        try:
            os.unlink(tmp_pipe)
        except OSError as detail:
            logging.info("Can't remove %s: %s", tmp_pipe, detail)

    if process:
        if process.poll():
            try:
                process.kill()
            except OSError:
                pass

    # Get completed domjobinfo
    if status_error == "no":
        time.sleep(5)
        if act_opt != "--live" and vm_ref == domid:
            # use vm_name but not id since domain shutoff
            vm_ref = vm_name
        vm_ref = "%s --completed" % vm_ref
        ret_cmplt = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True)
        status_cmplt = ret_cmplt.exit_status

    # Recover the environment.
    if actions == "managedsave":
        virsh.managedsave_remove(vm_name, ignore_status=True)
    if pre_vm_state == "suspend":
        vm.resume()
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or status_cmplt != 0:
            test.fail("Run failed with right command")

    if status_error == "no":
        # The 'managedsave' Operation will be shown as 'Save' in domjobinfo
        if actions == "managedsave":
            actions = "save"
        # Check output of "virsh domjobinfo"
        cmp_jobinfo(ret, info_list, "Unbounded", actions)
        # Check output of "virsh domjobinfo --completed"
        info_list.insert(info_list.index("Memory total")+1, "Memory bandwidth")
        info_list[info_list.index("Expected downtime")] = "Total downtime"
        logging.debug("The expected info_list for completed job is %s", info_list)
        cmp_jobinfo(ret_cmplt, info_list, "Completed", actions)
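A compact sketch of how the expected domjobinfo keys grow with the libvirt version, following the version_compare insertions above; the base list here is abbreviated and version tuples are compared directly instead of through libvirt_version.

BASE_KEYS = ["Job type", "Time elapsed", "Dirty rate", "Iteration", "Expected downtime"]  # abbreviated

def expected_keys(version):
    """version is a (major, minor, micro) tuple; purely illustrative."""
    keys = list(BASE_KEYS)
    if version >= (3, 2, 0):
        keys.insert(1, "Operation")
    if version >= (3, 9, 0):
        keys.insert(keys.index("Dirty rate") + 1, "Page size")
    if version >= (5, 0, 0):
        keys.insert(keys.index("Iteration") + 1, "Postcopy requests")
    return keys

print(expected_keys((5, 0, 0)))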
Example 15
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    def set_feature(vmxml, feature, value):
        """
        Set guest features for PPC

        :param vmxml: guest xml
        :param feature: the feature to set, e.g. 'hpt' or 'htm'
        :param value: the value to set for the feature
        """
        features_xml = vm_xml.VMFeaturesXML()
        if feature == 'hpt':
            features_xml.hpt_resizing = value
        elif feature == 'htm':
            features_xml.htm = value
        vmxml.features = features_xml
        vmxml.sync()

    def trigger_hpt_resize(session):
        """
        Check the HPT order file and dmesg

        :param session: the session to guest

        :raise: test.fail if required message is not found
        """
        hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        hpt_order += 1
        cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
        cmd_result = session.cmd_status_output(cmd)
        result = process.CmdResult(stderr=cmd_result[1],
                                   stdout=cmd_result[1],
                                   exit_status=cmd_result[0])
        libvirt.check_exit_status(result)
        dmesg = session.cmd('dmesg')
        dmesg_content = params.get('dmesg_content').split('|')
        for content in dmesg_content:
            if content % hpt_order not in dmesg:
                test.fail("'%s' is missing in dmesg" % (content % hpt_order))
            else:
                logging.info("'%s' is found in dmesg", content % hpt_order)

    def check_qemu_cmd_line(content, err_ignore=False):
        """
        Check the specified content in the qemu command line

        :param content: the desired string to search
        :param err_ignore: True to return False when fail
                           False to raise exception when fail

        :return: True if exist, False otherwise
        """
        cmd = 'ps -ef|grep qemu|grep -v grep'
        qemu_line = results_stdout_52lts(process.run(cmd, shell=True))
        if content not in qemu_line:
            if err_ignore:
                return False
            else:
                test.fail("Expected '%s' was not found in "
                          "qemu command line" % content)
        return True

    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host

        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'" %
                        (command, option))

    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add multiple controller devices to the guest XML

        :param vm_xml: the guest XML to be updated
        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()
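        # As an illustration, add_ctrls(new_xml, dev_index="3") is expected to
        # append controllers roughly like the following to the guest XML
        # (a sketch, not verbatim libvirt output):
        #
        #   <controller type='pci' index='0' model='pci-root'/>
        #   ...
        #   <controller type='pci' index='3' model='pci-root'/>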

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters
        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line

        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
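        # Under the hood this is roughly equivalent to running
        # "virsh migrate <options> <vm_name> <dest_uri> <extra>",
        # e.g. "virsh migrate --live --p2p <vm_name> qemu+ssh://<dest>/system";
        # the exact options come from the cfg parameters (a sketch only).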
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        logging.info("Migration out: %s",
                     results_stdout_52lts(migration_res).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(migration_res).strip())
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        run_remote_cmd(cmd)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing uni-directional migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()

        except Exception as detail:
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_remote_cmd(cmd):
        """
        A function to run a command on remote host.

        :param cmd: the command to be executed

        :return: CmdResult object
        """
        remote_runner = remote.RemoteRunner(host=server_ip,
                                            username=server_user,
                                            password=server_pwd)
        cmdResult = remote_runner.run(cmd, ignore_status=True)
        if cmdResult.exit_status:
            test.fail("Failed to run '%s' on remote: %s" %
                      (cmd, results_stderr_52lts(cmdResult).strip()))
        return cmdResult

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get(
            "stress_args", "--cpu 8 --io 4 "
            "--vm 2 --vm-bytes 128M "
            "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            logging.debug(detail)

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            test.fail("After timeout '%s' seconds, "
                      "the vm state on target host should "
                      "be 'running', but '%s' found" % (timeout, vm_state))
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get the smallest number that is bigger than pagesize and is a power of two.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def calculate(num):
            result = num & (num - 1)
            return (result == 0)
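        # A power of two has exactly one bit set, so clearing its lowest set
        # bit with "num & (num - 1)" yields 0 (e.g. 8192 & 8191 == 0, while
        # 8193 & 8192 != 0).  Starting from pagesize + 1, the loop below stops
        # at the first value for which this holds.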

        item = pagesize
        found = False
        while (not found):
            item += 1
            found = calculate(item)
        logging.debug(
            "%d is the smallest number that is bigger than '%s' and "
            "is a power of 2", item, pagesize)
        return item

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"ignore_status": True, "debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    contrl_index = params.get("new_contrl_index", None)
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    hpt_resize = params.get("hpt_resize", None)
    htm_state = params.get("htm_state", None)
    qemu_check = params.get("qemu_check", None)
    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    arch = platform.machine()
    if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch:
        test.cancel("The case is PPC only.")

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False
    asynch_migration = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up the XML file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Change VM xml in below part
        if contrl_index:
            new_xml.remove_all_device_by_type('controller')
            logging.debug("After removing controllers, current XML:\n%s\n",
                          new_xml)
            add_ctrls(new_xml, dev_index=contrl_index)

        if extra.count("--tls"):
            qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            # Setup qemu configure
            logging.debug("Configure the qemu")
            cleanup_libvirtd_log(log_file)
            qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict,
                                                         config_type="qemu",
                                                         remote_host=True,
                                                         extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, remote_host=True, extra_params=params)
        if hpt_resize:
            set_feature(new_xml, 'hpt', hpt_resize)

        if htm_state:
            set_feature(new_xml, 'htm', htm_state)
        # Change the disk of the vm to shared disk and then start VM
        libvirt.set_vm_disk(vm, params)
        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        if qemu_check:
            check_content = qemu_check
            if hpt_resize:
                check_content = "%s%s" % (qemu_check, hpt_resize)
            if htm_state:
                check_content = "%s%s" % (qemu_check, htm_state)
            check_qemu_cmd_line(check_content)

        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        if hpt_resize and hpt_resize != 'disabled':
            trigger_hpt_resize(vm_session)

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm, args=())
            stress_thread.start()

        if extra.count("timeout-postcopy"):
            asynch_migration = True
            func_name = check_timeout_postcopy
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options and not extra.count(postcopy_options):
            extra = "%s %s" % (extra, postcopy_options)

        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()

            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms,
                                            None,
                                            dest_uri,
                                            'orderly',
                                            options,
                                            thread_timeout=900,
                                            ignore_status=True,
                                            func=func_name,
                                            extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        if int(mig_result.exit_status) != 0:
            test.fail(results_stderr_52lts(mig_result).strip())

        if check_complete_job:
            search_str_domjobinfo = params.get("search_str_domjobinfo", None)
            opts = "--completed"
            args = vm_name + " " + opts
            check_virsh_command_and_option("domjobinfo", opts)
            jobinfo = results_stdout_52lts(
                virsh.domjobinfo(args, debug=True,
                                 ignore_status=True)).strip()
            logging.debug("Local job info on completion:\n%s", jobinfo)
            if extra.count("comp-xbzrle-cache") and search_str_domjobinfo:
                search_str_domjobinfo = "%s %s" % (search_str_domjobinfo,
                                                   cache // 1024)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    test.fail("Fail to search '%s' on local:\n%s" %
                              (search_str_domjobinfo, jobinfo))
            # Check remote host
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            jobinfo = results_stdout_52lts(
                remote_virsh_session.domjobinfo(args,
                                                debug=True,
                                                ignore_status=True)).strip()
            logging.debug("Remote job info on completion:\n%s", jobinfo)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' on remote:\n%s" %
                              (search_str_domjobinfo, jobinfo))
            remote_virsh_session.close_session()

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            run_remote_cmd(cmd)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if hpt_resize:
                check_str = hpt_resize
            elif htm_state:
                check_str = htm_state
            if hpt_resize or htm_state:
                xml_check_after_mig = "%s'%s'" % (xml_check_after_mig,
                                                  check_str)
                if not re.search(xml_check_after_mig, target_guest_dumpxml):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' in target guest XML:\n%s" %
                              (xml_check_after_mig, target_guest_dumpxml))

            if contrl_index:
                all_ctrls = re.findall(xml_check_after_mig,
                                       target_guest_dumpxml)
                if len(all_ctrls) != int(contrl_index) + 1:
                    remote_virsh_session.close_session()
                    test.fail(
                        "%s pci-root controllers are expected in guest XML, "
                        "but found %s" %
                        (int(contrl_index) + 1, len(all_ctrls)))
            remote_virsh_session.close_session()

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        check_vm_network_accessed(server_session)
        server_session.close()
    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls"):
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and not is_TestFail
                    and not is_TestSkip):
                raise exception_detail
            else:
                # if any of the above exceptions has been raised, only print
                # the error log here to avoid hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
Esempio n. 16
0
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name,
                                       resume_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name,
                                       complete_cmd,
                                       debug=True,
                                       ignore_status=False)
    except error.CmdError as detail:
        blockjob_failures.append(str(detail))

    # Job info FYI
    virsh.domjobinfo(vm.name, debug=True)

    if len(blockjob_failures):
        timeout = 30

    migration_thread.join(timeout)
    if migration_thread.isAlive():
        logging.error("Migrate %s timeout.", migration_thread)
        cp_mig.RET_LOCK.acquire()
        cp_mig.RET_MIGRATION = False
        cp_mig.RET_LOCK.release()

    if len(blockjob_failures):
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        raise error.TestFail("Run qemu monitor command failed %s" %
                             blockjob_failures)

def run(test, params, env):
    def check_vm_network_accessed():
        """
        Check that the local VM can be accessed through the network;
        this needs to be done before migration happens
        """
        # 1. Confirm local VM can be accessed through network.
        logging.info("Check local VM network connectivity before migrating")
        s_ping, o_ping = utils_test.ping(vm.get_address(),
                                         count=10,
                                         timeout=20)
        logging.info(o_ping)
        if s_ping != 0:
            test.error("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'" %
                        (command, option))

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters
        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line

        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        logging.info("Migration out: %s",
                     results_stdout_52lts(migration_res).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(migration_res).strip())
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        run_remote_cmd(cmd)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing uni-directional migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()

        except Exception as detail:
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_remote_cmd(cmd):
        """
        A function to run a command on remote host.

        :param cmd: the command to be executed

        :return: CmdResult object
        """
        remote_runner = remote.RemoteRunner(host=server_ip,
                                            username=server_user,
                                            password=server_pwd)
        cmdResult = remote_runner.run(cmd, ignore_status=True)
        if cmdResult.exit_status:
            test.fail("Failed to run '%s' on remote: %s" %
                      (cmd, results_stderr_52lts(cmdResult).strip()))
        return cmdResult

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get(
            "stress_args", "--cpu 8 --io 4 "
            "--vm 2 --vm-bytes 128M "
            "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            logging.debug(detail)

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            test.fail("After timeout '%s' seconds, "
                      "the vm state on target host should "
                      "be 'running', but '%s' found" % (timeout, vm_state))
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get the smallest number that is bigger than pagesize and is a power of two.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def calculate(num):
            result = num & (num - 1)
            return (result == 0)

        item = pagesize
        found = False
        while (not found):
            item += 1
            found = calculate(item)
        logging.debug(
            "%d is the smallest number that is bigger than '%s' and "
            "is a power of 2", item, pagesize)
        return item

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"ignore_status": True, "debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False
    asynch_migration = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up the XML file.
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Change the disk of the vm to shared disk
        libvirt.set_vm_disk(vm, params)

        if extra.count("--tls"):
            qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            # Setup qemu configure
            logging.debug("Configure the qemu")
            cleanup_libvirtd_log(log_file)
            qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict,
                                                         config_type="qemu",
                                                         remote_host=True,
                                                         extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, remote_host=True, extra_params=params)
        if not vm.is_alive():
            vm.start()
        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm, args=())
            stress_thread.start()

        if extra.count("timeout-postcopy"):
            asynch_migration = True
            func_name = check_timeout_postcopy
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options and not extra.count(postcopy_options):
            extra = "%s %s" % (extra, postcopy_options)

        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()

            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms,
                                            None,
                                            dest_uri,
                                            'orderly',
                                            options,
                                            thread_timeout=900,
                                            ignore_status=True,
                                            func=func_name,
                                            extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        if int(mig_result.exit_status) != 0:
            test.fail(results_stderr_52lts(mig_result).strip())
        if check_complete_job:
            search_str_domjobinfo = params.get("search_str_domjobinfo", None)
            opts = "--completed"
            args = vm_name + " " + opts
            check_virsh_command_and_option("domjobinfo", opts)
            jobinfo = results_stdout_52lts(
                virsh.domjobinfo(args, debug=True,
                                 ignore_status=True)).strip()
            logging.debug("Local job info on completion:\n%s", jobinfo)
            if extra.count("comp-xbzrle-cache") and search_str_domjobinfo:
                search_str_domjobinfo = "%s %s" % (search_str_domjobinfo,
                                                   cache // 1024)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    test.fail("Fail to search '%s' on local:\n%s" %
                              (search_str_domjobinfo, jobinfo))
            # Check remote host
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            jobinfo = results_stdout_52lts(
                remote_virsh_session.domjobinfo(args,
                                                debug=True,
                                                ignore_status=True)).strip()
            logging.debug("Remote job info on completion:\n%s", jobinfo)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' on remote:\n%s" %
                              (search_str_domjobinfo, jobinfo))
            remote_virsh_session.close_session()
        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            run_remote_cmd(cmd)

    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls"):
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and not is_TestFail
                    and not is_TestSkip):
                raise exception_detail
            else:
                # if any of the above exceptions has been raised, only print
                # the error log here to avoid hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
Esempio n. 18
0
def run(test, params, env):
    """
    Test command: virsh domjobinfo.

    The command returns information about jobs running on a domain.
    1. Prepare test environment.
    2. When the libvirtd == "off", stop the libvirtd service.
    3. Perform job action on a domain.
    4. Get running and completed job info by virsh domjobinfo.
    5. Recover test environment.
    6. Confirm the test result.
    """

    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute background virsh command, return subprocess w/o waiting for exit()

        :param action : virsh command.
        :param vm_name : VM's name
        :param file : virsh command's file option.
        """
        command = "virsh %s %s %s" % (action, vm_name, file)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p
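        # For instance, get_subprocess("dump", "avocado-vt-vm1", "/tmp/domjobinfo.fifo")
        # would spawn "virsh dump avocado-vt-vm1 /tmp/domjobinfo.fifo" in the
        # background; the VM name and fifo path are purely illustrative.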

    def cmp_jobinfo(result, info_list, job_type, action):
        """
        Compare the output jobinfo with expected one

        :param result : the return from domjobinfo cmd
        :param info_list : an expected domjobinfo list
        :param job_type : an expected value for 'Job Type'
        :param action : the job operation
        """
        logging.debug(result.stdout)
        out_list = result.stdout.strip().splitlines()
        # Split on the first ':' only, so values containing ':' cannot break dict()
        out_dict = dict([x.split(':', 1) for x in out_list])
        ret_cmp = set(out_dict.keys()) == set(info_list)
        if not ret_cmp:
            test.fail("Not all output jobinfo items are as expected: Expect:%s, but get %s"
                      % (set(info_list), set(out_dict.keys())))
        else:
            if out_dict["Job type"].strip() != job_type:
                test.fail("Expect %s Job type but got %s" %
                          (job_type, out_dict["Job type"].strip()))
            if out_dict["Operation"].strip() != action.capitalize():
                test.fail("Expect %s Operation but got %s" %
                          (action.capitalize(), out_dict["Operation"].strip()))

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    action = params.get("domjobinfo_action", "dump")
    vm_ref = params.get("domjobinfo_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domjobinfo.tmp")
    tmp_pipe = os.path.join(data_dir.get_tmp_dir(), "domjobinfo.fifo")
    # Expected domjobinfo list
    info_list = ["Job type", "Time elapsed",
                 "Data processed", "Data remaining", "Data total",
                 "Memory processed", "Memory remaining",
                 "Memory total", "Dirty rate",
                 "Iteration", "Constant pages", "Normal pages",
                 "Normal data", "Expected downtime", "Setup time"]
    if libvirt_version.version_compare(3, 2, 0):
        info_list.insert(1, "Operation")
        if libvirt_version.version_compare(3, 9, 0):
            info_list.insert(info_list.index("Dirty rate")+1, "Page size")
    logging.debug("The expected info_list for running job is %s", info_list)

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domjobinfo_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif 'invalid' in vm_ref:
        vm_ref = params.get(vm_ref)

    # Start a background virsh job on the VM.
    # "domjobinfo" reports on a running domain job, so a job must be
    # created on the domain before "domjobinfo" is executed.
    process = None
    if start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        os.mkfifo(tmp_pipe)
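        # The dump/save data is written into a FIFO that is drained slowly,
        # which keeps the virsh job running long enough for domjobinfo to
        # observe it.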

        process = get_subprocess(action, vm_name, tmp_pipe, None)

        f = open(tmp_pipe, 'rb')
        dummy = f.read(1024 * 1024).decode(locale.getpreferredencoding(), 'ignore')

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Give enough time for starting job
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break

    # Get domjobinfo while job is running
    ret = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True)
    status = ret.exit_status

    # Clear process env
    if process and f:
        dummy = f.read()
        f.close()

        try:
            os.unlink(tmp_pipe)
        except OSError as detail:
            logging.info("Can't remove %s: %s", tmp_pipe, detail)
        try:
            os.unlink(tmp_file)
        except OSError as detail:
            logging.info("Cant' remove %s: %s", tmp_file, detail)

    if process:
        # Kill the background virsh process if it is still running
        if process.poll() is None:
            try:
                process.kill()
            except OSError:
                pass

    # Get completed domjobinfo
    if status_error == "no":
        vm_ref = "%s --completed" % vm_ref
        ret_cmplt = virsh.domjobinfo(vm_ref, ignore_status=True)
        status_cmplt = ret_cmplt.exit_status

    # Recover the environment.
    if pre_vm_state == "suspend":
        vm.resume()
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0 or status_cmplt != 0:
            test.fail("Run failed with right command")

    if status_error == "no":
        # Check output of "virsh domjobinfo"
        cmp_jobinfo(ret, info_list, "Unbounded", action)
        # Check output of "virsh domjobinfo --completed"
        info_list.insert(info_list.index("Memory total")+1, "Memory bandwidth")
        info_list[info_list.index("Expected downtime")] = "Total downtime"
        logging.debug("The expected info_list for completed job is %s", info_list)
        cmp_jobinfo(ret_cmplt, info_list, "Completed", action)
Esempio n. 19
0
def run_virsh_domjobinfo(test, params, env):
    """
    Test command: virsh domjobinfo.

    The command returns information about jobs running on a domain.
    1. Prepare test environment.
    2. When the libvirtd == "off", stop the libvirtd service.
    3. Perform virsh domjobinfo operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    pre_vm_state = params.get("domjobinfo_pre_vm_state", "null")
    vm_ref = params.get("domjobinfo_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    tmp_file = os.path.join(test.tmpdir, '%s.tmp' % vm_name)

    # prepare the state of vm
    if pre_vm_state == "dump":
        virsh.dump(vm_name, tmp_file)
    elif pre_vm_state == "save":
        virsh.save(vm_name, tmp_file)
    elif pre_vm_state == "restore":
        virsh.save(vm_name, tmp_file)
        virsh.restore(tmp_file)
    elif pre_vm_state == "managedsave":
        virsh.managedsave(vm_name)

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domjobinfo_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    status = virsh.domjobinfo(vm_ref, ignore_status=True).exit_status

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")

def copied_migration(vm, params, blockjob_type=None, block_target="vda"):
    """
    Migrate VMs with storage copied while under some stress.
    During the migration, some qemu-monitor-command calls are sent.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("remote_user")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    options = "--live %s --unsafe" % copy_option

    # Get vm ip for remote checking
    if vm.is_dead():
        vm.start()
    vm.wait_for_login()
    vms_ip = {}
    vms_ip[vm.name] = vm.get_address()
    logging.debug("VM %s IP: %s", vm.name, vms_ip[vm.name])

    # Start to load stress
    stress_type = params.get("migrate_stress_type")
    if stress_type == "cpu":
        params['stress_args'] = "--cpu 2 --quiet --timeout 60"
    elif stress_type == "memory":
        params['stress_args'] = "--vm 2 --vm-bytes 256M --vm-keep --timeout 60"
    if stress_type is not None:
        utils_test.load_stress("stress_in_vms", [vm], params)

    cp_mig = utlv.MigrationTest()
    migration_thread = threading.Thread(target=cp_mig.thread_func_migration,
                                        args=(vm, dest_uri, options))
    migration_thread.start()
    # Wait for migration launched
    time.sleep(5)
    job_ret = virsh.domjobinfo(vm.name, debug=True)
    if job_ret.exit_status:
        raise error.TestError("Prepare migration for blockjob failed.")

    # Execute some qemu monitor commands
    pause_cmd = "block-job-pause %s" % block_target
    resume_cmd = "block-job-resume %s" % block_target
    cancel_cmd = "block-job-cancel %s" % block_target
    complete_cmd = "block-job-complete %s" % block_target

    blockjob_failures = []
    try:
        if blockjob_type == "cancel":
            virsh.qemu_monitor_command(vm.name, cancel_cmd, debug=True,
                                       ignore_status=False)
        elif blockjob_type == "pause_resume":
            virsh.qemu_monitor_command(vm.name, pause_cmd, debug=True,
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name, resume_cmd, debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name, complete_cmd, debug=True,
                                       ignore_status=False)
    except error.CmdError as detail:
        blockjob_failures.append(str(detail))
Esempio n. 21
0
def run(test, params, env):
    """
    Test command: virsh domjobinfo.

    The command returns information about jobs running on a domain.
    1. Prepare test environment.
    2. When the libvirtd == "off", stop the libvirtd service.
    3. Perform virsh domjobinfo operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    pre_vm_state = params.get("domjobinfo_pre_vm_state", "null")
    vm_ref = params.get("domjobinfo_vm_ref")
    status_error = params.get("status_error", "no")
    libvirtd = params.get("libvirtd", "on")
    tmp_file = os.path.join(test.tmpdir, '%s.tmp' % vm_name)

    # prepare the state of vm
    if pre_vm_state == "dump":
        virsh.dump(vm_name, tmp_file)
    elif pre_vm_state == "save":
        virsh.save(vm_name, tmp_file)
    elif pre_vm_state == "restore":
        virsh.save(vm_name, tmp_file)
        virsh.restore(tmp_file)
    elif pre_vm_state == "managedsave":
        virsh.managedsave(vm_name)

    # run test case
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, params.get("domjobinfo_extra"))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    status = virsh.domjobinfo(vm_ref, ignore_status=True).exit_status

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")