def do_stress_migration(vms,
                        srcuri,
                        desturi,
                        stress_type,
                        migration_type,
                        params,
                        thread_timeout=60):
    """
    Migrate vms with stress.

    :param vms: list of VM objects to be migrated.
    """
    fail_info = utils_test.load_stress(stress_type, vms, params)
    if fail_info:
        logging.warning("Adding stress for migration failed: %s", fail_info)

    migtest = utlv.MigrationTest()
    migtest.do_migration(vms,
                         srcuri,
                         desturi,
                         migration_type,
                         options=None,
                         thread_timeout=thread_timeout)

    # VMs on the source are shut down after migration, so no in-guest cleanup
    # is needed; migrated VMs may also not be reachable for login if the
    # network is a local LAN.
    if stress_type == "stress_on_host":
        utils_test.unload_stress(stress_type, vms)

    if not migtest.RET_MIGRATION:
        raise error.TestFail("Migration with stress failed.")
Example #2
def copied_migration(vms, params):
    """
    Migrate vms with storage copied.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("remote_ip")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("remote_user")
    password = params.get("remote_pwd")
    timeout = int(params.get("thread_timeout", 1200))
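    # The storage-copy migration must run live; copy_option is expected to be
    # a virsh storage flag such as --copy-storage-all or --copy-storage-inc.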
    options = "--live %s" % copy_option

    # Get vm ip for remote checking
    vms_ip = {}
    for vm in vms:
        if vm.is_dead():
            vm.start()
        vm.wait_for_login()
        vms_ip[vm.name] = vm.get_address()

    cp_mig = utlv.MigrationTest()
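    # A None source URI means the migration starts from the local host;
    # "orderly" migrates the vms one after another rather than in parallel.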
    cp_mig.do_migration(vms, None, dest_uri, "orderly", options, timeout)
    check_ip_failures = []
    if cp_mig.RET_MIGRATION:
        for vm in vms:
            try:
                utils_test.check_dest_vm_network(vm, vms_ip[vm.name],
                                                 remote_host, username,
                                                 password)
            except error.TestFail as detail:
                check_ip_failures.append(str(detail))
            cp_mig.cleanup_dest_vm(vm, None, dest_uri)
Example #3
def run(test, params, env):
    """
    Test KVM migration scenarios
    """
    migrate_options = params.get("migrate_options", "")
    migrate_postcopy = params.get("migrate_postcopy", "")
    migrate_dest_ip = params.get("migrate_dest_host")
    nfs_mount_path = params.get("nfs_mount_dir")
    migrate_start_state = params.get("migrate_start_state", "paused")
    postcopy_func = None
    if migrate_postcopy:
        postcopy_func = virsh.migrate_postcopy
    migrate_type = params.get("migrate_type", "orderly")
    vm_state = params.get("migrate_vm_state", "running")
    ping_count = int(params.get("ping_count", 15))

    vms = params.get("vms").split()
    vm_list = env.get_all_vms()

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = nfs_mount_path

    src_uri = "qemu:///system"
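    # complete_uri() builds the remote connection URI for the destination
    # host from its IP address (typically a qemu+ssh:// URI).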
    dest_uri = libvirt_vm.complete_uri(params["server_ip"])

    vmxml_dict = {}

    migrate_setup = libvirt.MigrationTest()
    try:
        for vm in vm_list:
            vmxml_dict[vm.name] = vm_xml.VMXML.new_from_dumpxml(vm.name)
            params["source_dist_img"] = "%s-nfs-img" % vm.name
            if vm.is_alive():
                vm.destroy()
            libvirt.set_vm_disk(vm, params)
            migrate_setup.ping_vm(vm, test, params, ping_count=ping_count)
        try:
            migrate_setup.do_migration(vm_list,
                                       src_uri,
                                       dest_uri,
                                       migrate_type,
                                       migrate_options,
                                       func=postcopy_func,
                                       migrate_start_state=migrate_start_state)
        except Exception as info:
            test.fail(info)
        for vm in vm_list:
            if not migrate_setup.check_vm_state(vm, vm_state, dest_uri):
                test.fail("Migrated VMs failed to be in %s state at "
                          "destination" % vm_state)
            logging.info("Guest state is '%s' at destination is as expected",
                         vm_state)
            migrate_setup.ping_vm(vm,
                                  test,
                                  params,
                                  uri=dest_uri,
                                  ping_count=ping_count)
def do_stress_migration(vms,
                        srcuri,
                        desturi,
                        stress_type,
                        migration_type,
                        params,
                        thread_timeout=60):
    """
    Migrate vms with stress.

    :param vms: migrated vms.
    :param srcuri: connect uri for source machine
    :param desturi: connect uri for destination machine
    :param stress_type: type of stress test in VM
    :param migration_type: type of migration to be performed
    :param params: Test dict params
    :param thread_timeout: default timeout for migration thread

    :raise: exceptions.TestFail if migration fails
    """
    fail_info = utils_test.load_stress(stress_type, vms, params)

    migtest = utlv.MigrationTest()
    options = ''
    if migration_type == "compressed":
        options = "--compressed"
        migration_type = "orderly"
        shared_dir = os.path.dirname(data_dir.get_data_dir())
        src_file = os.path.join(shared_dir, "scripts", "duplicate_pages.py")
        dest_dir = "/tmp"
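        # duplicate_pages.py (shipped with the test data) presumably fills
        # guest memory with duplicated pages so that the compressed migration
        # has compressible data to work on.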
        for vm in vms:
            session = vm.wait_for_login()
            vm.copy_files_to(src_file, dest_dir)
            status = session.cmd_status("cd /tmp;python duplicate_pages.py")
            if status:
                fail_info.append("Setting duplicated pages for vm failed.")

    if fail_info:
        logging.warning("Adding stress for migration failed: %s", fail_info)

    logging.debug("Starting migration...")
    migrate_options = ("--live --unsafe %s --timeout %s" %
                       (options, params.get("virsh_migrate_timeout", 60)))
    migtest.do_migration(vms,
                         srcuri,
                         desturi,
                         migration_type,
                         options=migrate_options,
                         thread_timeout=thread_timeout)

    # VMs on the source are shut down after migration, so no in-guest cleanup
    # is needed; migrated VMs may also not be reachable for login if the
    # network is a local LAN.
    if stress_type == "stress_on_host":
        utils_test.unload_stress(stress_type, vms)

    if not migtest.RET_MIGRATION:
        raise exceptions.TestFail("Migration with stress failed.")
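
# A minimal usage sketch for do_stress_migration (hypothetical URIs; the
# stress_type value, e.g. "stress_in_vms" or "stress_on_host", is assumed to
# be one that utils_test.load_stress accepts):
#
#   do_stress_migration(vms, "qemu:///system",
#                       "qemu+ssh://dest.example.com/system",
#                       "stress_in_vms", "orderly", params)
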
def copied_migration(test, vms, params):
    """
    Migrate vms with storage copied.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    options = "--live %s" % copy_option

    # Get vm ip for remote checking
    vms_ip = {}
    for vm in vms:
        if vm.is_dead():
            vm.start()
        vm.wait_for_login()
        vms_ip[vm.name] = vm.get_address()

    cp_mig = utlv.MigrationTest()
    cp_mig.do_migration(vms,
                        None,
                        dest_uri,
                        "orderly",
                        options,
                        timeout,
                        ignore_status=True)
    check_ip_failures = []

    if cp_mig.RET_MIGRATION:
        for vm in vms:
            try:
                utils_test.check_dest_vm_network(vm, vms_ip[vm.name],
                                                 remote_host, username,
                                                 password)
            except exceptions.TestFail as detail:
                check_ip_failures.append(str(detail))
    else:
        for vm in vms:
            cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        check_output(test, str(cp_mig.ret), params)
        test.fail("Migrate vms with storage copied failed.")
    if check_ip_failures:
        test.fail("Checking destination VM IPs failed: %s" % check_ip_failures)

    return cp_mig
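
# A minimal usage sketch for copied_migration (hypothetical values; in an
# avocado-vt test, `test`, `vms` and `params` come from the framework):
#
#   params["migrate_dest_uri"] = "qemu+ssh://dest.example.com/system"
#   params["migrate_dest_host"] = "dest.example.com"
#   params["copy_storage_option"] = "--copy-storage-all"
#   cp_mig = copied_migration(test, vms, params)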
Example #6
 # Case for option '--timeout --timeout-suspend'
 # 1. Start the guest
 # 2. Set migration speed to a small value. Ensure the migration
 #    duration is much larger than the timeout value
 # 3. Start the migration
 # 4. When the elapsed time reaches the timeout value, check the guest
 #    state to be paused on both source host and target host
 # 5. Wait for the migration to finish. Check the guest state to be shutoff
 #    on source host and running on target host
 if extra.count("--timeout-suspend"):
     asynch_migration = True
     speed = int(params.get("migrate_speed", 1))
     timeout = int(params.get("timeout_before_suspend", 5))
     logging.debug("Set migration speed to %sM", speed)
     virsh.migrate_setspeed(vm_name, speed, debug=True)
     migration_test = libvirt.MigrationTest()
     migrate_options = "%s %s" % (options, extra)
     vms = [vm]
     params["vm_migration"] = vm
     migration_test.do_migration(vms,
                                 None,
                                 dest_uri,
                                 'orderly',
                                 migrate_options,
                                 thread_timeout=900,
                                 ignore_status=True,
                                 func=check_migration_timeout_suspend,
                                 func_params=params)
     ret_migrate = migration_test.RET_MIGRATION
 if postcopy_cmd != "":
     asynch_migration = True
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    def set_feature(vmxml, feature, value):
        """
        Set guest features for PPC

        :param vmxml: guest xml
        :param feature: the feature to set ('hpt' or 'htm')
        :param value: the feature value
        """
        features_xml = vm_xml.VMFeaturesXML()
        if feature == 'hpt':
            features_xml.hpt_resizing = value
        elif feature == 'htm':
            features_xml.htm = value
        vmxml.features = features_xml
        vmxml.sync()

    def trigger_hpt_resize(session):
        """
        Check the HPT order file and dmesg

        :param session: the session to guest

        :raise: test.fail if required message is not found
        """
        hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        hpt_order += 1
        cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
        cmd_result = session.cmd_status_output(cmd)
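        # session.cmd_status_output() returns (status, combined output), so
        # the same text is passed as both stdout and stderr below.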
        result = process.CmdResult(stderr=cmd_result[1],
                                   stdout=cmd_result[1],
                                   exit_status=cmd_result[0])
        libvirt.check_exit_status(result)
        dmesg = session.cmd('dmesg')
        dmesg_content = params.get('dmesg_content').split('|')
        for content in dmesg_content:
            if content % hpt_order not in dmesg:
                test.fail("'%s' is missing in dmesg" % (content % hpt_order))
            else:
                logging.info("'%s' is found in dmesg", content % hpt_order)

    def check_vm_network_accessed(session=None):
        """
        Check that the VM can be accessed through the network; this is run
        before or after migration happens

        :param session: The session object to the host

        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'" %
                        (command, option))

    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add multiple controller devices to the guest xml

        :param vm_xml: the guest VMXML to modify
        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters

        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line

        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        virsh_args.update({"ignore_status": True})
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        cmd_parms = {
            'server_ip': server_ip,
            'server_user': server_user,
            'server_pwd': server_pwd
        }
        remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing the uni-direction migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()

        except Exception as detail:
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get(
            "stress_args", "--cpu 8 --io 4 "
            "--vm 2 --vm-bytes 128M "
            "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            logging.debug(detail)

    def control_migrate_speed(to_speed=1):
        """
        Control migration duration

        :param to_speed: the speed value in MiB/s to be set for migration
        :return int: the new migration speed after setting
        """
        virsh_args.update({"ignore_status": False})
        old_speed = virsh.migrate_getspeed(vm_name, **virsh_args)
        logging.debug("Current migration speed is %s MiB/s\n",
                      old_speed.stdout.strip())
        logging.debug("Set migration speed to %d MiB/s\n", to_speed)
        cmd_result = virsh.migrate_setspeed(vm_name, to_speed, "",
                                            **virsh_args)
        actual_speed = virsh.migrate_getspeed(vm_name, **virsh_args)
        logging.debug("New migration speed is %s MiB/s\n",
                      actual_speed.stdout.strip())
        return int(actual_speed.stdout.strip())

    def check_setspeed(params):
        """
        Set/get migration speed

        :param params: the parameters used
        :raise: test.fail if speed set does not take effect
        """
        expected_value = int(params.get("migrate_speed",
                                        '41943040')) // (1024 * 1024)
        actual_value = control_migrate_speed(to_speed=expected_value)
        params.update({'compare_to_value': actual_value})
        if actual_value != expected_value:
            test.fail(
                "Migration speed is expected to be '%d MiB/s', but '%d MiB/s' "
                "found" % (expected_value, actual_value))

    def check_domjobinfo(params, option=""):
        """
        Check given item in domjobinfo of the guest is as expected

        :param params: the parameters used
        :param option: options for domjobinfo
        :raise: test.fail if the value of given item is unexpected
        """
        def search_jobinfo(jobinfo):
            """
            Find value of given item in domjobinfo

            :param jobinfo: cmdResult object
            :raise: test.fail if not found
            """
            for item in jobinfo.stdout.splitlines():
                if item.count(jobinfo_item):
                    groups = re.findall(r'[0-9.]+', item.strip())
                    logging.debug("In '%s' search '%s'\n", item, groups[0])
                    if (math.fabs(float(groups[0]) - float(compare_to_value))
                            / float(compare_to_value) > diff_rate):
                        test.fail("{} {} has too much difference from "
                                  "{}".format(jobinfo_item, groups[0],
                                              compare_to_value))
                    break

        jobinfo_item = params.get("jobinfo_item")
        compare_to_value = params.get("compare_to_value")
        logging.debug("compare_to_value:%s", compare_to_value)
        diff_rate = float(params.get("diff_rate", "0"))
        if not jobinfo_item or not compare_to_value:
            return
        vm_ref = '{}{}'.format(vm_name, option)
        jobinfo = virsh.domjobinfo(vm_ref, **virsh_args)
        search_jobinfo(jobinfo)

        check_domjobinfo_remote = params.get("check_domjobinfo_remote")
        if check_domjobinfo_remote:
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            jobinfo = remote_virsh_session.domjobinfo(vm_ref, **virsh_args)
            search_jobinfo(jobinfo)
            remote_virsh_session.close_session()

    def check_maxdowntime(params):
        """
        Set/get migration maxdowntime

        :param params: the parameters used
        :raise: test.fail if maxdowntime set does not take effect
        """
        expected_value = int(
            float(params.get("migrate_maxdowntime", '0.3')) * 1000)
        virsh_args.update({"ignore_status": False})
        old_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("Current migration maxdowntime is %d ms", old_value)
        logging.debug("Set migration maxdowntime to %d ms", expected_value)
        virsh.migrate_setmaxdowntime(vm_name, expected_value, **virsh_args)
        actual_value = int(
            virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("New migration maxdowntime is %d ms", actual_value)
        if actual_value != expected_value:
            test.fail(
                "Migration maxdowntime is expected to be '%d ms', but '%d ms' "
                "found" % (expected_value, actual_value))
        params.update({'compare_to_value': actual_value})

    def do_actions_during_migrate(params):
        """
        The entry point to execute action list during migration

        :param params: the parameters used
        """
        actions_during_migration = params.get("actions_during_migration")
        if not actions_during_migration:
            return
        for action in actions_during_migration.split(","):
            if action == 'setspeed':
                check_setspeed(params)
            elif action == 'domjobinfo':
                check_domjobinfo(params)
            elif action == 'setmaxdowntime':
                check_maxdowntime(params)
            time.sleep(3)

    def attach_channel_xml():
        """
        Create channel xml and attach it to guest configuration
        """
        # Check if pty channel exists already
        for elem in new_xml.devices.by_device_tag('channel'):
            if elem.type_name == channel_type_name:
                logging.debug(
                    "{0} channel already exists in guest. "
                    "No need to add new one".format(channel_type_name))
                return

        params = {
            'channel_type_name': channel_type_name,
            'target_type': target_type,
            'target_name': target_name
        }
        channel_xml = libvirt.create_channel_xml(params)
        virsh.attach_device(domain_opt=vm_name,
                            file_opt=channel_xml.xml,
                            flagstr="--config",
                            ignore_status=False)
        logging.debug("New VMXML with channel:\n%s", virsh.dumpxml(vm_name))

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            test.fail("After timeout '%s' seconds, "
                      "the vm state on target host should "
                      "be 'running', but '%s' found" % (timeout, vm_state))
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get a number which is bigger than pagesize and is a power of two.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def calculate(num):
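            # num is a power of two iff num & (num - 1) == 0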
            result = num & (num - 1)
            return (result == 0)

        item = pagesize
        found = False
        while (not found):
            item += 1
            found = calculate(item)
        logging.debug(
            "%d is the smallest power of 2 that is bigger than '%s'",
            item, pagesize)
        return item

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        logging.info("Migration out: %s", results_stdout_52lts(result).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(result).strip())

        if status_error:  # Migration should fail
            if err_msg:  # Special error messages are expected
                if not re.search(err_msg,
                                 results_stderr_52lts(result).strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" %
                              (err_msg, results_stderr_52lts(result).strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(results_stderr_52lts(result).strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_opt = params.get("virsh_opt", "")
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    contrl_index = params.get("new_contrl_index", None)
    asynch_migration = "yes" == params.get("asynch_migrate", "no")
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    disable_verify_peer = "yes" == params.get("disable_verify_peer", "no")
    status_error = "yes" == params.get("status_error", "no")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    low_speed = params.get("low_speed", None)
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    hpt_resize = params.get("hpt_resize", None)
    htm_state = params.get("htm_state", None)

    # For pty channel test
    add_channel = "yes" == params.get("add_channel", "no")
    channel_type_name = params.get("channel_type_name", None)
    target_type = params.get("target_type", None)
    target_name = params.get("target_name", None)
    cmd_run_in_remote_guest = params.get("cmd_run_in_remote_guest", None)
    cmd_run_in_remote_guest_1 = params.get("cmd_run_in_remote_guest_1", None)
    cmd_run_in_remote_host = params.get("cmd_run_in_remote_host", None)
    cmd_run_in_remote_host_1 = params.get("cmd_run_in_remote_host_1", None)
    cmd_run_in_remote_host_2 = params.get("cmd_run_in_remote_host_2", None)

    # For qemu command line checking
    qemu_check = params.get("qemu_check", None)

    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    # params for cache matrix test
    cache = params.get("cache")
    remove_cache = "yes" == params.get("remove_cache", "no")
    err_msg = params.get("err_msg")

    arch = platform.machine()
    if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch:
        test.cancel("The case is PPC only.")

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, back up the guest XML file before making changes.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Change the configuration files if needed before starting guest
        # For qemu.conf
        if extra.count("--tls"):
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            if not disable_verify_peer:
                qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
                # Setup qemu configure
                logging.debug("Configure the qemu")
                cleanup_libvirtd_log(log_file)
                qemu_conf = libvirt.customize_libvirt_config(
                    qemu_conf_dict,
                    config_type="qemu",
                    remote_host=True,
                    extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, remote_host=True, extra_params=params)
        # Prepare required guest xml before starting guest
        if contrl_index:
            new_xml.remove_all_device_by_type('controller')
            logging.debug("After removing controllers, current XML:\n%s\n",
                          new_xml)
            add_ctrls(new_xml, dev_index=contrl_index)

        if add_channel:
            attach_channel_xml()

        if hpt_resize:
            set_feature(new_xml, 'hpt', hpt_resize)

        if htm_state:
            set_feature(new_xml, 'htm', htm_state)

        if cache:
            params["driver_cache"] = cache
        if remove_cache:
            params["enable_cache"] = "no"

        # Change the disk of the vm to shared disk and then start VM
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check qemu command line after guest is started
        if qemu_check:
            check_content = qemu_check
            if hpt_resize:
                check_content = "%s%s" % (qemu_check, hpt_resize)
            if htm_state:
                check_content = "%s%s" % (qemu_check, htm_state)
            libvirt.check_qemu_cmd_line(check_content)

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        # Preparation for the running guest before migration
        if hpt_resize and hpt_resize != 'disabled':
            trigger_hpt_resize(vm_session)

        if low_speed:
            control_migrate_speed(int(low_speed))

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm, args=())
            stress_thread.start()

        if extra.count("timeout-postcopy"):
            func_name = check_timeout_postcopy
        if params.get("actions_during_migration"):
            func_name = do_actions_during_migrate
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options:
            extra = "%s %s" % (extra, postcopy_options)

        # Execute migration process
        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()

            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms,
                                            None,
                                            dest_uri,
                                            'orderly',
                                            options,
                                            thread_timeout=900,
                                            ignore_status=True,
                                            virsh_opt=virsh_opt,
                                            func=func_name,
                                            extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        check_migration_res(mig_result)

        if add_channel:
            # Get the channel device source path of remote guest
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            remote_virsh_session.dumpxml(vm_name,
                                         to_file=file_path,
                                         debug=True,
                                         ignore_status=True)
            local_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            local_vmxml.xmltreefile = xml_utils.XMLTreeFile(file_path)
            # Initialize so the check below does not hit an unbound name when
            # no matching channel is found
            host_source = None
            for elem in local_vmxml.devices.by_device_tag('channel'):
                logging.debug("Found channel device {}".format(elem))
                if elem.type_name == channel_type_name:
                    host_source = elem.source.get('path')
                    logging.debug(
                        "Remote guest uses {} for channel device".format(
                            host_source))
                    break
            remote_virsh_session.close_session()
            if not host_source:
                test.fail("Can not find source for %s channel on remote host" %
                          channel_type_name)

            # Prepare to wait for message on remote host from the channel
            cmd_parms = {
                'server_ip': server_ip,
                'server_user': server_user,
                'server_pwd': server_pwd
            }
            cmd_result = remote.run_remote_cmd(
                cmd_run_in_remote_host % host_source, cmd_parms,
                runner_on_target)

            # Send message from remote guest to the channel file
            remote_vm_obj = utils_test.RemoteVMManager(cmd_parms)
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            remote_vm_obj.setup_ssh_auth(vm_ip, vm_pwd)
            cmd_result = remote_vm_obj.run_command(vm_ip,
                                                   cmd_run_in_remote_guest_1)
            remote_vm_obj.run_command(
                vm_ip, cmd_run_in_remote_guest %
                results_stdout_52lts(cmd_result).strip())
            logging.debug("Sending message is done")

            # Check message on remote host from the channel
            remote.run_remote_cmd(cmd_run_in_remote_host_1, cmd_parms,
                                  runner_on_target)
            logging.debug("Receiving message is done")
            remote.run_remote_cmd(cmd_run_in_remote_host_2, cmd_parms,
                                  runner_on_target)

        if check_complete_job:
            opts = " --completed"
            check_virsh_command_and_option("domjobinfo", opts)
            if extra.count("comp-xbzrle-cache"):
                params.update({'compare_to_value': cache // 1024})
            check_domjobinfo(params, option=opts)

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            cmd_parms = {
                'server_ip': server_ip,
                'server_user': server_user,
                'server_pwd': server_pwd
            }
            remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if hpt_resize:
                check_str = hpt_resize
            elif htm_state:
                check_str = htm_state
            if hpt_resize or htm_state:
                xml_check_after_mig = "%s'%s'" % (xml_check_after_mig,
                                                  check_str)
                if not re.search(xml_check_after_mig, target_guest_dumpxml):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' in target guest XML:\n%s" %
                              (xml_check_after_mig, target_guest_dumpxml))

            if contrl_index:
                all_ctrls = re.findall(xml_check_after_mig,
                                       target_guest_dumpxml)
                if len(all_ctrls) != int(contrl_index) + 1:
                    remote_virsh_session.close_session()
                    test.fail(
                        "%s pci-root controllers are expected in guest XML, "
                        "but found %s" %
                        (int(contrl_index) + 1, len(all_ctrls)))
            remote_virsh_session.close_session()

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()
    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls") and not disable_verify_peer:
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and not is_TestFail
                    and not is_TestSkip):
                raise exception_detail
            else:
                # if any of above exceptions has been raised, only print
                # error log here to avoid of hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
Example #8
def run(test, params, env):
    """
    Test virsh migrate command.
    """

    def cleanup_vm(vm, vm_name='', uri=''):
        """
        Clean up vm in the src or destination host environment
        when doing the uni-direction migration.
        """
        # Backup vm name and uri
        uri_bak = vm.connect_uri
        vm_name_bak = vm.name

        # Destroy and undefine vm
        vm.connect_uri = uri if uri else uri_bak
        vm.name = vm_name if vm_name else vm_name_bak
        logging.info("Cleaning up VM %s on %s", vm.name, vm.connect_uri)
        if vm.is_alive():
            vm.destroy()
        if vm.is_persistent():
            vm.undefine()

        # Restore vm connect_uri
        vm.connect_uri = uri_bak
        vm.name = vm_name_bak

    # Check whether there are unset parameters
    for v in list(itervalues(params)):
        if isinstance(v, string_types) and v.count("EXAMPLE"):
            test.cancel("Please set real value for %s" % v)

    # Params for virsh migrate options:
    live_migration = params.get("live_migration") == "yes"
    offline_migration = params.get("offline_migration") == "yes"
    persistent = params.get("persistent") == "yes"
    undefinesource = params.get("undefinesource") == "yes"
    p2p = params.get("p2p") == "yes"
    tunnelled = params.get("tunnelled") == "yes"
    postcopy = params.get("postcopy") == "yes"
    dname = params.get("dname")
    xml_option = params.get("xml_option") == "yes"
    persistent_xml_option = params.get("persistent_xml_option") == "yes"
    extra_options = params.get("virsh_migrate_extra", "")

    if live_migration and not extra_options.count("--live"):
        extra_options = "%s --live" % extra_options
    if offline_migration and not extra_options.count("--offline"):
        extra_options = "%s --offline" % extra_options
    if persistent and not extra_options.count("--persistent"):
        extra_options = "%s --persistent" % extra_options
    if undefinesource and not extra_options.count("--undefinesource"):
        extra_options = "%s --undefinesource" % extra_options
    if p2p and not extra_options.count("--p2p"):
        extra_options = "%s --p2p" % extra_options
    if tunnelled and not extra_options.count("--tunnelled"):
        extra_options = "%s --tunnelled" % extra_options
    if tunnelled and not extra_options.count("--p2p"):
        extra_options = "%s --p2p" % extra_options
    if postcopy and not extra_options.count("--postcopy"):
        extra_options = "%s --postcopy" % extra_options
    if dname and not extra_options.count("--dname"):
        extra_options = "%s --dname %s" % (extra_options, dname)
    if persistent_xml_option and not extra_options.count("--persistent"):
        extra_options = "%s --persistent" % extra_options
    # The --xml and --persistent-xml file arguments themselves are prepared
    # later, once the final guest xml is available.

    # Set param migrate_options in case it is used somewhere:
    params.setdefault("migrate_options", extra_options)

    # Params for postcopy migration
    postcopy_timeout = int(params.get("postcopy_migration_timeout", "180"))

    # Params for migrate hosts:
    server_cn = params.get("server_cn")
    client_cn = params.get("client_cn")
    migrate_source_host = client_cn if client_cn else params.get("migrate_source_host")
    migrate_dest_host = server_cn if server_cn else params.get("migrate_dest_host")

    # Params for migrate uri
    transport = params.get("transport", "tls")
    transport_port = params.get("transport_port")
    uri_port = ":%s" % transport_port if transport_port else ''
    hypervisor_driver = params.get("hypervisor_driver", "qemu")
    hypervisor_mode = params.get("hypervisor_mode", 'system')
    if "virsh_migrate_desturi" not in list(params.keys()):
        params["virsh_migrate_desturi"] = "%s+%s://%s%s/%s" % (hypervisor_driver,
                                                               transport,
                                                               migrate_dest_host,
                                                               uri_port,
                                                               hypervisor_mode)
    if "virsh_migrate_srcuri" not in list(params.keys()):
        params["virsh_migrate_srcuri"] = "%s:///%s" % (hypervisor_driver,
                                                       hypervisor_mode)
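    # Illustrative results (hostname and port are examples only):
    #   desturi: qemu+tls://dest.example.com:16514/system
    #   srcuri:  qemu:///system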
    dest_uri = params.get("virsh_migrate_desturi")
    src_uri = params.get("virsh_migrate_srcuri")

    # Params for src vm cfg:
    src_vm_cfg = params.get("src_vm_cfg")
    src_vm_status = params.get("src_vm_status")
    with_graphic_passwd = params.get("with_graphic_passwd")
    graphic_passwd = params.get("graphic_passwd")

    # For test result check
    cancel_exception = False
    fail_exception = False
    exception = False
    result_check_pass = True

    # Objects(SSH, TLS and TCP, etc) to be cleaned up in finally
    objs_list = []

    # VM objects for migration test
    vms = []

    try:
        # Get a MigrationTest() Object
        logging.debug("Get a MigrationTest()  object")
        obj_migration = libvirt.MigrationTest()

        # Setup libvirtd remote connection TLS connection env
        if transport == "tls":
            tls_obj = TLSConnection(params)
            # Setup CA, server(on dest host) and client(on src host)
            tls_obj.conn_setup()
            # Add tls_obj to objs_list
            objs_list.append(tls_obj)

        # Enable libvirtd remote connection transport port
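        # (16514 and 16509 are libvirt's default TLS and TCP listen ports)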
        if transport == 'tls':
            transport_port = '16514'
        elif transport == 'tcp':
            transport_port = '16509'
        obj_migration.migrate_pre_setup(dest_uri, params, ports=transport_port)

        # Back up vm name for recovery in finally
        vm_name_backup = params.get("migrate_main_vm")

        # Get a vm object for migration
        logging.debug("Get a vm object for migration")
        vm = env.get_vm(vm_name_backup)

        # Back up vm xml for recovery in finally
        logging.debug("Backup vm xml before migration")
        vm_xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        if not vm_xml_backup:
            test.error("Backing up xmlfile failed.")

        # Prepare shared disk in vm xml for live migration:
        # Change the source of the first disk of vm to shared disk
        if live_migration:
            logging.debug("Prepare shared disk in vm xml for live migration")
            storage_type = params.get("storage_type")
            if storage_type == 'nfs':
                logging.debug("Prepare nfs shared disk in vm xml")
                nfs_mount_dir = params.get("nfs_mount_dir")
                libvirt.update_vm_disk_source(vm.name, nfs_mount_dir)
                libvirt.update_vm_disk_driver_cache(vm.name, driver_cache="none")
            else:
                # TODO:Other storage types
                test.cancel("Other storage type is not supported for now")
                pass

        # Prepare graphic password in vm xml
        if with_graphic_passwd in ["yes", "no"]:
            logging.debug("Set VM graphic passwd in vm xml")
            # Get graphics list in vm xml
            vmxml_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            graphics_list = vmxml_tmp.get_graphics_devices()

            if not graphics_list:
                # Add spice graphic with passwd to vm xml
                logging.debug("Add spice graphic to vm xml")
                graphics.Graphics.add_graphic(vm.name, graphic_passwd, "spice")
            elif graphic_passwd:
                # Graphics already exist in vm xml and passwd is required
                # Add passwd to the first graphic device in vm xml
                logging.debug("Add graphic passwd to vm xml")
                vm_xml.VMXML.add_security_info(vmxml_tmp, graphic_passwd)
                vmxml_tmp.sync()
            else:
                # Graphics already exist in vm xml and non-passwd is required
                # Do nothing here as passwd has been removed by new_from_inactive_dumpxml()
                pass

        # Prepare for required src vm status.
        logging.debug("Turning %s into certain state.", vm.name)
        if src_vm_status == "running" and not vm.is_alive():
            vm.start()
        elif src_vm_status == "shut off" and not vm.is_dead():
            vm.destroy()

        # Prepare for required src vm persistency.
        logging.debug("Prepare for required src vm persistency")
        if src_vm_cfg == "persistent" and not vm.is_persistent():
            logging.debug("Make src vm persistent")
            vm_xml_backup.define()
        elif src_vm_cfg == "transient" and vm.is_persistent():
            logging.debug("Make src vm transient")
            vm.undefine()

        # Prepare for postcopy migration: install and run stress in VM
        if postcopy and src_vm_status == "running":
            logging.debug("Install and run stress in vm for postcopy migration")
            pkg_name = 'stress'

            # Get a vm session
            logging.debug("Get a vm session")
            vm_session = vm.wait_for_login()
            if not vm_session:
                test.error("Can't get a vm session successfully")

            # Install package stress if it is not installed in vm
            logging.debug("Check if stress tool is installed for postcopy migration")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            # Run stress in vm
            logging.debug("Run stress in vm")
            stress_args = params.get("stress_args")
            vm_session.cmd('stress %s' % stress_args)

        # Prepare for --xml <updated_xml_file>.
        if xml_option:
            logging.debug("Preparing new xml file for --xml option.")

            # Get the vm xml
            vmxml_tmp = vm_xml.VMXML.new_from_dumpxml(vm.name,
                                                      "--security-info --migratable")

            # Update something in the xml file: e.g. title
            # Note: VM ABI shall not be broken when migrating with updated_xml
            updated_title = "VM Title in updated xml"
            vmxml_tmp.title = updated_title

            # Add --xml to migrate extra_options
            extra_options = ("%s --xml=%s" % (extra_options, vmxml_tmp.xml))

        # Prepare for --persistent-xml <updated_xml_file>.
        if persistent_xml_option:
            logging.debug("Preparing new xml file for --persistent-xml option.")

            # Get the vm xml
            vmxml_persist_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name,
                                                                       "--security-info")

            # Update something in the xml file: e.g. title
            # Note: VM ABI shall not be broken when migrating with updated_xml
            updated_persist_title = "VM Title in updated persist xml"
            vmxml_persist_tmp.title = updated_persist_title

            # Add --persistent-xml to migrate extra_options
            extra_options = ("%s --persistent-xml=%s" % (extra_options, vmxml_persist_tmp.xml))

        # Prepare host env: clean up vm on dest host
        logging.debug("Clean up vm on dest host before migration")
        if dname:
            cleanup_vm(vm, dname, dest_uri)
        else:
            cleanup_vm(vm, vm.name, dest_uri)

        # Prepare host env: set selinux state before migration
        logging.debug("Set selinux to enforcing before migration")
        utils_selinux.set_status(params.get("selinux_state", "enforcing"))

        # Check vm network connectivity by ping before migration
        logging.debug("Check vm network before migration")
        if src_vm_status == "running":
            obj_migration.ping_vm(vm, params)

        # Get VM uptime before migration
        if src_vm_status == "running":
            vm_uptime = vm.uptime()
            logging.info("Check VM uptime before migration: %s", vm_uptime)

        # Print vm active xml before migration
        process.system_output("virsh dumpxml %s --security-info" %
                              vm.name, shell=True)

        # Print vm inactive xml before migration
        process.system_output("virsh dumpxml %s --security-info --inactive" %
                              vm.name, shell=True)

        # Do uni-direction migration.
        # NOTE: vm.connect_uri will be set to dest_uri once migration is complete successfully
        logging.debug("Start to do migration test.")
        vms.append(vm)
        if postcopy:
            # Monitor the qemu monitor event of "postcopy-active" for postcopy migration
            logging.debug("Monitor the qemu monitor event for postcopy migration")
            virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC, auto_close=True)
            cmd = "qemu-monitor-event --loop --domain %s --event MIGRATION" % vm.name
            virsh_session.sendline(cmd)

            # Do live migration and switch to postcopy by "virsh migrate-postcopy"
            logging.debug("Start to do postcopy migration")
            obj_migration.do_migration(vms, src_uri, dest_uri, "orderly",
                                       options="",
                                       thread_timeout=postcopy_timeout,
                                       ignore_status=False,
                                       func=virsh.migrate_postcopy,
                                       extra_opts=extra_options,
                                       shell=True)

            # Check "postcopy-active" event after postcopy migration
            logging.debug("Check postcopy-active event after postcopy migration")
            virsh_session.send_ctrl("^C")
            events_output = virsh_session.get_stripped_output()
            logging.debug("events_output are %s", events_output)
            pattern = "postcopy-active"
            if not re.search(pattern, events_output):
                virsh_session.close()
                test.fail("Migration didn't switch to postcopy mode")
            virsh_session.close()

        else:
            logging.debug("Start to do precopy migration")
            obj_migration.do_migration(vms, src_uri, dest_uri, "orderly",
                                       options="",
                                       ignore_status=False,
                                       extra_opts=extra_options)

        """
        # Check src vm after migration
        # First, update vm name and connect_uri to src vm's
        """
        vm.name = vm_name_backup
        vm.connect_uri = src_uri
        logging.debug("Start to check %s state on src %s after migration.",
                      vm.name, src_uri)

        # Check src vm status after migration: existence, running, shutoff, etc
        logging.debug("Check vm status on source after migration")
        if offline_migration:
            if src_vm_status == "shut off" and undefinesource:
                if vm.exists():
                    result_check_pass = False
                    logging.error("Src vm should not exist after offline migration"
                                  " with --undefinesource")
                    logging.debug("Src vm state is %s" % vm.state())
            elif not obj_migration.check_vm_state(vm.name, src_vm_status, uri=vm.connect_uri):
                result_check_pass = False
                logging.error("Src vm should be %s after offline migration" % src_vm_status)
                logging.debug("Src vm state is %s" % vm.state())

        if live_migration:
            if not undefinesource and src_vm_cfg == "persistent":
                if not obj_migration.check_vm_state(vm.name, "shut off", uri=vm.connect_uri):
                    result_check_pass = False
                    logging.error("Src vm should be shutoff after live migration")
                    logging.debug("Src vm state is %s" % vm.state())
            elif vm.exists():
                result_check_pass = False
                logging.error("Src vm should not exist after live migration")
                logging.debug("Src vm state is %s" % vm.state())

        # Check src vm status after migration: persistency
        logging.debug("Check vm persistency on source after migration")
        if src_vm_cfg == "persistent" and not undefinesource:
            if not vm.is_persistent():
                # Src vm should be persistent after migration without --undefinesource
                result_check_pass = False
                logging.error("Src vm should be persistent after migration")
        elif vm.is_persistent():
            result_check_pass = False
            logging.error("Src vm should be not be persistent after migration")

        """
        # Check dst vm after migration
        # First, update vm name and connect_uri to dst vm's
        """
        vm.name = dname if dname else vm.name
        vm.connect_uri = dest_uri
        logging.debug("Start to check %s state on target %s after migration.",
                      vm.name, vm.connect_uri)

        # Check dst vm status after migration: running, shutoff, etc
        logging.debug("Check vm status on target after migration")
        if live_migration:
            if not obj_migration.check_vm_state(vm.name, src_vm_status, uri=vm.connect_uri):
                result_check_pass = False
                logging.error("Dst vm should be %s after live migration", src_vm_status)
        elif vm.is_alive():
            result_check_pass = False
            logging.error("Dst vm should not be alive after offline migration")

        # Print vm active xml after migration
        process.system_output("virsh -c %s dumpxml %s --security-info" %
                              (vm.connect_uri, vm.name), shell=True)

        # Print vm inactive xml after migration
        process.system_output("virsh -c %s dumpxml %s --security-info --inactive" %
                              (vm.connect_uri, vm.name), shell=True)

        # Check dst vm xml after migration
        logging.debug("Check vm xml on target after migration")
        remote_virsh = virsh.Virsh(uri=vm.connect_uri)
        vmxml_active_tmp = vm_xml.VMXML.new_from_dumpxml(vm.name, "--security-info",
                                                         remote_virsh)
        vmxml_inactive_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name,
                                                                    "--security-info",
                                                                    remote_virsh)
        # Check dst vm xml after migration: --xml <updated_xml_file>
        if xml_option and not offline_migration:
            logging.debug("Check vm active xml for --xml")
            if not vmxml_active_tmp.title == updated_title:
                print("vmxml active tmp title is %s" % vmxml_active_tmp.title)
                result_check_pass = False
                logging.error("--xml doesn't take effect in migration")

        if xml_option and offline_migration:
            logging.debug("Check vm inactive xml for --xml")
            if not vmxml_active_tmp.title == updated_title:
                result_check_pass = False
                logging.error("--xml doesn't take effect in migration")

        # Check dst vm xml after migration: --persistent-xml <updated_xml_file>
        if persistent_xml_option:
            logging.debug("Check vm inactive xml for --persistent-xml")
            if not offline_migration and not vmxml_inactive_tmp.title == updated_persist_title:
                print("vmxml inactive tmp title is %s" % vmxml_inactive_tmp.title)
                result_check_pass = False
                logging.error("--persistent-xml doesn't take effect in live migration")
            elif offline_migration and vmxml_inactive_tmp.title == updated_persist_title:
                result_check_pass = False
                logging.error("--persistent-xml should not take effect in offline "
                              "migration")

        # Check dst vm xml after migration: graphic passwd
        if with_graphic_passwd == "yes":
            logging.debug("Check graphic passwd in vm xml after migration")
            graphic_active = vmxml_active_tmp.devices.by_device_tag('graphics')[0]
            graphic_inactive = vmxml_inactive_tmp.devices.by_device_tag('graphics')[0]
            try:
                logging.debug("Check graphic passwd in active vm xml")
                if graphic_active.passwd != graphic_passwd:
                    result_check_pass = False
                    logging.error("Graphic passwd in active xml of dst vm should be %s",
                                  graphic_passwd)

                logging.debug("Check graphic passwd in inactive vm xml")
                if graphic_inactive.passwd != graphic_passwd:
                    result_check_pass = False
                    logging.error("Graphic passwd in inactive xml of dst vm should be %s",
                                  graphic_passwd)
            except LibvirtXMLNotFoundError:
                result_check_pass = False
                logging.error("Graphic passwd lost in dst vm xml")

        # Check dst vm uptime, network, etc after live migration
        if live_migration:
            # Check dst VM uptime after migration
            # Note: migrated_vm_uptime should be greater than the vm_uptime got
            # before migration
            migrated_vm_uptime = vm.uptime(connect_uri=dest_uri)
            logging.info("Check VM uptime in destination after "
                         "migration: %s", migrated_vm_uptime)
            if not migrated_vm_uptime:
                result_check_pass = False
                logging.error("Failed to check vm uptime after migration")
            elif vm_uptime > migrated_vm_uptime:
                result_check_pass = False
                logging.error("VM went for a reboot while migrating to destination")

            # Check dst VM network connectivity after migration
            logging.debug("Check VM network connectivity after migrating")
            obj_migration.ping_vm(vm, params, uri=dest_uri)

            # Restore vm.connect_uri as it is set to src_uri in ping_vm()
            logging.debug("Restore vm.connect_uri as it is set to src_uri in ping_vm()")
            vm.connect_uri = dest_uri

        # Check dst vm status after migration: persistency
        logging.debug("Check vm persistency on target after migration")
        if persistent:
            if not vm.is_persistent():
                result_check_pass = False
                logging.error("Dst vm should be persistent after migration "
                              "with --persistent")
            # Give the freshly migrated domain a moment to settle
            time.sleep(10)
            # Destroy vm and check vm state should be shutoff. BZ#1076354
            vm.destroy()
            if not obj_migration.check_vm_state(vm.name, "shut off", uri=vm.connect_uri):
                result_check_pass = False
                logging.error("Dst vm with name %s should exist and be shutoff", vm.name)
        elif vm.is_persistent():
            result_check_pass = False
            logging.error("Dst vm should not be persistent after migration "
                          "without --persistent")

    finally:
        logging.debug("Start to clean up env")
        # Clean up vm on dest and src host
        for vm in vms:
            cleanup_vm(vm, vm_name=dname, uri=dest_uri)
            cleanup_vm(vm, vm_name=vm_name_backup, uri=src_uri)

        # Recover source vm definition (just in case).
        logging.info("Recover vm definition on source")
        if vm_xml_backup:
            vm_xml_backup.define()

        # Clean up SSH, TCP, TLS test env
        if objs_list and len(objs_list) > 0:
            logging.debug("Clean up test env: SSH, TCP, TLS, etc")
            for obj in objs_list:
                obj.auto_recover = True
                del obj

        # Disable libvirtd remote connection transport port
        obj_migration.migrate_pre_setup(dest_uri, params, cleanup=True, ports=transport_port)

        # Check test result.
        if not result_check_pass:
            test.fail("Migration succeed, but some check points didn't pass."
                      "Please check the error log for details")
Example #9
def run(test, params, env):
    """
    Test KVM migration scenarios
    """
    migrate_options = params.get("migrate_options", "")
    migrate_postcopy = params.get("migrate_postcopy", "")
    migrate_dest_ip = params.get("migrate_dest_host")
    nfs_mount_path = params.get("nfs_mount_dir")
    migrate_start_state = params.get("migrate_start_state", "paused")
    machine_types = params.get("migrate_all_machine_types", "no") == "yes"
    migrate_back = params.get("migrate_back", "yes") == "yes"
    postcopy_func = None
    if migrate_postcopy:
        postcopy_func = virsh.migrate_postcopy
    migrate_type = params.get("migrate_type", "orderly")
    vm_state = params.get("migrate_vm_state", "running")
    ping_count = int(params.get("ping_count", 10))
    thread_timeout = int(params.get("thread_timeout", 3600))

    vm_list = env.get_all_vms()

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = nfs_mount_path

    src_uri = "qemu:///system"
    dest_uri = libvirt_vm.complete_uri(params["server_ip"])

    vmxml_dict = {}
    vmxml_machine = {}

    machine_list = params.get("machine_type").split()
    virt_type = params.get("hvm_or_pv", "hvm")
    arch = params.get("vm_arch_name", platform.machine())

    # Get all supported machine types in source
    if machine_types:
        machine_list = libvirt.get_machine_types(arch, virt_type)
        if not machine_list:
            test.cancel("Libvirt doesn't support %s virtualization on "
                        "arch %s in source host" % (virt_type, arch))
        logging.debug("Supported machine types in source: %s",
                      ", ".join(map(str, machine_list)))

        # Get all supported machine types in target host
        virsh_remote = virsh.Virsh(uri=dest_uri)
        remote_machine_list = libvirt.get_machine_types(
            arch, virt_type, virsh_instance=virsh_remote)
        if not remote_machine_list:
            test.cancel("Libvirt doesn't support %s virtualization on "
                        "arch %s in target host" % (virt_type, arch))
        logging.debug("Supported machine types in target: %s",
                      ", ".join(map(str, remote_machine_list)))

        # use machine types supported by both source and target host
        machine_list = list(
            set(machine_list).intersection(remote_machine_list))
        if not machine_list:
            test.cancel(
                "Migration not supported because the source and target "
                "machine types don't match")
        logging.debug(
            "Supported machine types common to source and "
            "target: %s", ", ".join(map(str, machine_list)))

    migrate_setup = libvirt.MigrationTest()
    # Perform migration with each machine type
    try:
        for vm in vm_list:
            vmxml_dict[vm.name] = libvirt_xml.vm_xml.VMXML.new_from_dumpxml(
                vm.name)
            params["source_dist_img"] = "%s-nfs-img" % vm.name
            if vm.is_alive():
                vm.destroy()
            libvirt.set_vm_disk(vm, params)
        info_list = []
        for machine in machine_list:
            uptime = {}
            for vm in vm_list:
                vmxml_machine[
                    vm.name] = libvirt_xml.vm_xml.VMXML.new_from_dumpxml(
                        vm.name)
                # update machine type
                update_machinetype(test, vmxml_machine[vm.name], machine)
                if vm.is_alive():
                    vm.destroy()
                if "offline" not in migrate_options:
                    vm.start()
                    vm.wait_for_login()
                    uptime[vm.name] = vm.uptime()
                    logging.info("uptime of VM %s: %s", vm.name,
                                 uptime[vm.name])
                    migrate_setup.ping_vm(vm, params, ping_count=ping_count)
            try:
                logging.debug(
                    "Migrating source to target from %s to %s "
                    "with machine type: %s", src_uri, dest_uri, machine)
                # Reset the flag so that a failure in the current iteration
                # does not affect the next one
                migrate_setup.RET_MIGRATION = True
                migrate_setup.do_migration(
                    vm_list,
                    src_uri,
                    dest_uri,
                    migrate_type,
                    migrate_options,
                    func=postcopy_func,
                    migrate_start_state=migrate_start_state,
                    thread_timeout=thread_timeout)
            except Exception as info:
                info_list.append(info)
                logging.error(
                    "Failed to migrate VM from source to target "
                    "%s to %s with machine type: %s", src_uri, dest_uri,
                    machine)

            if migrate_setup.RET_MIGRATION:
                uptime = migrate_setup.post_migration_check(vm_list,
                                                            params,
                                                            uptime,
                                                            uri=dest_uri)
                if migrate_back:
                    migrate_setup.migrate_pre_setup(src_uri, params)
                    logging.debug(
                        "Migrating back to source from %s to %s "
                        "with machine type: %s", dest_uri, src_uri, machine)
                    try:
                        migrate_setup.do_migration(
                            vm_list,
                            dest_uri,
                            src_uri,
                            migrate_type,
                            options=migrate_options,
                            func=postcopy_func,
                            migrate_start_state=migrate_start_state,
                            thread_timeout=thread_timeout,
                            virsh_uri=dest_uri)
                    except Exception as info:
                        logging.error(
                            "Failed to migrate back to source from "
                            "%s to %s with machine type: %s", dest_uri,
                            src_uri, machine)
                        info_list.append(info)
                        cleanup_vm(vm_list, vmxml_machine, migrate_setup,
                                   src_uri, dest_uri)
                        continue
                    uptime = migrate_setup.post_migration_check(
                        vm_list, params, uptime)
                    migrate_setup.migrate_pre_setup(src_uri,
                                                    params,
                                                    cleanup=True)
        if info_list:
            test.fail(" | ".join(map(str, info_list)))
    finally:
        logging.debug("cleanup the migration setup in source/destination")
        cleanup_vm(vm_list, vmxml_dict, migrate_setup, src_uri, dest_uri)
        for source_file in params.get("source_file_list", []):
            libvirt.delete_local_disk("file", path=source_file)
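
# A condensed sketch of the machine-type negotiation used in the example
# above; it assumes the same virttest helpers and a reachable destination
# URI, and simply intersects the two supported lists.
from virttest import virsh
from virttest.utils_test import libvirt

def common_machine_types(arch, virt_type, dest_uri):
    """Return machine types supported by both source and destination host."""
    local_types = libvirt.get_machine_types(arch, virt_type)
    remote_types = libvirt.get_machine_types(
        arch, virt_type, virsh_instance=virsh.Virsh(uri=dest_uri))
    return sorted(set(local_types) & set(remote_types))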
Example #10
def copied_migration(vm, params, blockjob_type=None, block_target="vda"):
    """
    Migrate vms with storage copied under some stress.
    And during it, some qemu-monitor-command will be sent.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("remote_user")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    options = "--live %s --unsafe" % copy_option

    # Get vm ip for remote checking
    if vm.is_dead():
        vm.start()
    vm.wait_for_login()
    vms_ip = {}
    vms_ip[vm.name] = vm.get_address()
    logging.debug("VM %s IP: %s", vm.name, vms_ip[vm.name])

    # Start to load stress
    stress_type = params.get("migrate_stress_type")
    if stress_type == "cpu":
        params['stress_args'] = "--cpu 2 --quiet --timeout 60"
    elif stress_type == "memory":
        params['stress_args'] = "--vm 2 --vm-bytes 256M --vm-keep --timeout 60"
    if stress_type is not None:
        utils_test.load_stress("stress_in_vms", [vm], params)

    cp_mig = utlv.MigrationTest()
    migration_thread = threading.Thread(target=cp_mig.thread_func_migration,
                                        args=(vm, dest_uri, options))
    migration_thread.start()
    # Wait for migration launched
    time.sleep(5)
    job_ret = virsh.domjobinfo(vm.name, debug=True)
    if job_ret.exit_status:
        raise error.TestError("Prepare migration for blockjob failed.")

    # Execute some qemu monitor commands
    pause_cmd = "block-job-pause %s" % block_target
    resume_cmd = "block-job-resume %s" % block_target
    cancel_cmd = "block-job-cancel %s" % block_target
    complete_cmd = "block-job-complete %s" % block_target

    blockjob_failures = []
    try:
        if blockjob_type == "cancel":
            virsh.qemu_monitor_command(vm.name,
                                       cancel_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "pause_resume":
            virsh.qemu_monitor_command(vm.name,
                                       pause_cmd,
                                       debug=True,
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name,
                                       resume_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name,
                                       complete_cmd,
                                       debug=True,
                                       ignore_status=False)
    except error.CmdError as detail:
        blockjob_failures.append(str(detail))
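
# A minimal sketch (guest name and block target are assumptions) of the
# pause/resume sequence that copied_migration() above drives through the
# QEMU monitor, mirroring the virsh.qemu_monitor_command calls it makes:
from virttest import virsh

def pause_resume_blockjob(vm_name, target="vda"):
    virsh.qemu_monitor_command(vm_name, "block-job-pause %s" % target,
                               debug=True, ignore_status=False)
    virsh.qemu_monitor_command(vm_name, "block-job-resume %s" % target,
                               debug=True, ignore_status=False)
Example #11
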
def run(test, params, env):
    """
    Test command: virsh migrate-setspeed <domain> <bandwidth>
                  virsh migrate-getspeed <domain>.

    1) Prepare test environment.
    2) Try to set the maximum migration bandwidth (in MiB/s)
       for a domain through valid and invalid command.
    3) Recover test environment.
    4) Check result.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("migrate_main_vm")
    bandwidth = params.get("bandwidth", "default")
    options_extra = params.get("options_extra", "")
    status_error = "yes" == params.get("status_error", "yes")
    virsh_dargs = {'debug': True}
    # Checking uris for migration
    twice_migration = "yes" == params.get("twice_migration", "no")
    if twice_migration:
        src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
        dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")
        if src_uri.count('///') or src_uri.count('EXAMPLE'):
            raise error.TestNAError("The src_uri '%s' is invalid" % src_uri)
        if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
            raise error.TestNAError("The dest_uri '%s' is invalid" % dest_uri)

    bz1083483 = False
    if bandwidth == "zero":
        expected_value = 0
    elif bandwidth == "one":
        expected_value = 1
    elif bandwidth == "negative":
        expected_value = -1
        bz1083483 = True
    elif bandwidth == "default":
        expected_value = DEFAULT
    elif bandwidth == "UINT32_MAX":
        expected_value = UINT32_MiB
    elif bandwidth == "INT64_MAX":
        expected_value = INT64_MiB
    elif bandwidth == "UINT64_MAX":
        expected_value = UINT64_MiB
        bz1083483 = True
    elif bandwidth == "INVALID_VALUE":
        expected_value = INT64_MiB + 1
        bz1083483 = True
    else:
        expected_value = bandwidth

    orig_value = virsh.migrate_getspeed(vm_name).stdout.strip()

    def set_get_speed(vm_name,
                      expected_value,
                      status_error=False,
                      options_extra="",
                      **virsh_dargs):
        """Set speed and check its result"""
        result = virsh.migrate_setspeed(vm_name, expected_value, options_extra,
                                        **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()

        # Check status_error
        if status_error:
            if status == 0 or err == "":
                # Without code for bz1083483 applied, this will succeed
                # when it shouldn't be succeeding.
                if bz1083483 and not libvirt_version.version_compare(1, 2, 4):
                    raise error.TestNAError("bz1083483 should result in fail")
                else:
                    raise error.TestFail("Expect fail, but run successfully!")

            # no need to perform getspeed if status_error is true
            return
        else:
            if status != 0 or err != "":
                raise error.TestFail("Run failed with right "
                                     "virsh migrate-setspeed command")

        result = virsh.migrate_getspeed(vm_name, **virsh_dargs)
        status = result.exit_status
        actual_value = result.stdout.strip()
        err = result.stderr.strip()

        if status != 0 or err != "":
            raise error.TestFail("Run failed with virsh migrate-getspeed")

        logging.info("The expected bandwidth is %s MiB/s, "
                     "the actual bandwidth is %s MiB/s" %
                     (expected_value, actual_value))

        if int(actual_value) != int(expected_value):
            raise error.TestFail("Bandwidth value from getspeed "
                                 "is different from expected value "
                                 "set by setspeed")

    def verify_migration_speed(test, params, env):
        """
        Check if migration speed is effective with twice migration.
        """
        vms = env.get_all_vms()
        src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
        dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")

        if not len(vms):
            raise error.TestNAError("Please provide migrate_vms for test.")

        if src_uri.count('///') or src_uri.count('EXAMPLE'):
            raise error.TestNAError("The src_uri '%s' is invalid" % src_uri)

        if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
            raise error.TestNAError("The dest_uri '%s' is invalid" % dest_uri)

        remote_host = params.get("migrate_dest_host")
        username = params.get("migrate_dest_user", "root")
        password = params.get("migrate_dest_pwd")
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        # Check migrated vms' state
        for vm in vms:
            if vm.is_dead():
                vm.start()

        load_vm_names = params.get("load_vms").split()
        # vms for load
        load_vms = []
        for vm_name in load_vm_names:
            load_vms.append(
                libvirt_vm.VM(vm_name, params, test.bindir,
                              env.get("address_cache")))
        params["load_vms"] = load_vms

        bandwidth = int(params.get("bandwidth", "4"))
        stress_type = params.get("stress_type", "load_vms_booting")
        migration_type = params.get("migration_type", "orderly")
        thread_timeout = int(params.get("thread_timeout", "60"))
        delta = float(params.get("allowed_delta", "0.1"))
        virsh_migrate_timeout = int(params.get("virsh_migrate_timeout", "60"))
        # virsh migrate options
        virsh_migrate_options = "--live --unsafe --timeout %s" % virsh_migrate_timeout
        # Migrate vms to remote host
        mig_first = utlv.MigrationTest()
        virsh_dargs = {"debug": True}
        for vm in vms:
            set_get_speed(vm.name, bandwidth, **virsh_dargs)
            vm.wait_for_login()
        utils_test.load_stress(stress_type, vms, params)
        mig_first.do_migration(vms,
                               src_uri,
                               dest_uri,
                               migration_type,
                               options=virsh_migrate_options,
                               thread_timeout=thread_timeout)
        for vm in vms:
            mig_first.cleanup_dest_vm(vm, None, dest_uri)
            # Keep it clean for second migration
            if vm.is_alive():
                vm.destroy()

        # Migrate vms again with new bandwidth
        second_bandwidth = params.get("second_bandwidth", "times")
        if second_bandwidth == "half":
            second_bandwidth = bandwidth // 2
            speed_times = 2
        elif second_bandwidth == "times":
            second_bandwidth = bandwidth * 2
            speed_times = 0.5
        elif second_bandwidth == "same":
            second_bandwidth = bandwidth
            speed_times = 1

        # Migrate again
        for vm in vms:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
            set_get_speed(vm.name, second_bandwidth, **virsh_dargs)
        utils_test.load_stress(stress_type, vms, params)
        mig_second = utlv.MigrationTest()
        mig_second.do_migration(vms,
                                src_uri,
                                dest_uri,
                                migration_type,
                                options=virsh_migrate_options,
                                thread_timeout=thread_timeout)
        for vm in vms:
            mig_second.cleanup_dest_vm(vm, None, dest_uri)

        # Collect per-vm migration timing failures below
        fail_info = []

        for vm in vms:
            first_time = mig_first.mig_time[vm.name]
            second_time = mig_second.mig_time[vm.name]
            logging.debug(
                "Migration time for %s:\n"
                "Time with Bandwidth '%s' first: %s\n"
                "Time with Bandwidth '%s' second: %s", vm.name, bandwidth,
                first_time, second_bandwidth, second_time)
            shift = float(abs(first_time * speed_times -
                              second_time)) / float(second_time)
            logging.debug("Shift:%s", shift)
            if delta < shift:
                fail_info.append(
                    "Spent time for migrating %s is intolerable." % vm.name)

        # Check again for speed result
        if len(fail_info):
            raise error.TestFail(fail_info)

    # Run test case
    try:
        set_get_speed(vm_name, expected_value, status_error, options_extra,
                      **virsh_dargs)
        if twice_migration:
            verify_migration_speed(test, params, env)
        else:
            set_get_speed(vm_name, expected_value, status_error, options_extra,
                          **virsh_dargs)
    finally:
        # Restore bandwidth to default
        virsh.migrate_setspeed(vm_name, orig_value)
        if twice_migration:
            for vm in env.get_all_vms():
                utlv.MigrationTest().cleanup_dest_vm(vm, src_uri, dest_uri)
                if vm.is_alive():
                    vm.destroy(gracefully=False)
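# A trimmed sketch of the setspeed/getspeed round trip exercised in the
# example above (bandwidth is in MiB/s; the guest name is an assumption):
from virttest import virsh

def set_and_verify_speed(vm_name, bandwidth):
    virsh.migrate_setspeed(vm_name, bandwidth, ignore_status=False)
    actual = int(virsh.migrate_getspeed(vm_name).stdout.strip())
    if actual != int(bandwidth):
        raise RuntimeError("getspeed returned %d MiB/s, expected %s"
                           % (actual, bandwidth))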
Example #12
def run(test, params, env):
    """
    Test virsh migrate command.
    """

    def set_feature(vmxml, feature, value):
        """
        Set guest features for PPC

        :param state: the htm status
        :param vmxml: guest xml
        """
        features_xml = vm_xml.VMFeaturesXML()
        if feature == 'hpt':
            hpt_xml = vm_xml.VMFeaturesHptXML()
            hpt_xml.resizing = value
            features_xml.hpt = hpt_xml
        elif feature == 'htm':
            features_xml.htm = value
        vmxml.features = features_xml
        vmxml.sync()

    def trigger_hpt_resize(session):
        """
        Check the HPT order file and dmesg

        :param session: the session to guest

        :raise: test.fail if required message is not found
        """
        hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        hpt_order += 1
        cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
        cmd_result = session.cmd_status_output(cmd)
        result = process.CmdResult(stderr=cmd_result[1],
                                   stdout=cmd_result[1],
                                   exit_status=cmd_result[0])
        libvirt.check_exit_status(result)
        dmesg = session.cmd('dmesg')
        dmesg_content = params.get('dmesg_content').split('|')
        for content in dmesg_content:
            if content % hpt_order not in dmesg:
                test.fail("'%s' is missing in dmesg" % (content % hpt_order))
            else:
                logging.info("'%s' is found in dmesg", content % hpt_order)

    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host

        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'" % (command,
                                                                       option))

    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add multiple devices

        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters
        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line

        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        virsh_args.update({"ignore_status": True})
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing the uni-direction migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()

        except Exception as detail:
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get("stress_args", "--cpu 8 --io 4 "
                                 "--vm 2 --vm-bytes 128M "
                                 "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            logging.debug(detail)

    def control_migrate_speed(to_speed=1, opts=""):
        """
        Control migration duration

        :param to_speed: the speed value in Mbps to be set for migration
        :return int: the new migration speed after setting
        """
        virsh_args.update({"ignore_status": False})
        old_speed = virsh.migrate_getspeed(vm_name, extra=opts, **virsh_args)
        logging.debug("Current migration speed is %s MiB/s\n", old_speed.stdout.strip())
        logging.debug("Set migration speed to %d MiB/s\n", to_speed)
        cmd_result = virsh.migrate_setspeed(vm_name, to_speed, extra=opts, **virsh_args)
        actual_speed = virsh.migrate_getspeed(vm_name, extra=opts, **virsh_args)
        logging.debug("New migration speed is %s MiB/s\n", actual_speed.stdout.strip())
        return int(actual_speed.stdout.strip())

    def check_setspeed(params):
        """
        Set/get migration speed

        :param params: the parameters used
        :raise: test.fail if speed set does not take effect
        """
        expected_value = int(params.get("migrate_speed", '41943040')) // (1024 * 1024)
        actual_value = control_migrate_speed(to_speed=expected_value)
        params.update({'compare_to_value': actual_value})
        if actual_value != expected_value:
            test.fail("Migration speed is expected to be '%d MiB/s', but '%d MiB/s' "
                      "found" % (expected_value, actual_value))

    def check_domjobinfo(params, option=""):
        """
        Check given item in domjobinfo of the guest is as expected

        :param params: the parameters used
        :param option: options for domjobinfo
        :raise: test.fail if the value of given item is unexpected
        """
        def search_jobinfo(jobinfo):
            """
            Find value of given item in domjobinfo

            :param jobinfo: cmdResult object
            :raise: test.fail if not found
            """
            for item in jobinfo.stdout.splitlines():
                if item.count(jobinfo_item):
                    groups = re.findall(r'[0-9.]+', item.strip())
                    logging.debug("In '%s' search '%s'\n", item, groups[0])
                    if (math.fabs(float(groups[0]) - float(compare_to_value)) /
                       float(compare_to_value) > diff_rate):
                        test.fail("{} {} has too much difference from "
                                  "{}".format(jobinfo_item,
                                              groups[0],
                                              compare_to_value))
                    break

        jobinfo_item = params.get("jobinfo_item")
        compare_to_value = params.get("compare_to_value")
        logging.debug("compare_to_value:%s", compare_to_value)
        diff_rate = float(params.get("diff_rate", "0"))
        if not jobinfo_item or not compare_to_value:
            return
        vm_ref = '{}{}'.format(vm_name, option)
        jobinfo = virsh.domjobinfo(vm_ref, **virsh_args)
        search_jobinfo(jobinfo)

        check_domjobinfo_remote = params.get("check_domjobinfo_remote")
        if check_domjobinfo_remote:
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            jobinfo = remote_virsh_session.domjobinfo(vm_ref, **virsh_args)
            search_jobinfo(jobinfo)
            remote_virsh_session.close_session()

    def search_jobinfo_output(jobinfo, items_to_check, postcopy_req=False):
        """
        Check the results of domjobinfo

        :param jobinfo: cmdResult object
        :param items_to_check: expected value
        :param postcopy_req: True for postcopy migration and False for precopy
        :raise: test.fail if an expected item is missing or has a wrong value
        """
        expected_value = copy.deepcopy(items_to_check)
        logging.debug("The items_to_check is %s", expected_value)
        for item in jobinfo.splitlines():
            item_key = item.strip().split(':')[0]
            if "all_items" in expected_value and len(item_key) > 0:
                # "Time elapsed w/o network" and "Downtime w/o network"
                # have a chance to be missing, it is normal
                if item_key in ['Downtime w/o network', 'Time elapsed w/o network']:
                    continue
                if expected_value["all_items"][0] == item_key:
                    del expected_value["all_items"][0]
                else:
                    test.fail("The item '%s' should be '%s'" %
                              (item_key, expected_value["all_items"][0]))

            if item_key in expected_value:
                item_value = ':'.join(item.strip().split(':')[1:]).strip()
                if item_value != expected_value.get(item_key):
                    test.fail("The value of {} is {} which is not "
                              "expected".format(item_key,
                                                item_value))
                else:
                    del expected_value[item_key]
            if postcopy_req and item_key == "Postcopy requests":
                if int(item.strip().split(':')[1]) <= 0:
                    test.fail("The value of Postcopy requests is incorrect")

        # Check if all the items in expect_dict checked or not
        if "all_items" in expected_value:
            if len(expected_value["all_items"]) > 0:
                test.fail("Missing item: {} from all_items"
                          .format(expected_value["all_items"]))
            else:
                del expected_value["all_items"]
        if len(expected_value) != 0:
            test.fail("Missing item: {}".format(expected_value))

    def set_migratepostcopy():
        """
        Switch to postcopy during migration
        """
        res = virsh.migrate_postcopy(vm_name)
        logging.debug("Command output: %s", res)

        if not utils_misc.wait_for(
           lambda: migration_test.check_vm_state(vm_name, "paused",
                                                 "post-copy"), 10):
            test.fail("vm status is expected to 'paused (post-copy)'")

    def check_domjobinfo_output(option="", is_mig_complete=False):
        """
        Check all items in domjobinfo of the guest on both remote and local

        :param option: options for domjobinfo
        :param is_mig_complete: False for domjobinfo checking during migration,
                                True for domjobinfo checking after migration
        :raise: test.fail if the value of given item is unexpected
        """
        expected_list_during_mig = ["Job type", "Operation", "Time elapsed",
                                    "Data processed", "Data remaining",
                                    "Data total", "Memory processed",
                                    "Memory remaining", "Memory total",
                                    "Memory bandwidth", "Dirty rate", "Page size",
                                    "Iteration", "Constant pages", "Normal pages",
                                    "Normal data", "Expected downtime", "Setup time"]
        if libvirt_version.version_compare(4, 10, 0):
            expected_list_during_mig.insert(13, "Postcopy requests")

        expected_list_after_mig_src = copy.deepcopy(expected_list_during_mig)
        expected_list_after_mig_src[-2] = 'Total downtime'
        expected_list_after_mig_dest = copy.deepcopy(expected_list_after_mig_src)

        # Check version in remote
        if not expected_list_after_mig_dest.count("Postcopy requests"):
            remote_session = remote.remote_login("ssh", server_ip, "22", server_user,
                                                 server_pwd, "#")
            if libvirt_version.version_compare(4, 10, 0, session=remote_session):
                expected_list_after_mig_dest.insert(14, "Postcopy requests")
            remote_session.close()

        expect_dict = {"src_notdone": {"Job type": "Unbounded",
                                       "Operation": "Outgoing migration",
                                       "all_items": expected_list_during_mig},
                       "dest_notdone": {"error": "Operation not supported: mig"
                                                 "ration statistics are availab"
                                                 "le only on the source host"},
                       "src_done": {"Job type": "Completed",
                                    "Operation": "Outgoing migration",
                                    "all_items": expected_list_after_mig_src},
                       "dest_done": {"Job type": "Completed",
                                     "Operation": "Incoming migration",
                                     "all_items": expected_list_after_mig_dest}}
        pc_opt = False
        if postcopy_options:
            pc_opt = True
            if is_mig_complete:
                expect_dict["dest_done"].clear()
                expect_dict["dest_done"]["Job type"] = "None"
            else:
                set_migratepostcopy()

        vm_ref = '{}{}'.format(vm_name, option)
        src_jobinfo = virsh.domjobinfo(vm_ref, **virsh_args)
        cmd = "virsh domjobinfo {} {}".format(vm_name, option)
        dest_jobinfo = remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

        if not is_mig_complete:
            search_jobinfo_output(src_jobinfo.stdout, expect_dict["src_notdone"],
                                  postcopy_req=pc_opt)
            search_jobinfo_output(dest_jobinfo.stderr, expect_dict["dest_notdone"])
        else:
            search_jobinfo_output(src_jobinfo.stdout, expect_dict["src_done"])
            search_jobinfo_output(dest_jobinfo.stdout, expect_dict["dest_done"])

    def check_maxdowntime(params):
        """
        Set/get migration maxdowntime

        :param params: the parameters used
        :raise: test.fail if maxdowntime set does not take effect
        """
        expected_value = int(float(params.get("migrate_maxdowntime", '0.3')) * 1000)
        virsh_args.update({"ignore_status": False})
        old_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("Current migration maxdowntime is %d ms", old_value)
        logging.debug("Set migration maxdowntime to %d ms", expected_value)
        virsh.migrate_setmaxdowntime(vm_name, expected_value, **virsh_args)
        actual_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("New migration maxdowntime is %d ms", actual_value)
        if actual_value != expected_value:
            test.fail("Migration maxdowntime is expected to be '%d ms', but '%d ms' "
                      "found" % (expected_value, actual_value))
        params.update({'compare_to_value': actual_value})

    def run_time(init_time=2):
        """
        Compare the duration of func to an expected one

        :param init_time: Expected run time
        :raise: test.fail if func takes more than init_time(second)
        """
        def check_time(func):
            def wrapper(*args, **kwargs):
                start = time.time()
                result = func(*args, **kwargs)
                duration = time.time() - start
                if duration > init_time:
                    test.fail("It takes too long to check {}. The duration is "
                              "{}s which should be less than {}s"
                              .format(func.__doc__, duration, init_time))
                return result
            return wrapper
        return check_time

    def run_domstats(vm_name):
        """
        Run domstats and domstate during migration in source and destination

        :param vm_name: VM name
        :raise: test.fail if domstats does not return in 2s
                or domstate is incorrect
        """
        @run_time()
        def check_source_stats(vm_name):
            """domstats in source"""
            vm_stats = virsh.domstats(vm_name)
            logging.debug("domstats in source: {}".format(vm_stats))

        @run_time()
        def check_dest_stats(vm_name):
            """domstats in target"""
            cmd = "virsh domstats {}".format(vm_name)
            dest_stats = remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)
            logging.debug("domstats in destination: {}".format(dest_stats))

        expected_remote_state = "paused"
        expected_source_state = ["paused", "running"]
        if postcopy_options:
            set_migratepostcopy()
            expected_remote_state = "running"
            expected_source_state = ["paused"]

        check_source_stats(vm_name)
        vm_stat = virsh.domstate(vm_name, ignore_status=False)
        if ((not len(vm_stat.stdout.split())) or
           vm_stat.stdout.split()[0] not in expected_source_state):
            test.fail("Incorrect VM stat on source machine: {}"
                      .format(vm_stat.stdout))

        check_dest_stats(vm_name)
        cmd = "virsh domstate {}".format(vm_name)
        remote_vm_state = remote.run_remote_cmd(cmd, cmd_parms,
                                                runner_on_target,
                                                ignore_status=False)
        if ((not len(remote_vm_state.stdout.split())) or
           remote_vm_state.stdout.split()[0] != expected_remote_state):
            test.fail("Incorrect VM stat on destination machine: {}"
                      .format(remote_vm_state.stdout))
        else:
            logging.debug("vm stat on destination: {}".format(remote_vm_state))
        if postcopy_options:
            vm_stat = virsh.domstate(vm_name, ignore_status=False)
            if ((not len(vm_stat.stdout.split())) or
               vm_stat.stdout.split()[0] != "paused"):
                test.fail("Incorrect VM stat on source machine: {}"
                          .format(vm_stat.stdout))

    def kill_qemu_target():
        """
        Kill qemu process on target host during Finish Phase of migration

        :raise: test.fail if domstate is not "post-copy failed" after
                qemu killed
        """
        if not vm.is_qemu():
            test.cancel("This case is qemu guest only.")
        set_migratepostcopy()
        emulator = new_xml.get_devices('emulator')[0]
        logging.debug("emulator is %s", emulator.path)
        cmd = 'pkill -9 {}'.format(os.path.basename(emulator.path))
        runner_on_target.run(cmd)
        if not utils_misc.wait_for(
           lambda: migration_test.check_vm_state(vm_name, "paused",
                                                 "post-copy failed"), 60):
            test.fail("vm status is expected to 'paused (post-copy failed)'")

    def do_actions_during_migrate(params):
        """
        The entry point to execute action list during migration

        :param params: the parameters used
        """
        actions_during_migration = params.get("actions_during_migration")
        if not actions_during_migration:
            return
        for action in actions_during_migration.split(","):
            if action == 'setspeed':
                check_setspeed(params)
            elif action == 'domjobinfo':
                check_domjobinfo(params)
            elif action == 'setmaxdowntime':
                check_maxdowntime(params)
            elif action == 'converge':
                check_converge(params)
            elif action == 'domjobinfo_output_all':
                check_domjobinfo_output()
            elif action == 'checkdomstats':
                run_domstats(vm_name)
            elif action == 'killqemutarget':
                kill_qemu_target()
            time.sleep(3)

    def attach_channel_xml():
        """
        Create channel xml and attach it to guest configuration
        """
        # Check if pty channel exists already
        for elem in new_xml.devices.by_device_tag('channel'):
            if elem.type_name == channel_type_name:
                logging.debug("{0} channel already exists in guest. "
                              "No need to add new one".format(channel_type_name))
                return

        params = {'channel_type_name': channel_type_name,
                  'target_type': target_type,
                  'target_name': target_name}
        channel_xml = libvirt.create_channel_xml(params)
        virsh.attach_device(domain_opt=vm_name, file_opt=channel_xml.xml,
                            flagstr="--config", ignore_status=False)
        logging.debug("New VMXML with channel:\n%s", virsh.dumpxml(vm_name))

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            test.fail("After timeout '%s' seconds, "
                      "the vm state on target host should "
                      "be 'running', but '%s' found" % (timeout, vm_state))
        remote_virsh_session.close_session()

    def check_converge(params):
        """
        Handle option '--auto-converge --auto-converge-initial
        --auto-converge-increment '.
        'Auto converge throttle' in domjobinfo should start with
        the initial value and increase with correct increment
        and max value is 99.

        :param params: The parameters used
        :raise: exceptions.TestFail when unexpected or no throttle
                       is found
        """
        initial = int(params.get("initial", 20))
        increment = int(params.get("increment", 10))
        max_converge = int(params.get("max_converge", 99))
        allow_throttle_list = [initial + count * increment
                               for count in range(0, (100 - initial) // increment + 1)
                               if (initial + count * increment) < 100]
        allow_throttle_list.append(max_converge)
        logging.debug("The allowed 'Auto converge throttle' value "
                      "is %s", allow_throttle_list)

        throttle = 0
        jobtype = "None"

        while throttle < 100:
            cmd_result = virsh.domjobinfo(vm_name, debug=True,
                                          ignore_status=True)
            if cmd_result.exit_status:
                logging.debug(cmd_result.stderr)
                # Check if migration is completed
                if "domain is not running" in cmd_result.stderr:
                    args = vm_name + " --completed"
                    cmd_result = virsh.domjobinfo(args, debug=True,
                                                  ignore_status=True)
                    if cmd_result.exit_status:
                        test.error("Failed to get domjobinfo and domjobinfo "
                                   "--completed: %s" % cmd_result.stderr)
                else:
                    test.error("Failed to get domjobinfo: %s" % cmd_result.stderr)
            jobinfo = cmd_result.stdout
            for line in jobinfo.splitlines():
                key = line.split(':')[0]
                if key.count("Job type"):
                    jobtype = line.split(':')[-1].strip()
                elif key.count("Auto converge throttle"):
                    throttle = int(line.split(':')[-1].strip())
                    logging.debug("Auto converge throttle:%s", str(throttle))
            if throttle and throttle not in allow_throttle_list:
                test.fail("Invalid auto converge throttle "
                          "value '%s'" % throttle)
            if throttle == 99:
                logging.debug("'Auto converge throttle' reaches maximum "
                              "allowed value 99")
                break
            if jobtype == "None" or jobtype == "Completed":
                logging.debug("Jobtype:%s", jobtype)
                if not throttle:
                    test.fail("'Auto converge throttle' is "
                              "not found in the domjobinfo")
                break
            time.sleep(5)

    def get_usable_compress_cache(pagesize):
        """
        Get the smallest power of two that is greater than pagesize.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def calculate(num):
            result = num & (num - 1)
            return (result == 0)
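        # calculate(num) is True iff num is a power of two: a power of
        # two has exactly one bit set, so num & (num - 1) == 0.
        # Worked example: for pagesize=4096 the loop below returns 8192,
        # which equals 1 << (4096).bit_length().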

        item = pagesize
        found = False
        while (not found):
            item += 1
            found = calculate(item)
        logging.debug("%d is smallest one that is bigger than '%s' and "
                      "is power of 2", item, pagesize)
        return item

    def update_config_file(config_type, new_conf, remote_host=True,
                           params=None):
        """
        Update the specified configuration file with a dict

        :param config_type: the config to update, e.g. 'libvirtd' or 'qemu'
        :param new_conf: a string holding the new configuration dict
        :param remote_host: True to also update the config on the remote host
        :param params: The dict including parameters to connect remote host
        :return: utils_config.LibvirtConfigCommon object
        """
        logging.debug("Update configuration file")
        cleanup_libvirtd_log(log_file)
        config_dict = eval(new_conf)
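        # The *_conf_dict params are string-encoded dicts; e.g.
        # qemu_conf_dict could be "{'migrate_tls_x509_verify': '1'}"
        # (illustrative value), which eval() above turns into a dict
        # before customize_libvirt_config() writes it to the config file.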
        updated_conf = libvirt.customize_libvirt_config(config_dict,
                                                        config_type=config_type,
                                                        remote_host=remote_host,
                                                        extra_params=params)
        return updated_conf

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if the result does not match expectations
        """
        if not result:
            test.error("No migration result is returned.")

        logging.info("Migration out: %s", results_stdout_52lts(result).strip())
        logging.info("Migration error: %s", results_stderr_52lts(result).strip())

        if status_error:  # Migration should fail
            if err_msg:   # Special error messages are expected
                if not re.search(err_msg, results_stderr_52lts(result).strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" % (err_msg,
                                               results_stderr_52lts(result).strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(results_stderr_52lts(result).strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
                                       params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_opt = params.get("virsh_opt", "")
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    check_domjobinfo_results = "yes" == params.get("check_domjobinfo_results")
    contrl_index = params.get("new_contrl_index", None)
    asynch_migration = "yes" == params.get("asynch_migrate", "no")
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    disable_verify_peer = "yes" == params.get("disable_verify_peer", "no")
    status_error = "yes" == params.get("status_error", "no")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    low_speed = params.get("low_speed", None)
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}
    cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                 'server_pwd': server_pwd}
    hpt_resize = params.get("hpt_resize", None)
    htm_state = params.get("htm_state", None)

    # For pty channel test
    add_channel = "yes" == params.get("add_channel", "no")
    channel_type_name = params.get("channel_type_name", None)
    target_type = params.get("target_type", None)
    target_name = params.get("target_name", None)
    cmd_run_in_remote_guest = params.get("cmd_run_in_remote_guest", None)
    cmd_run_in_remote_guest_1 = params.get("cmd_run_in_remote_guest_1", None)
    cmd_run_in_remote_host = params.get("cmd_run_in_remote_host", None)
    cmd_run_in_remote_host_1 = params.get("cmd_run_in_remote_host_1", None)
    cmd_run_in_remote_host_2 = params.get("cmd_run_in_remote_host_2", None)

    # For qemu command line checking
    qemu_check = params.get("qemu_check", None)

    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    # params for cache matrix test
    cache = params.get("cache")
    remove_cache = "yes" == params.get("remove_cache", "no")
    err_msg = params.get("err_msg")

    arch = platform.machine()
    if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch:
        test.cancel("The case is PPC only.")

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    # remote shell session
    remote_session = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, back up the XML file for later restoration.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Change the configuration files if needed before starting guest
        # For qemu.conf
        if extra.count("--tls"):
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()

        # Setup qemu.conf
        qemu_conf_dict = params.get("qemu_conf_dict")
        if qemu_conf_dict:
            qemu_conf = update_config_file('qemu',
                                           qemu_conf_dict,
                                           params=params)

        # Setup libvirtd
        libvirtd_conf_dict = params.get("libvirtd_conf_dict")
        if libvirtd_conf_dict:
            libvirtd_conf = update_config_file('libvirtd',
                                               libvirtd_conf_dict,
                                               params=params)

        # Prepare required guest xml before starting guest
        if contrl_index:
            new_xml.remove_all_device_by_type('controller')
            logging.debug("After removing controllers, current XML:\n%s\n", new_xml)
            add_ctrls(new_xml, dev_index=contrl_index)

        if add_channel:
            attach_channel_xml()

        if hpt_resize:
            set_feature(new_xml, 'hpt', hpt_resize)

        if htm_state:
            set_feature(new_xml, 'htm', htm_state)

        if cache:
            params["driver_cache"] = cache
        if remove_cache:
            params["enable_cache"] = "no"

        # Change the disk of the vm to shared disk and then start VM
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s", vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check qemu command line after guest is started
        if qemu_check:
            check_content = qemu_check
            if hpt_resize:
                check_content = "%s%s" % (qemu_check, hpt_resize)
            if htm_state:
                check_content = "%s%s" % (qemu_check, htm_state)
            libvirt.check_qemu_cmd_line(check_content)

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        # Preparation for the running guest before migration
        if hpt_resize and hpt_resize != 'disabled':
            trigger_hpt_resize(vm_session)

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm,
                                             args=())
            stress_thread.start()

        if extra.count("timeout-postcopy"):
            func_name = check_timeout_postcopy
        if params.get("actions_during_migration"):
            func_name = do_actions_during_migrate
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options:
            extra = "%s %s" % (extra, postcopy_options)

        if low_speed:
            control_migrate_speed(int(low_speed))
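            # Since libvirt 5.0, post-copy bandwidth is a separate knob,
            # so cap it as well when post-copy options are in use.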
            if postcopy_options and libvirt_version.version_compare(5, 0, 0):
                control_migrate_speed(int(low_speed), opts=postcopy_options)
        # Execute migration process
        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()

            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                            options, thread_timeout=900,
                                            ignore_status=True, virsh_opt=virsh_opt,
                                            func=func_name, extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        check_migration_res(mig_result)

        if add_channel:
            # Get the channel device source path of remote guest
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            remote_virsh_session.dumpxml(vm_name, to_file=file_path,
                                         debug=True,
                                         ignore_status=True)
            local_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            local_vmxml.xmltreefile = xml_utils.XMLTreeFile(file_path)
            host_source = None
            for elem in local_vmxml.devices.by_device_tag('channel'):
                logging.debug("Found channel device {}".format(elem))
                if elem.type_name == channel_type_name:
                    host_source = elem.source.get('path')
                    logging.debug("Remote guest uses {} for channel device".format(host_source))
                    break
            remote_virsh_session.close_session()
            if not host_source:
                test.fail("Cannot find source for %s channel on remote host"
                          % channel_type_name)

            # Prepare to wait for message on remote host from the channel
            cmd_result = remote.run_remote_cmd(cmd_run_in_remote_host % host_source,
                                               cmd_parms,
                                               runner_on_target)

            # Send message from remote guest to the channel file
            remote_vm_obj = utils_test.RemoteVMManager(cmd_parms)
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            remote_vm_obj.setup_ssh_auth(vm_ip, vm_pwd)
            cmd_result = remote_vm_obj.run_command(vm_ip, cmd_run_in_remote_guest_1)
            remote_vm_obj.run_command(vm_ip, cmd_run_in_remote_guest % results_stdout_52lts(cmd_result).strip())
            logging.debug("Sending message is done")

            # Check message on remote host from the channel
            remote.run_remote_cmd(cmd_run_in_remote_host_1, cmd_parms, runner_on_target)
            logging.debug("Receiving message is done")
            remote.run_remote_cmd(cmd_run_in_remote_host_2, cmd_parms, runner_on_target)

        if check_complete_job:
            opts = " --completed"
            check_virsh_command_and_option("domjobinfo", opts)
            if extra.count("comp-xbzrle-cache"):
                params.update({'compare_to_value': cache // 1024})
            check_domjobinfo(params, option=opts)
            if check_domjobinfo_results:
                check_domjobinfo_output(option=opts, is_mig_compelete=True)

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if hpt_resize:
                check_str = hpt_resize
            elif htm_state:
                check_str = htm_state
            if hpt_resize or htm_state:
                xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
                if not re.search(xml_check_after_mig, target_guest_dumpxml):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' in target guest XML:\n%s"
                              % (xml_check_after_mig, target_guest_dumpxml))

            if contrl_index:
                all_ctrls = re.findall(xml_check_after_mig, target_guest_dumpxml)
                if len(all_ctrls) != int(contrl_index) + 1:
                    remote_virsh_session.close_session()
                    test.fail("%s pci-root controllers are expected in guest XML, "
                              "but found %s" % (int(contrl_index) + 1, len(all_ctrls)))
            remote_virsh_session.close_session()

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()
    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if remote_session:
                remote_session.close()

            # Delete files on target
            # Killing the qemu process on target may lead to a problem
            # where the qemu process becomes a zombie whose ppid is 1.
            # As a workaround, remove the files under
            # /var/run/libvirt/qemu to make libvirt work.
            if vm.is_qemu():
                dest_pid_files = os.path.join("/var/run/libvirt/qemu",
                                              vm_name + '*')
                cmd = "rm -f %s" % dest_pid_files
                logging.debug("Delete remote pid files '%s'", dest_pid_files)
                remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

            if extra.count("--tls") and not disable_verify_peer:
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            for update_conf in [libvirtd_conf, qemu_conf]:
                if update_conf:
                    logging.debug("Recover the configurations")
                    libvirt.customize_libvirt_config(None,
                                                     remote_host=True,
                                                     extra_params=params,
                                                     is_recover=True,
                                                     config_object=update_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and
               not is_TestFail and not is_TestSkip):
                raise exception_detail
            else:
                # If any of the above exceptions has been raised, only log
                # the error here to avoid hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
def copied_migration(test, vm, params, blockjob_type=None, block_target="vda"):
    """
    Migrate a VM with storage copied while under stress.
    During the migration, qemu monitor block-job commands are sent.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("remote_user")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    options = "--live %s --unsafe" % copy_option

    # Get vm ip for remote checking
    if vm.is_dead():
        vm.start()
    vm.wait_for_login()
    vms_ip = {}
    vms_ip[vm.name] = vm.get_address()
    logging.debug("VM %s IP: %s", vm.name, vms_ip[vm.name])

    # Start to load stress
    stress_type = params.get("migrate_stress_type")
    if stress_type == "cpu":
        params['stress_args'] = "--cpu 2 --quiet --timeout 60"
    elif stress_type == "memory":
        params['stress_args'] = "--vm 2 --vm-bytes 256M --vm-keep --timeout 60"
    if stress_type is not None:
        utils_test.load_stress("stress_in_vms", [vm], params)

    cp_mig = utlv.MigrationTest()
    migration_thread = threading.Thread(target=cp_mig.thread_func_migration,
                                        args=(vm, dest_uri, options))
    migration_thread.start()
    # Wait for the migration to launch
    time.sleep(5)
    job_ret = virsh.domjobinfo(vm.name, debug=True)
    if job_ret.exit_status:
        test.error("Failed to query the migration job for the blockjob test.")

    # Execute some qemu monitor commands
    pause_cmd = "block-job-pause %s" % block_target
    resume_cmd = "block-job-resume %s" % block_target
    cancel_cmd = "block-job-cancel %s" % block_target
    complete_cmd = "block-job-complete %s" % block_target

    blockjob_failures = []
    try:
        if blockjob_type == "cancel":
            virsh.qemu_monitor_command(vm.name,
                                       cancel_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "pause_resume":
            virsh.qemu_monitor_command(vm.name,
                                       pause_cmd,
                                       debug=True,
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name,
                                       resume_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name,
                                       complete_cmd,
                                       debug=True,
                                       ignore_status=False)
    except process.CmdError as detail:
        blockjob_failures.append(str(detail))

    # Job info FYI
    virsh.domjobinfo(vm.name, debug=True)

    # If a blockjob command already failed, shorten the join timeout.
    if len(blockjob_failures):
        timeout = 30

    migration_thread.join(timeout)
    if migration_thread.is_alive():
        logging.error("Migration thread %s timed out.", migration_thread)
        cp_mig.RET_LOCK.acquire()
        cp_mig.RET_MIGRATION = False
        cp_mig.RET_LOCK.release()

    if len(blockjob_failures):
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        test.fail("Run qemu monitor command failed %s" % blockjob_failures)

    check_ip_failures = []
    if cp_mig.RET_MIGRATION:
        try:
            utils_test.check_dest_vm_network(vm, vms_ip[vm.name], remote_host,
                                             username, password)
        except exceptions.TestFail as detail:
            check_ip_failures.append(str(detail))
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        if blockjob_type in ["cancel", "complete"]:
            test.fail("Storage migration passed even after " "cancellation.")
    else:
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        if blockjob_type in ["cancel", "complete"]:
            logging.error("Expected Migration Error for %s", blockjob_type)
            return
        else:
            test.fail("Command blockjob does not work well under "
                      "storage copied migration.")

    if len(check_ip_failures):
        test.fail("Check IP failed:%s" % check_ip_failures)
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    def check_vm_network_accessed(session=None):
        """
        Check that the VM network is reachable; run this before or
        after migration happens

        :param session: The session object to the host

        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if the result does not match expectations
        """
        if not result:
            test.error("No migration result is returned.")

        logging.info("Migration out: %s", results_stdout_52lts(result).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(result).strip())

        if status_error:  # Migration should fail
            if err_msg:  # Special error messages are expected
                if not re.search(err_msg,
                                 results_stderr_52lts(result).strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" %
                              (err_msg, results_stderr_52lts(result).strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(results_stderr_52lts(result).strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    guest_src_url = params.get("guest_src_url")
    guest_src_path = params.get("guest_src_path",
                                "/var/lib/libvirt/images/guest.img")
    check_disk = params.get("check_disk") == "yes"
    disk_model = params.get("disk_model")
    disk_target = params.get("disk_target", "vda")
    controller_model = params.get("controller_model")

    check_interface = params.get("check_interface") == "yes"
    iface_type = params.get("iface_type", "network")
    iface_model = params.get("iface_model", "virtio")
    iface_params = {
        'type': iface_type,
        'model': iface_model,
        'del_addr': True,
        'source': '{"network": "default"}'
    }

    migr_vm_back = params.get("migrate_vm_back", "no") == "yes"
    status_error = "yes" == params.get("status_error", "no")
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    err_msg = params.get("err_msg")
    vm_session = None
    remote_virsh_session = None
    vm = None
    mig_result = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, back up the XML file for later restoration.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    migrate_setup = libvirt.MigrationTest()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # download guest source and update interface model to keep guest up
        if guest_src_url:
            blk_source = download.get_file(guest_src_url, guest_src_path)
            if not blk_source:
                test.error("Fail to download image.")
            params["blk_source_name"] = blk_source
            if (not check_interface) and iface_model:
                iface_dict = {'model': iface_model}
                libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict)
            if not check_disk:
                params["disk_model"] = "virtio-transitional"

        if check_interface:
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        check_vm_network_accessed()

        # Execute migration process
        migration_test = libvirt.MigrationTest()
        vms = [vm]
        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra)
        mig_result = migration_test.ret

        check_migration_res(mig_result)

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if check_disk:
                check_str = disk_model if disk_model else controller_model
            if check_interface:
                check_str = iface_model
            xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
            if not re.search(xml_check_after_mig, target_guest_dumpxml):
                test.fail("Fail to search '%s' in target guest XML:\n%s" %
                          (xml_check_after_mig, target_guest_dumpxml))
            remote_virsh_session.close_session()

        # Execute migration from remote
        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s" %
                          (cmd, cmd_result))

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        vm.connect_uri = ''
        migration_test.cleanup_dest_vm(vm, src_uri, dest_uri)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migrate_setup = libvirt.MigrationTest()
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
        if guest_src_url and 'blk_source' in locals():
            libvirt.delete_local_disk("file", path=blk_source)
Example #15
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection
    """
    test_dict = dict(params)
    vm_name = test_dict.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = test_dict.get("start_vm", "no")

    # Server and client parameters
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    client_ip = test_dict.get("client_ip")
    client_user = test_dict.get("client_user")
    client_pwd = test_dict.get("client_pwd")
    server_cn = test_dict.get("server_cn")
    client_cn = test_dict.get("client_cn")
    target_ip = test_dict.get("target_ip", "")
    # generate remote IP
    if target_ip == "":
        if server_cn:
            target_ip = server_cn
        elif server_ip:
            target_ip = server_ip
        else:
            target_ip = target_ip
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    # Ceph disk parameters
    driver = test_dict.get("test_driver", "qemu")
    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    source_type = test_dict.get("vm_disk_source_type", "file")
    virsh_options = test_dict.get("virsh_options", "--verbose --live")
    vol_name = test_dict.get("vol_name")
    disk_src_protocol = params.get("disk_source_protocol")
    source_file = test_dict.get("disk_source_file")
    disk_format = test_dict.get("disk_format", "qcow2")
    mon_host = params.get("mon_host")
    ceph_key_opt = ""
    attach_disk = False
    # Disk XML file
    disk_xml = None
    # Define ceph_disk conditional variable
    ceph_disk = "yes" == test_dict.get("ceph_disk")

    # For --postcopy enable
    postcopy_options = test_dict.get("postcopy_options")
    if postcopy_options and not virsh_options.count(postcopy_options):
        virsh_options = "%s %s" % (virsh_options, postcopy_options)
        test_dict['virsh_options'] = virsh_options

    # For bi-directional and tls reverse test
    uri_port = test_dict.get("uri_port", ":22")
    uri_path = test_dict.get("uri_path", "/system")
    src_uri = test_dict.get("migration_source_uri", "qemu:///system")
    uri = "%s%s%s://%s%s%s" % (driver, plus, transport,
                               target_ip, uri_port, uri_path)
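    # Example result (illustrative host): driver='qemu', plus='+',
    # transport='tls', target_ip='server.example.com', uri_port=':22',
    # uri_path='/system' yields 'qemu+tls://server.example.com:22/system'.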
    test_dict["desuri"] = uri

    # Make sure all of parameters are assigned a valid value
    check_parameters(test, test_dict)
    # Verify SSH login to the remote host before going further
    remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                           server_user, server_pwd,
                                           r"[\#\$]\s*$")
    remote_session.close()

    # Set up remote ssh key and remote /etc/hosts file for bi-direction migration
    migr_vm_back = "yes" == test_dict.get("migrate_vm_back", "no")
    if migr_vm_back:
        ssh_key.setup_remote_ssh_key(server_ip, server_user, server_pwd)
        ssh_key.setup_remote_known_hosts_file(client_ip,
                                              server_ip,
                                              server_user,
                                              server_pwd)
    # Reset Vm state if needed
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Setup migration context
    migrate_setup = libvirt.MigrationTest()
    migrate_setup.migrate_pre_setup(test_dict["desuri"], params)

    # Install ceph-common on remote host machine.
    remote_ssh_session = remote.remote_login("ssh", server_ip, "22", server_user,
                                             server_pwd, r"[\#\$]\s*$")
    if not utils_package.package_install(["ceph-common"], remote_ssh_session):
        test.error("Failed ot install required packages on remote host")
    remote_ssh_session.close()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # Record the initial SELinux status on both hosts
        LOCAL_SELINUX_ENFORCING_STATUS = utils_selinux.get_status()
        logging.info("Previous local SELinux status: %s",
                     LOCAL_SELINUX_ENFORCING_STATUS)
        cmd_result = remote.run_remote_cmd('getenforce', params, runner_on_target)
        REMOTE_SELINUX_ENFORCING_STATUS = cmd_result.stdout_text.strip()
        logging.info("Previous remote SELinux status: %s",
                     REMOTE_SELINUX_ENFORCING_STATUS)

        if ceph_disk:
            logging.info("Put local SELinux in permissive mode for the "
                         "ceph migration test")
            utils_selinux.set_status("permissive")

            logging.info("Put remote SELinux in permissive mode")
            cmd = "setenforce permissive"
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = cmd_result.exit_status, cmd_result.stdout_text.strip()
            if status:
                test.error("Failed to set remote SELinux "
                           "in permissive mode")

            # Prepare ceph disk.
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            test_dict['key_file'] = key_file
            test_dict['first_disk'] = vm.get_first_disk_devices()
            ceph_key_opt, secret_uuid = prepare_ceph_disk(test_dict, remote_virsh_dargs, test, runner_on_target)
            host_ip = test_dict.get('mon_host')
            disk_image = test_dict.get('disk_img')

            # Build auth information.
            auth_attrs = {}
            auth_attrs['auth_user'] = params.get("auth_user")
            auth_attrs['secret_type'] = params.get("secret_type")
            auth_attrs['secret_uuid'] = secret_uuid
            build_disk_xml(vm_name, disk_format, host_ip, disk_src_protocol,
                           vol_name, disk_image, auth=auth_attrs)
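            # The generated disk XML is expected to look roughly like
            # (sketch; values are illustrative):
            #   <disk type='network' device='disk'>
            #     <auth username='...'>
            #       <secret type='ceph' uuid='...'/>
            #     </auth>
            #     <source protocol='rbd' name='vol/img'>
            #       <host name='mon-host' port='6789'/>
            #     </source>
            #   </disk>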

            vm_xml_cxt = process.run("virsh dumpxml %s" % vm_name, shell=True).stdout_text
            logging.debug("The VM XML with ceph disk source: \n%s", vm_xml_cxt)
            try:
                if vm.is_dead():
                    vm.start()
            except virt_vm.VMStartError as e:
                test.fail("Failed to start VM %s: %s" % (vm_name, e))

        # Ensure the same VM name doesn't exist on remote host before migrating.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        # Trigger migration
        migrate_vm(test, test_dict)

        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()
            # Pre migration setup for local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name,
                                              virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = cmd_result.exit_status, cmd_result.stdout_text.strip()
            logging.info(output)
            if status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target)
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, output))
    finally:
        logging.info("Recovery test environment")
        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            migrate_setup.migrate_pre_setup(src_uri, params,
                                            cleanup=True)
        # Ensure the VM is cleaned up on the remote host even if migration failed.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        logging.info("Recovery VM XML configration")
        vmxml_backup.sync()
        logging.debug("The current VM XML:\n%s", vmxml_backup.xmltreefile)

        # Clean up ceph environment.
        if disk_src_protocol == "rbd":
            # Clean up secret
            secret_list = get_secret_list()
            if secret_list:
                for secret_uuid in secret_list:
                    virsh.secret_undefine(secret_uuid)
            # Clean up dirty secrets on the remote host if the test involves ceph auth.
            client_name = test_dict.get('client_name')
            client_key = test_dict.get("client_key")
            if client_name and client_key:
                try:
                    remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs)
                    remote_dirty_secret_list = get_secret_list(remote_virsh)
                    for dirty_secret_uuid in remote_dirty_secret_list:
                        remote_virsh.secret_undefine(dirty_secret_uuid)
                except (process.CmdError, remote.SCPError) as detail:
                    test.error(detail)
                finally:
                    remote_virsh.close_session()
            # Delete the disk if it exists.
            disk_src_name = "%s/%s" % (vol_name, test_dict.get('disk_img'))
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, ceph_key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        if LOCAL_SELINUX_ENFORCING_STATUS:
            logging.info("Restore local SELinux to its original mode")
            utils_selinux.set_status(LOCAL_SELINUX_ENFORCING_STATUS)
        if REMOTE_SELINUX_ENFORCING_STATUS:
            logging.info("Restore remote SELinux to its original mode")
            cmd = "setenforce %s" % REMOTE_SELINUX_ENFORCING_STATUS
            remote.run_remote_cmd(cmd, params, runner_on_target)

        # Remove known hosts on local host
        cmd = "ssh-keygen -R  %s" % server_ip
        process.run(cmd, ignore_status=True, shell=True)

        # Remove known hosts on remote host
        cmd = "ssh-keygen -R  %s" % client_ip
        remote.run_remote_cmd(cmd, params, runner_on_target)
Example #16
def run(test, params, env):
    """
    Test command: migrate-compcache <domain> [--size <number>]

    1) Run migrate-compcache command and check return code.
    """
    vm_ref = params.get("vm_ref", "name")
    vm_name = params.get('main_vm')
    start_vm = 'yes' == params.get('start_vm', 'yes')
    pause_vm = 'yes' == params.get('pause_after_start_vm', 'no')
    expect_succeed = 'yes' == params.get('expect_succeed', 'yes')
    size_option = params.get('size_option', 'valid')
    action = params.get('compcache_action', 'get')
    vm = env.get_vm(vm_name)

    # Check if the virsh command migrate-compcache is available
    if not virsh.has_help_command('migrate-compcache'):
        raise error.TestNAError("This version of libvirt does not support "
                                "virsh command migrate-compcache")

    # Prepare the VM state if it's not correct.
    if start_vm and not vm.is_alive():
        vm.start()
    elif not start_vm and vm.is_alive():
        vm.destroy()
    if pause_vm and not vm.is_paused():
        vm.pause()

    # Setup domain reference
    if vm_ref == 'domname':
        vm_ref = vm_name

    # Setup size according to size_option:
    # minimal: Same as memory page size
    # maximal: Same as guest memory
    # empty: An empty string
    # small: One byte less than page size
    # large: Larger than guest memory
    # huge : Largest int64
    page_size = get_page_size()
    if size_option == 'minimal':
        size = str(page_size)
    elif size_option == 'maximal':
        size = str(vm.get_max_mem() * 1024)
    elif size_option == 'empty':
        size = '""'
    elif size_option == 'small':
        size = str(page_size - 1)
    elif size_option == 'large':
        # The requested size must exceed guest memory; add ~50MB to the
        # max memory (in bytes) to guarantee that.
        size = str(vm.get_max_mem() * 1024 + 50000000)
    elif size_option == 'huge':
        size = str(2**64 - 1)
    else:
        size = size_option
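    # Worked example with a 4 KiB page and 1 GiB of guest memory
    # (vm.get_max_mem() == 1048576 KiB): minimal='4096', small='4095',
    # maximal='1073741824', large='1123741824',
    # huge='18446744073709551615'.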

    # If we need to get, just omit the size option
    if action == 'get':
        size = None

    # Run testing command
    result = virsh.migrate_compcache(vm_ref, size=size)
    logging.debug(result)

    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    check_job_compcache = False
    if not remote_host.count(
            "EXAMPLE") and size is not None and expect_succeed:
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_pwd, port=22)
        if vm.is_dead():
            vm.start()
        if vm.is_paused():
            vm.resume()
        vm.wait_for_login()
        # Do actual migration to verify compression cache of migrate jobs
        command = "virsh migrate %s %s --compressed" % (vm_name, remote_uri)
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # Give the migration job enough time to start
        t = 0
        while t < 5:
            jobtype = vm.get_job_type()
            if "None" == jobtype:
                t += 1
                time.sleep(1)
                continue
            elif jobtype is False:
                logging.error("Get job type failed.")
                break
            else:
                logging.debug("Job started: %s", jobtype)
                break

        jobinfo = virsh.domjobinfo(vm_ref, debug=True,
                                   ignore_status=True).stdout
        check_job_compcache = True
        # Kill the migration process if it is still running
        if p.poll() is None:
            try:
                p.kill()
            except OSError:
                pass

        # Cleanup in case of successful migration
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # Shut down the VM to make sure the compcache setting is cleared
    if vm.is_alive():
        vm.destroy()

    # Check test result
    if expect_succeed:
        if result.exit_status != 0:
            raise error.TestFail(
                'Expected succeed, but failed with result:\n%s' % result)
        if check_job_compcache:
            for line in jobinfo.splitlines():
                detail = line.split(":")
                if detail[0].count("Compression cache"):
                    value = detail[-1].split()[0].strip()
                    value = int(float(value))
                    unit = detail[-1].split()[-1].strip()
                    if unit == "KiB":
                        size = int(int(size) / 1024)
                    elif unit == "MiB":
                        size = int(int(size) / 1048576)
                    elif unit == "GiB":
                        size = int(int(size) / 1073741824)
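                    # e.g. a requested size of 67108864 bytes reported as
                    # '64.000 MiB' compares as 64 == 67108864 // 1048576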
                    if value != size:
                        raise error.TestFail("Compression cache does not "
                                             "match the configured size")
                    else:
                        return
            raise error.TestFail("Failed to get compression cache from "
                                 "the job info.")
    elif not expect_succeed:
        if result.exit_status == 0:
            raise error.TestFail(
                'Expected fail, but succeed with result:\n%s' % result)
    def verify_migration_speed(test, params, env):
        """
        Check whether the migration speed setting takes effect by
        migrating twice with different bandwidths.
        """
        vms = env.get_all_vms()
        src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
        dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")

        if not len(vms):
            raise error.TestNAError("Please provide migrate_vms for test.")

        if src_uri.count('///') or src_uri.count('EXAMPLE'):
            raise error.TestNAError("The src_uri '%s' is invalid" % src_uri)

        if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
            raise error.TestNAError("The dest_uri '%s' is invalid" % dest_uri)

        remote_host = params.get("migrate_dest_host")
        username = params.get("migrate_dest_user", "root")
        password = params.get("migrate_dest_pwd")
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        # Check migrated vms' state
        for vm in vms:
            if vm.is_dead():
                vm.start()

        load_vm_names = params.get("load_vms").split()
        # vms for load
        load_vms = []
        for vm_name in load_vm_names:
            load_vms.append(
                libvirt_vm.VM(vm_name, params, test.bindir,
                              env.get("address_cache")))
        params["load_vms"] = load_vms

        bandwidth = int(params.get("bandwidth", "4"))
        stress_type = params.get("stress_type", "load_vms_booting")
        migration_type = params.get("migration_type", "orderly")
        thread_timeout = int(params.get("thread_timeout", "60"))
        delta = float(params.get("allowed_delta", "0.1"))
        virsh_migrate_timeout = int(params.get("virsh_migrate_timeout", "60"))
        # virsh migrate options
        virsh_migrate_options = "--live --unsafe --timeout %s" % virsh_migrate_timeout
        # Migrate vms to remote host
        mig_first = utlv.MigrationTest()
        virsh_dargs = {"debug": True}
        for vm in vms:
            set_get_speed(vm.name, bandwidth, virsh_dargs=virsh_dargs)
            vm.wait_for_login()
        utils_test.load_stress(stress_type, vms, params)
        mig_first.do_migration(vms,
                               src_uri,
                               dest_uri,
                               migration_type,
                               options=virsh_migrate_options,
                               thread_timeout=thread_timeout)
        for vm in vms:
            mig_first.cleanup_dest_vm(vm, None, dest_uri)
            # Keep it clean for second migration
            if vm.is_alive():
                vm.destroy()

        # Migrate vms again with new bandwidth
        second_bandwidth = params.get("second_bandwidth", "times")
        if second_bandwidth == "half":
            second_bandwidth = bandwidth / 2
            speed_times = 2
        elif second_bandwidth == "times":
            second_bandwidth = bandwidth * 2
            speed_times = 0.5
        elif second_bandwidth == "same":
            second_bandwidth = bandwidth
            speed_times = 1

        # Migrate again
        for vm in vms:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
            set_get_speed(vm.name, second_bandwidth, virsh_dargs=virsh_dargs)
        utils_test.load_stress(stress_type, vms, params)
        mig_second = utlv.MigrationTest()
        mig_second.do_migration(vms,
                                src_uri,
                                dest_uri,
                                migration_type,
                                options=virsh_migrate_options,
                                thread_timeout=thread_timeout)
        for vm in vms:
            mig_second.cleanup_dest_vm(vm, None, dest_uri)

        # Collect failures from the migration time comparison below
        fail_info = []

        for vm in vms:
            first_time = mig_first.mig_time[vm.name]
            second_time = mig_second.mig_time[vm.name]
            logging.debug(
                "Migration time for %s:\n"
                "Time with Bandwidth '%s' first: %s\n"
                "Time with Bandwidth '%s' second: %s", vm.name, bandwidth,
                first_time, second_bandwidth, second_time)
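            # Migration time should scale roughly inversely with bandwidth,
            # so first_time * speed_times is expected to approximate
            # second_time within the allowed delta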
            shift = float(abs(first_time * speed_times -
                              second_time)) / float(second_time)
            logging.debug("Shift:%s", shift)
            if delta < shift:
                fail_info.append(
                    "Spent time for migrating %s is intolerable." % vm.name)

        # Check the speed comparison results
        if fail_info:
            raise error.TestFail(fail_info)
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    def check_vm_network_accessed():
        """
        The operations to the VM need to be done before migration happens
        """
        # 1. Confirm local VM can be accessed through network.
        logging.info("Check local VM network connectivity before migrating")
        s_ping, o_ping = utils_test.ping(vm.get_address(),
                                         count=10,
                                         timeout=20)
        logging.info(o_ping)
        if s_ping != 0:
            test.error("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'" %
                        (command, option))

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters
        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line

        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        logging.info("Migration out: %s",
                     results_stdout_52lts(migration_res).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(migration_res).strip())
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        run_remote_cmd(cmd)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing the uni-direction migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()

        except Exception as detail:
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_remote_cmd(cmd):
        """
        A function to run a command on remote host.

        :param cmd: the command to be executed

        :return: CmdResult object
        """
        remote_runner = remote.RemoteRunner(host=server_ip,
                                            username=server_user,
                                            password=server_pwd)
        cmdResult = remote_runner.run(cmd, ignore_status=True)
        if cmdResult.exit_status:
            test.fail("Failed to run '%s' on remote: %s" %
                      (cmd, results_stderr_52lts(cmdResult).strip()))
        return cmdResult

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get(
            "stress_args", "--cpu 8 --io 4 "
            "--vm 2 --vm-bytes 128M "
            "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            logging.debug(detail)

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
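        # Wait until the given --timeout-postcopy period has elapsed, after
        # which the migration should have switched to postcopy mode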
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            test.fail("After timeout '%s' seconds, the vm state on "
                      "target host should be 'running', but '%s' found"
                      % (timeout, vm_state))
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get the smallest power of two that is greater than pagesize.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria, e.g. 131072 for an
                 input of 65536
        """
        def calculate(num):
            # A power of two has exactly one bit set, so num & (num - 1) == 0
            result = num & (num - 1)
            return (result == 0)

        item = pagesize
        found = False
        while not found:
            item += 1
            found = calculate(item)
        logging.debug(
            "%d is the smallest power of two greater than '%s'", item,
            pagesize)
        return item

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"ignore_status": True, "debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False
    asynch_migration = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up the XML file.
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Change the disk of the vm to shared disk
        libvirt.set_vm_disk(vm, params)

        if extra.count("--tls"):
            qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            # Setup qemu configure
            logging.debug("Configure the qemu")
            cleanup_libvirtd_log(log_file)
            qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict,
                                                         config_type="qemu",
                                                         remote_host=True,
                                                         extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, remote_host=True, extra_params=params)
        if not vm.is_alive():
            vm.start()
        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

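            # Run stress in a background thread so the migration proceeds
            # while guest memory is being dirtied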
            stress_thread = threading.Thread(target=run_stress_in_vm, args=())
            stress_thread.start()

        if extra.count("timeout-postcopy"):
            asynch_migration = True
            func_name = check_timeout_postcopy
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options and not extra.count(postcopy_options):
            extra = "%s %s" % (extra, postcopy_options)

        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()

            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms,
                                            None,
                                            dest_uri,
                                            'orderly',
                                            options,
                                            thread_timeout=900,
                                            ignore_status=True,
                                            func=func_name,
                                            extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        if int(mig_result.exit_status) != 0:
            test.fail(results_stderr_52lts(mig_result).strip())
        if check_complete_job:
            search_str_domjobinfo = params.get("search_str_domjobinfo", None)
            opts = "--completed"
            args = vm_name + " " + opts
            check_virsh_command_and_option("domjobinfo", opts)
            jobinfo = results_stdout_52lts(
                virsh.domjobinfo(args, debug=True,
                                 ignore_status=True)).strip()
            logging.debug("Local job info on completion:\n%s", jobinfo)
            if extra.count("comp-xbzrle-cache") and search_str_domjobinfo:
                search_str_domjobinfo = "%s %s" % (search_str_domjobinfo,
                                                   cache // 1024)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    test.fail("Fail to search '%s' on local:\n%s" %
                              (search_str_domjobinfo, jobinfo))
            # Check remote host
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            jobinfo = results_stdout_52lts(
                remote_virsh_session.domjobinfo(args,
                                                debug=True,
                                                ignore_status=True)).strip()
            logging.debug("Remote job info on completion:\n%s", jobinfo)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' on remote:\n%s" %
                              (search_str_domjobinfo, jobinfo))
            remote_virsh_session.close_session()
        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            run_remote_cmd(cmd)

    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls"):
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and not is_TestFail
                    and not is_TestSkip):
                raise exception_detail
            else:
                # if any of the above exceptions has been raised, only log
                # the error here to avoid hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    def set_feature(vmxml, feature, value):
        """
        Set guest features for PPC

        :param state: the htm status
        :param vmxml: guest xml
        """
        features_xml = vm_xml.VMFeaturesXML()
        if feature == 'hpt':
            features_xml.hpt_resizing = value
        elif feature == 'htm':
            features_xml.htm = value
        vmxml.features = features_xml
        vmxml.sync()

    def trigger_hpt_resize(session):
        """
        Check the HPT order file and dmesg

        :param session: the session to guest

        :raise: test.fail if required message is not found
        """
        hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        hpt_order += 1
        cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
        cmd_result = session.cmd_status_output(cmd)
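        # session.cmd_status_output() returns (status, combined_output);
        # the combined output is used for both stdout and stderr below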
        result = process.CmdResult(stderr=cmd_result[1],
                                   stdout=cmd_result[1],
                                   exit_status=cmd_result[0])
        libvirt.check_exit_status(result)
        dmesg = session.cmd('dmesg')
        dmesg_content = params.get('dmesg_content').split('|')
        for content in dmesg_content:
            if content % hpt_order not in dmesg:
                test.fail("'%s' is missing in dmesg" % (content % hpt_order))
            else:
                logging.info("'%s' is found in dmesg", content % hpt_order)

    def check_qemu_cmd_line(content, err_ignore=False):
        """
        Check the specified content in the qemu command line

        :param content: the desired string to search
        :param err_ignore: True to return False when fail
                           False to raise exception when fail

        :return: True if exist, False otherwise
        """
        cmd = 'ps -ef|grep qemu|grep -v grep'
        qemu_line = results_stdout_52lts(process.run(cmd, shell=True))
        if content not in qemu_line:
            if err_ignore:
                return False
            else:
                test.fail("Expected '%s' was not found in "
                          "qemu command line" % content)
        return True

    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host

        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'" %
                        (command, option))

    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add multiple devices

        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters
        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line

        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        logging.info("Migration out: %s",
                     results_stdout_52lts(migration_res).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(migration_res).strip())
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        run_remote_cmd(cmd)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing the uni-direction migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()

        except Exception as detail:
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_remote_cmd(cmd):
        """
        A function to run a command on remote host.

        :param cmd: the command to be executed

        :return: CmdResult object
        """
        remote_runner = remote.RemoteRunner(host=server_ip,
                                            username=server_user,
                                            password=server_pwd)
        cmdResult = remote_runner.run(cmd, ignore_status=True)
        if cmdResult.exit_status:
            test.fail("Failed to run '%s' on remote: %s" %
                      (cmd, results_stderr_52lts(cmdResult).strip()))
        return cmdResult

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get(
            "stress_args", "--cpu 8 --io 4 "
            "--vm 2 --vm-bytes 128M "
            "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            logging.debug(detail)

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            test.fail("After timeout '%s' seconds, the vm state on "
                      "target host should be 'running', but '%s' found"
                      % (timeout, vm_state))
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get the smallest power of two that is greater than pagesize.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def calculate(num):
            # A power of two has exactly one bit set, so num & (num - 1) == 0
            result = num & (num - 1)
            return (result == 0)

        item = pagesize
        found = False
        while not found:
            item += 1
            found = calculate(item)
        logging.debug(
            "%d is the smallest power of two greater than '%s'", item,
            pagesize)
        return item

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"ignore_status": True, "debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    contrl_index = params.get("new_contrl_index", None)
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    hpt_resize = params.get("hpt_resize", None)
    htm_state = params.get("htm_state", None)
    qemu_check = params.get("qemu_check", None)
    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    arch = platform.machine()
    if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch:
        test.cancel("The case is PPC only.")

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False
    asynch_migration = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up the XML file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Change VM xml in below part
        if contrl_index:
            new_xml.remove_all_device_by_type('controller')
            logging.debug("After removing controllers, current XML:\n%s\n",
                          new_xml)
            add_ctrls(new_xml, dev_index=contrl_index)

        if extra.count("--tls"):
            qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            # Setup qemu configure
            logging.debug("Configure the qemu")
            cleanup_libvirtd_log(log_file)
            qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict,
                                                         config_type="qemu",
                                                         remote_host=True,
                                                         extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, remote_host=True, extra_params=params)
        if hpt_resize:
            set_feature(new_xml, 'hpt', hpt_resize)

        if htm_state:
            set_feature(new_xml, 'htm', htm_state)
        # Change the disk of the vm to shared disk and then start VM
        libvirt.set_vm_disk(vm, params)
        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        if qemu_check:
            check_content = qemu_check
            if hpt_resize:
                check_content = "%s%s" % (qemu_check, hpt_resize)
            if htm_state:
                check_content = "%s%s" % (qemu_check, htm_state)
            check_qemu_cmd_line(check_content)

        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

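        # Exercise HPT resizing inside the guest before migrating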
        if hpt_resize and hpt_resize != 'disabled':
            trigger_hpt_resize(vm_session)

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm, args=())
            stress_thread.start()

        if extra.count("timeout-postcopy"):
            asynch_migration = True
            func_name = check_timeout_postcopy
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options and not extra.count(postcopy_options):
            extra = "%s %s" % (extra, postcopy_options)

        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()

            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms,
                                            None,
                                            dest_uri,
                                            'orderly',
                                            options,
                                            thread_timeout=900,
                                            ignore_status=True,
                                            func=func_name,
                                            extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        if int(mig_result.exit_status) != 0:
            test.fail(results_stderr_52lts(mig_result).strip())

        if check_complete_job:
            search_str_domjobinfo = params.get("search_str_domjobinfo", None)
            opts = "--completed"
            args = vm_name + " " + opts
            check_virsh_command_and_option("domjobinfo", opts)
            jobinfo = results_stdout_52lts(
                virsh.domjobinfo(args, debug=True,
                                 ignore_status=True)).strip()
            logging.debug("Local job info on completion:\n%s", jobinfo)
            if extra.count("comp-xbzrle-cache") and search_str_domjobinfo:
                search_str_domjobinfo = "%s %s" % (search_str_domjobinfo,
                                                   cache // 1024)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    test.fail("Fail to search '%s' on local:\n%s" %
                              (search_str_domjobinfo, jobinfo))
            # Check remote host
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            jobinfo = results_stdout_52lts(
                remote_virsh_session.domjobinfo(args,
                                                debug=True,
                                                ignore_status=True)).strip()
            logging.debug("Remote job info on completion:\n%s", jobinfo)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' on remote:\n%s" %
                              (search_str_domjobinfo, jobinfo))
            remote_virsh_session.close_session()

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            run_remote_cmd(cmd)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if hpt_resize:
                check_str = hpt_resize
            elif htm_state:
                check_str = htm_state
            if hpt_resize or htm_state:
                xml_check_after_mig = "%s'%s'" % (xml_check_after_mig,
                                                  check_str)
                if not re.search(xml_check_after_mig, target_guest_dumpxml):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' in target guest XML:\n%s" %
                              (xml_check_after_mig, target_guest_dumpxml))

            if contrl_index:
                all_ctrls = re.findall(xml_check_after_mig,
                                       target_guest_dumpxml)
                if len(all_ctrls) != int(contrl_index) + 1:
                    remote_virsh_session.close_session()
                    test.fail(
                        "%s pci-root controllers are expected in guest XML, "
                        "but found %s" %
                        (int(contrl_index) + 1, len(all_ctrls)))
            remote_virsh_session.close_session()

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        check_vm_network_accessed(server_session)
        server_session.close()
    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls"):
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and not is_TestFail
                    and not is_TestSkip):
                raise exception_detail
            else:
                # if any of the above exceptions has been raised, only log
                # the error here to avoid hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
Example #20
def run(test, params, env):
    """
    Test virsh migrate when disks are virtio-scsi.
    """
    def check_vm_state(vm, state):
        """
        Return True if vm is in the expected state.
        """
        try:
            actual_state = vm.state()
        except process.CmdError:
            return False
        return actual_state == state

    def check_disks_in_vm(vm, vm_ip, disks_list=[], runner=None):
        """
        Check disks attached to vm.
        """
        fail_list = []
        while len(disks_list):
            disk = disks_list.pop()
            if runner:
                check_cmd = ("ssh %s \"dd if=/dev/urandom of=%s bs=1 "
                             "count=1024\"" % (vm_ip, disk))
                try:
                    logging.debug(runner.run(check_cmd))
                    continue
                except process.CmdError as detail:
                    logging.debug("Remote checking failed:%s", detail)
                    fail_list.append(disk)
            else:
                check_cmd = "dd if=/dev/urandom of=%s bs=1 count=1024"
                session = vm.wait_for_login()
                cs = session.cmd_status(check_cmd)
                if cs:
                    fail_list.append(disk)
                session.close()
        if len(fail_list):
            test.fail("Checking attached devices failed:%s" % fail_list)

    def get_disk_id(device):
        """
        Find the /dev/disk/by-id/ entry that resolves to the given device.
        """
        output = process.run("ls /dev/disk/by-id/", shell=True).stdout_text
        for line in output.splitlines():
            disk_ids = line.split()
            for disk_id in disk_ids:
                disk_id_path = os.path.join("/dev/disk/by-id", disk_id)
                disk = os.path.basename(
                    process.run("readlink %s" % disk_id_path,
                                shell=True).stdout_text)
                if disk == os.path.basename(device):
                    return disk_id_path
        return None

    def cleanup_ssh_config(vm):
        session = vm.wait_for_login()
        session.cmd("rm -f ~/.ssh/authorized_keys")
        session.cmd("rm -f ~/.ssh/id_rsa*")
        session.close()

    vm = env.get_vm(params.get("migrate_main_vm"))
    source_type = params.get("disk_source_type", "file")
    device_type = params.get("disk_device_type", "disk")
    disk_format = params.get("disk_format_type", "raw")
    if source_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
        block_device = params.get("disk_block_device", "/dev/EXAMPLE")
        if block_device.count("EXAMPLE"):
            # Prepare host parameters
            local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
            remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
            remote_user = params.get("migrate_dest_user", "root")
            remote_passwd = params.get("migrate_dest_pwd")
            if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
                test.cancel("Config remote or local host first.")
            rdm_params = {
                'remote_ip': remote_host,
                'remote_user': remote_user,
                'remote_pwd': remote_passwd
            }
            rdm = utils_test.RemoteDiskManager(rdm_params)
            # Try to build an iscsi device
            # For local, target is a device name
            target = utlv.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True, emulated_image="emulated-iscsi")
            logging.debug("Created target: %s", target)
            try:
                # Attach this iscsi device both local and remote
                remote_device = rdm.iscsi_login_setup(local_host, target)
            except Exception as detail:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Attach iscsi device on remote failed:%s" % detail)

            # Use id to get same path on local and remote
            block_device = get_disk_id(target)
            if block_device is None:
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Set iscsi device couldn't find id?")

    srcuri = params.get("virsh_migrate_srcuri")
    dsturi = params.get("virsh_migrate_dsturi")
    remote_ip = params.get("remote_ip")
    username = params.get("remote_user", "root")
    host_pwd = params.get("remote_pwd")
    # Connection to remote, init here for cleanup
    runner = None
    # Identify easy configuration mistakes early
    warning_text = ("Migration VM %s URI %s appears problematic; "
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using "
                    "fully-qualified network-based style.")

    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        test.cancel(warning_text % ('source', srcuri))

    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        test.cancel(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")

    status_error = "yes" == params.get("status_error", "no")
    if source_type == "file" and device_type == "lun":
        status_error = True

    try:
        # For safety and simplicity, we'd better define a new vm
        new_vm_name = "%s_vsmtest" % vm.name
        mig = utlv.MigrationTest()
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

        # Change the disk of the vm to shared disk
        # Detach exist devices
        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm.name,
                                         device,
                                         "--config",
                                         debug=True)
            if s_detach.exit_status:
                test.error("Detach %s failed before test." % device)

        # Attach system image as vda
        # Then added scsi disks will be sda,sdb...
        attach_args = "--subdriver %s --config" % sys_image_fmt
        virsh.attach_disk(vm.name,
                          sys_image_source,
                          "vda",
                          attach_args,
                          debug=True)

        vms = [vm]

        def start_check_vm(vm):
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                if status_error:
                    logging.debug("Expected failure:%s", detail)
                    return None, None
                else:
                    raise
            vm.wait_for_login()

            # Confirm VM can be accessed through network.
            # And this ip will be used on remote after migration
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=60)
            logging.info(o_ping)
            if s_ping != 0:
                test.fail("%s did not respond after several "
                          "seconds with attaching new devices." % vm.name)
            return vm_ip, vm_pwd

        options = "--live --unsafe"
        # Do migration before attaching new devices
        if migrate_in_advance:
            vm_ip, vm_pwd = start_check_vm(vm)
            cleanup_ssh_config(vm)
            mig_thread = threading.Thread(target=mig.thread_func_migration,
                                          args=(vm, dsturi, options))
            mig_thread.start()
            # Make sure migration is running
            time.sleep(2)

        # Attach other disks
        params['added_disk_target'] = "scsi"
        params['target_bus'] = "scsi"
        params['device_type'] = device_type
        params['type_name'] = source_type
        params['added_disk_format'] = disk_format
        if migrate_in_advance:
            params["attach_disk_config"] = "no"
            attach_disk_config = False
        else:
            params["attach_disk_config"] = "yes"
            attach_disk_config = True
        try:
            if source_type == "file":
                utlv.attach_disks(vm, "%s/image" % created_img_path, None,
                                  params)
            else:
                ret = utlv.attach_additional_device(vm.name,
                                                    "sda",
                                                    block_device,
                                                    params,
                                                    config=attach_disk_config)
                if ret.exit_status:
                    test.fail(ret)
        except (exceptions.TestFail, process.CmdError) as detail:
            if status_error:
                logging.debug("Expected failure:%s", detail)
                return
            else:
                raise

        if migrate_in_advance:
            mig_thread.join(60)
            if mig_thread.is_alive():
                mig.RET_LOCK.acquire()
                mig.MIGRATION = False
                mig.RET_LOCK.release()
        else:
            vm_ip, vm_pwd = start_check_vm(vm)

        # If we got the expected failure when starting the vm, end the test
        if vm_ip is None and status_error:
            return

        # Start checking before migration and go on checking after migration
        disks = []
        for target in list(vm.get_disk_devices().keys()):
            if target != "vda":
                disks.append("/dev/%s" % target)

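        # Check half of the attached disks before migration and the other
        # half on the destination afterwards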
        checked_count = int(params.get("checked_count", 0))
        disks_before = disks[:(checked_count // 2)]
        disks_after = disks[(checked_count // 2):checked_count]
        logging.debug(
            "Disks to be checked:\nBefore migration:%s\n"
            "After migration:%s", disks_before, disks_after)

        options = "--live --unsafe"
        if not migrate_in_advance:
            cleanup_ssh_config(vm)
            mig.do_migration(vms, None, dsturi, "orderly", options, 120)

        if mig.RET_MIGRATION:
            utils_test.check_dest_vm_network(vm, vm_ip, remote_ip, username,
                                             host_pwd)
            runner = remote.RemoteRunner(host=remote_ip,
                                         username=username,
                                         password=host_pwd)
            # After migration, config autologin to vm
            ssh_key.setup_remote_ssh_key(vm_ip, "root", vm_pwd)
            check_disks_in_vm(vm, vm_ip, disks_after, runner)

            if migrate_in_advance:
                test.fail("Migration before attaching successfully, "
                          "but not expected.")

    finally:
        # Cleanup remote vm
        if srcuri != dsturi:
            mig.cleanup_dest_vm(vm, srcuri, dsturi)
        # Cleanup created vm anyway
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(new_vm_name)

        # Cleanup iscsi device for block if it is necessary
        if source_type == "block":
            if params.get("disk_block_device",
                          "/dev/EXAMPLE").count("EXAMPLE"):
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False,
                                            emulated_image="emulated-iscsi")

        if runner:
            runner.session.close()
        process.run("rm -f %s/*vsmtest" % created_img_path, shell=True)
Example #21
def run(test, params, env):
    """
    Test command: virsh domjobabort.

    The command can abort the currently running domain job.
    1.Prepare test environment,destroy or suspend a VM.
    2.Do action to get a subprocess(dump, save, managedsave).
    3.Perform virsh domjobabort operation to abort VM's job.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    original_speed = virsh.migrate_getspeed(vm_name).stdout.strip()

    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute background virsh command, return subprocess w/o waiting for exit()

        :param cmd : virsh command.
        :param guest_name : VM's name
        :param file_source : virsh command's file option.
        """
        args = ""
        if action == "managedsave":
            file = ""
        elif action == "migrate":
            # Slow down migration for domjobabort
            virsh.migrate_setspeed(vm_name, "1")
            file = remote_uri
            args = "--unsafe"
        command = "virsh %s %s %s %s" % (action, vm_name, file, args)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    action = params.get("jobabort_action", "dump")
    dump_opt = params.get("dump_opt", None)
    status_error = params.get("status_error", "no")
    job = params.get("jobabort_job", "yes")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domjobabort.tmp")
    tmp_pipe = os.path.join(data_dir.get_tmp_dir(), "domjobabort.fifo")
    vm_ref = params.get("jobabort_vm_ref")
    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    saved_data = None

    # Build job action
    if dump_opt:
        action = "dump --crash"

    if action == "managedsave":
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        virsh.save(vm_name, tmp_file, ignore_status=True)

    if action == "migrate":
        if remote_host.count("EXAMPLE"):
            test.cancel("Remote host should be configured for migrate.")
        else:
            # Config ssh autologin for remote host
            ssh_key.setup_ssh_key(remote_host,
                                  remote_user,
                                  remote_pwd,
                                  port=22)

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # Get the subprocess of VM.
    # The command's effect is to abort the currently running domain job.
    # So before doing the "domjobabort" action, we must create a job on
    # the domain.
    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        os.mkfifo(tmp_pipe)
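        # The FIFO provides back-pressure: "virsh save"/"virsh dump"
        # block once the pipe buffer fills up, which keeps the domain
        # job alive long enough for domjobabort to catch it.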

        process = get_subprocess(action, vm_name, tmp_pipe, remote_uri)

        saved_data = None
        if action == "restore":
            with open(tmp_file, 'r') as tmp_f:
                saved_data = tmp_f.read(10 * 1024 * 1024)
            f = open(tmp_pipe, 'w')
            f.write(saved_data[:1024 * 1024])
        elif action == "migrate":
            f = None
        else:
            f = open(tmp_pipe, 'rb')
            dummy = f.read(1024 * 1024).decode(locale.getpreferredencoding(),
                                               'ignore')

    # Give enough time for starting job
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break
    ret = virsh.domjobabort(vm_ref, ignore_status=True, debug=True)
    status = ret.exit_status

    if process and f:
        if saved_data:
            f.write(saved_data[1024 * 1024:])
        else:
            dummy = f.read()
        f.close()

        try:
            os.unlink(tmp_pipe)
        except OSError as detail:
            logging.info("Can't remove %s: %s", tmp_pipe, detail)
        try:
            os.unlink(tmp_file)
        except OSError as detail:
            logging.info("Cant' remove %s: %s", tmp_file, detail)

    # Recover the environment.
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        if process.poll() is None:  # still running
            try:
                process.kill()
            except OSError:
                pass

    if action == "migrate":
        # Recover migration speed
        virsh.migrate_setspeed(vm_name, original_speed)
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
Example #22
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")

    status_error = "yes" == params.get("status_error", "no")
    if source_type == "file" and device_type == "lun":
        status_error = True

    try:
        # For safety and simplicity, we'd better define a new vm
        new_vm_name = "%s_vsmtest" % vm.name
        mig = utlv.MigrationTest()
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

        # Change the disk of the vm to shared disk
        # Detach existing devices
        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm.name,
                                         device,
                                         "--config",
                                         debug=True)
            if not s_detach:
                # assumed handling: abort when detaching fails
                test.error("Detach %s failed before test." % device)
Example #23
def run(test, params, env):
    """
    Test migration under stress.
    """
    vm_names = params.get("migration_vms").split()
    if len(vm_names) < 2:
        raise error.TestNAError("Provide enough vms for migration first.")

    src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
    if src_uri.count('///') or src_uri.count('EXAMPLE'):
        raise error.TestNAError("The src_uri '%s' is invalid", src_uri)

    dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")
    if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
        raise error.TestNAError("The dest_uri '%s' is invalid", dest_uri)

    # Instances of the vms to be migrated
    vms = []
    for vm_name in vm_names:
        vms.append(
            libvirt_vm.VM(vm_name, params, test.bindir,
                          env.get("address_cache")))

    load_vm_names = params.get("load_vms").split()
    # vms for load
    load_vms = []
    for vm_name in load_vm_names:
        load_vms.append(
            libvirt_vm.VM(vm_name, params, test.bindir,
                          env.get("address_cache")))
    params['load_vms'] = load_vms

    cpu = int(params.get("smp", 1))
    memory = int(params.get("mem")) * 1024
    stress_type = params.get("migration_stress_type")
    vm_bytes = params.get("stress_vm_bytes")
    stress_args = params.get("stress_args")
    migration_type = params.get("migration_type")
    start_migration_vms = "yes" == params.get("start_migration_vms", "yes")
    thread_timeout = int(params.get("thread_timeout", 120))
    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    prompt = params.get("shell_prompt", r"[\#\$]")

    # Set vm_bytes for start_cmd
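    # (units assumed here: utils_memory.memtotal() reports KiB and "mem"
    #  is MiB, so vm_reserved/vm_bytes are in KiB; the 524288 KiB added
    #  for "shortage" asks for 512 MiB more than the free memory)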
    mem_total = utils_memory.memtotal()
    vm_reserved = len(vms) * memory
    if vm_bytes == "half":
        vm_bytes = (mem_total - vm_reserved) // 2
    elif vm_bytes == "shortage":
        vm_bytes = mem_total - vm_reserved + 524288
    if vm_bytes is not None:
        params["stress_args"] = stress_args % vm_bytes

    for vm in vms:
        # Keep vm dead for edit
        if vm.is_alive():
            vm.destroy()
        set_cpu_memory(vm.name, cpu, memory)

    try:
        vm_ipaddr = {}
        if start_migration_vms:
            for vm in vms:
                vm.start()
                vm.wait_for_login()
                vm_ipaddr[vm.name] = vm.get_address()
                # TODO: recover vm if start failed?
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        do_stress_migration(vms, src_uri, dest_uri, stress_type,
                            migration_type, params, thread_timeout)
        # Check network of vms on destination
        if start_migration_vms and migration_type != "cross":
            for vm in vms:
                check_dest_vm_network(vm, vm_ipaddr[vm.name], remote_host,
                                      username, password, prompt)
    finally:
        logging.debug("Cleanup vms...")
        for vm_name in vm_names:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get("address_cache"))
            utlv.MigrationTest().cleanup_dest_vm(vm, None, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        env.clean_objects()
Example #24
def run(test, params, env):
    """
    Test migration with glusterfs.
    """
    def create_or_clean_backend_dir(g_uri, params, session=None,
                                    is_clean=False):
        """
        Create/cleanup backend directory

        :params g_uri: glusterfs uri
        :params params: the parameters to be checked
        :params session: VM/remote session object
        :params is_cleanup: True for cleanup backend directory;
                            False for create one.
        :return: gluster_img if is_clean is equal to True
        """
        mount_point = params.get("gluster_mount_dir")
        is_symlink = params.get("gluster_create_symlink") == "yes"
        symlink_name = params.get("gluster_symlink")
        gluster_img = None
        if not is_clean:
            if not utils_misc.check_exists(mount_point, session):
                utils_misc.make_dirs(mount_point, session)

            if gluster.glusterfs_is_mounted(mount_point, session):
                gluster.glusterfs_umount(g_uri, mount_point, session)
            gluster.glusterfs_mount(g_uri, mount_point, session)

            gluster_img = os.path.join(mount_point, disk_img)
            if is_symlink:
                utils_misc.make_symlink(mount_point, symlink_name)
                utils_misc.make_symlink(mount_point, symlink_name, remote_session)
                gluster_img = os.path.join(symlink_name, disk_img)
            return gluster_img
        else:
            if is_symlink:
                utils_misc.rm_link(symlink_name, session)

            gluster.glusterfs_umount(g_uri, mount_point, session)
            if utils_misc.check_exists(mount_point, session):
                utils_misc.safe_rmdir(mount_point, session=session)

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters

        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line

        :return: CmdResult object
        """
        # Migrate the guest.
        virsh_args.update({"ignore_status": True})
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        if not result:
            test.error("No migration result is returned.")
        logging.info("Migration out: %s", result.stdout_text.strip())
        logging.info("Migration error: %s", result.stderr_text.strip())

        if status_error:  # Migration should fail
            if err_msg:   # Special error messages are expected
                if not re.search(err_msg, result.stderr_text.strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" % (err_msg,
                                               result.stderr_text.strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(result.stderr_text.strip())

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    virsh_options = params.get("virsh_options", "--verbose --live")

    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    gluster_mount_dir = params.get("gluster_mount_dir")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")
    host_ip = params.get("gluster_server_ip", "")
    migr_vm_back = params.get("migrate_vm_back", "no") == "yes"

    selinux_local = params.get('set_sebool_local', 'yes') == "yes"
    selinux_remote = params.get('set_sebool_remote', 'no') == "yes"
    sebool_fusefs_local = params.get('set_sebool_fusefs_local',
                                     'yes') == "yes"
    sebool_fusefs_remote = params.get('set_sebool_fusefs_remote',
                                      'yes') == "yes"
    test_dict = dict(params)
    test_dict["local_boolean_varible"] = "virt_use_fusefs"
    test_dict["remote_boolean_varible"] = "virt_use_fusefs"

    remove_pkg = False
    seLinuxBool = None
    seLinuxfusefs = None
    gluster_uri = None
    mig_result = None

    # Make sure all parameters are assigned valid values
    check_parameters(test, params)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
                                       params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
                                       params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    # For --postcopy enable
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        virsh_options = "%s %s" % (virsh_options, postcopy_options)
        params['virsh_options'] = virsh_options

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Back up xml file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    migrate_setup = libvirt.MigrationTest()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Configure selinux
        if selinux_local or selinux_remote:
            seLinuxBool = utils_misc.SELinuxBoolean(params)
            seLinuxBool.setup()
            if sebool_fusefs_local or sebool_fusefs_remote:
                seLinuxfusefs = utils_misc.SELinuxBoolean(test_dict)
                seLinuxfusefs.setup()

        # Setup glusterfs and disk xml.
        disk_img = "gluster.%s" % disk_format
        params['disk_img'] = disk_img
        libvirt.set_vm_disk(vm, params)

        vm_xml_cxt = virsh.dumpxml(vm_name).stdout_text.strip()
        logging.debug("The VM XML with gluster disk source: \n%s", vm_xml_cxt)

        # Check if gluster server is deployed locally
        if not host_ip:
            logging.debug("Enable port 24007 and 49152:49216")
            migrate_setup.migrate_pre_setup(src_uri, params, ports="24007")
            migrate_setup.migrate_pre_setup(src_uri, params)
            gluster_uri = "{}:{}".format(client_ip, vol_name)
        else:
            gluster_uri = "{}:{}".format(host_ip, vol_name)

        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")

        if gluster_mount_dir:
            # If the package 'glusterfs-fuse' is not installed on the
            # target, 'mount -t glusterfs' will fail there
            pkg_name = 'glusterfs-fuse'
            logging.debug("Check if glusterfs-fuse is installed")
            pkg_mgr = utils_package.package_manager(remote_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("glusterfs-fuse will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)
                else:
                    remove_pkg = True

            gluster_img = create_or_clean_backend_dir(gluster_uri, params)
            create_or_clean_backend_dir(gluster_uri, params, remote_session)

            logging.debug("Gluster Image is %s", gluster_img)
            gluster_backend_disk = {'disk_source_name': gluster_img}
            # Update disk xml with gluster image in backend dir
            libvirt.set_vm_disk(vm, gluster_backend_disk)
        remote_session.close()

        mig_result = do_migration(vm, dest_uri, options, extra)
        check_migration_res(mig_result)

        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name,
                                              virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)

            if cmd_result.exit_status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))

    finally:
        logging.info("Recovery test environment")
        orig_config_xml.sync()

        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migrate_setup.migrate_pre_setup(src_uri, params,
                                            cleanup=True)

        # Cleanup SELinux configuration
        if seLinuxBool:
            seLinuxBool.cleanup()
            if seLinuxfusefs:
                seLinuxfusefs.cleanup()

        # Disable ports 24007 and 49152:49216
        if not host_ip:
            logging.debug("Disable 24007 and 49152:49216 in Firewall")
            migrate_setup.migrate_pre_setup(src_uri, params,
                                            cleanup=True, ports="24007")
            migrate_setup.migrate_pre_setup(src_uri, params,
                                            cleanup=True)

        gluster.setup_or_cleanup_gluster(False, **params)

        # Cleanup backend directory/symlink
        if gluster_mount_dir and gluster_uri:
            remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            create_or_clean_backend_dir(gluster_uri, params, is_clean=True)
            create_or_clean_backend_dir(gluster_uri, params, remote_session,
                                        True)
            if remove_pkg:
                pkg_mgr = utils_package.package_manager(remote_session,
                                                        pkg_name)
                if pkg_mgr.is_installed(pkg_name):
                    logging.debug("glusterfs-fuse will be uninstalled")
                    if not pkg_mgr.remove():
                        logging.error("Package '%s' un-installation fails", pkg_name)
            remote_session.close()