Example #1
def run(test, params, env):
    """
    Test virsh domblkerror with two types of error:
    1. unspecified error
    2. no space
    """

    if not virsh.has_help_command('domblkerror'):
        test.cancel("This version of libvirt does not support domblkerror "
                    "test")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    error_type = params.get("domblkerror_error_type")
    timeout = int(params.get("domblkerror_timeout", 240))
    mnt_dir = params.get("domblkerror_mnt_dir", "/home/test")
    export_file = params.get("nfs_export_file", "/etc/exports")
    img_name = params.get("domblkerror_img_name", "libvirt-disk")
    img_size = params.get("domblkerror_img_size")
    target_dev = params.get("domblkerror_target_dev", "vdb")
    pool_name = params.get("domblkerror_pool_name", "fs_pool")
    vol_name = params.get("domblkerror_vol_name", "vol1")
    ubuntu = distro.detect().name == 'Ubuntu'
    rhel = distro.detect().name == 'rhel'
    nfs_service_package = params.get("nfs_service_package", "nfs-kernel-server")
    nfs_service = None
    selinux_bool = None
    session = None
    selinux_bak = ""

    vm = env.get_vm(vm_name)
    if error_type == "unspecified error":
        selinux_local = params.get("setup_selinux_local", "yes") == "yes"
        if not ubuntu and not rhel:
            nfs_service_package = "nfs"
        elif rhel:
            nfs_service_package = "nfs-server"
        if not rhel and not utils_package.package_install(nfs_service_package):
            test.cancel("NFS package not available in host to test")
        # backup /etc/exports
        shutil.copyfile(export_file, "%s.bak" % export_file)
    # backup xml
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Generate tmp dir
        tmp_dir = data_dir.get_tmp_dir()
        img_dir = os.path.join(tmp_dir, 'images')
        if not os.path.exists(img_dir):
            os.mkdir(img_dir)
        # Generate attached disk
        process.run("qemu-img create %s %s" %
                    (os.path.join(img_dir, img_name), img_size),
                    shell=True, verbose=True)

        # Get unspecified error
        if error_type == "unspecified error":
            # In this scenario the guest attaches a disk backed by NFS;
            # stopping the NFS service pauses the guest with "unspecified error"
            nfs_dir = os.path.join(tmp_dir, 'mnt')
            if not os.path.exists(nfs_dir):
                os.mkdir(nfs_dir)
            mount_opt = "rw,no_root_squash,async"
            res = libvirt.setup_or_cleanup_nfs(is_setup=True,
                                               mount_dir=nfs_dir,
                                               is_mount=False,
                                               export_options=mount_opt,
                                               export_dir=img_dir)
            if not ubuntu:
                selinux_bak = res["selinux_status_bak"]
            process.run("mount -o nolock,soft,timeo=1,retrans=1,retry=0 "
                        "127.0.0.1:%s %s" % (img_dir, nfs_dir), shell=True,
                        verbose=True)
            img_path = os.path.join(nfs_dir, img_name)
            nfs_service = Factory.create_service(nfs_service_package)
            if not ubuntu and selinux_local:
                params['set_sebool_local'] = "yes"
                params['local_boolean_varible'] = "virt_use_nfs"
                params['local_boolean_value'] = "on"
                selinux_bool = utils_misc.SELinuxBoolean(params)
                selinux_bool.setup()

        elif error_type == "no space":
            # Steps to generate a "no space" block error:
            # 1. Prepare an iscsi disk and build an fs pool with it
            # 2. Create a vol with large capacity and 0 allocation
            # 3. Attach this disk to the guest
            # 4. In the guest, create a large image in the vol, which may
            #    pause the guest

            _pool_vol = None
            pool_target = os.path.join(tmp_dir, pool_name)
            _pool_vol = libvirt.PoolVolumeTest(test, params)
            _pool_vol.pre_pool(pool_name, "fs", pool_target, img_name,
                               image_size=img_size)
            _pool_vol.pre_vol(vol_name, "raw", "100M", "0", pool_name)
            img_path = os.path.join(pool_target, vol_name)

        # Generate disk xml
        # Guest will attach a disk with cache=none and error_policy=stop
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.driver = {'name': "qemu",
                           'type': "raw",
                           'cache': "none",
                           'error_policy': "stop"}
        img_disk.target = {'dev': target_dev,
                           'bus': "virtio"}
        logging.debug("disk xml is %s", img_disk.xml)

        # Start guest and get session
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Get disk list before operation
        get_disks_cmd = "fdisk -l|grep '^Disk /dev'|cut -d: -f1|cut -d' ' -f2"
        bef_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n")
        logging.debug("disk_list_debug = %s", bef_list)

        # Attach disk to guest
        ret = virsh.attach_device(vm_name, img_disk.xml)
        if ret.exit_status != 0:
            test.fail("Fail to attach device %s" % ret.stderr)
        time.sleep(2)
        logging.debug("domain xml is %s", virsh.dumpxml(vm_name))
        # get disk list after attach
        aft_list = str(session.cmd_output(get_disks_cmd)).strip().split("\n")
        logging.debug("disk list after attaching - %s", aft_list)
        # Find new disk after attach
        new_disk = "".join(list(set(bef_list) ^ set(aft_list)))
        logging.debug("new disk is %s", new_disk)

        def create_large_image():
            """
            Create large image in guest
            """
            # install dependent packages
            pkg_list = ["parted", "e2fsprogs"]
            for pkg in pkg_list:
                if not utils_package.package_install(pkg, session):
                    test.error("Failed to install dependent package %s" % pkg)

            # create partition and file system
            session.cmd("parted -s %s mklabel msdos" % new_disk)
            session.cmd("parted -s %s mkpart primary ext3 '0%%' '100%%'" %
                        new_disk)
            # mount disk and write file in it
            session.cmd("mkfs.ext3 %s1" % new_disk)
            session.cmd("mkdir -p %s && mount %s1 %s" %
                        (mnt_dir, new_disk, mnt_dir))

            # The following step may pause the guest before it returns
            try:
                session.cmd("dd if=/dev/zero of=%s/big_file bs=1024 "
                            "count=51200 && sync" % mnt_dir)
            except Exception as err:
                logging.debug("Expected Fail %s", err)
            session.close()

        create_large_image()
        if error_type == "unspecified error":
            # Stop the NFS service to trigger the error after creating
            # the large image
            if nfs_service is not None:
                nfs_service.stop()
                logging.debug("nfs status is %s", nfs_service.status())

        # wait and check the guest status with timeout
        def _check_state():
            """
            Check domain state
            """
            return (vm.state() == "paused")

        if not utils_misc.wait_for(_check_state, timeout):
            # If not paused, perform one more IO operation to the mnt disk
            session = vm.wait_for_login()
            session.cmd("echo 'one more write to big file' > %s/big_file" % mnt_dir)
            if not utils_misc.wait_for(_check_state, 60):
                test.fail("Guest does not paused, it is %s now" % vm.state())
        else:
            logging.info("Now domain state changed to paused status")
            output = virsh.domblkerror(vm_name)
            if output.exit_status == 0:
                expect_result = "%s: %s" % (img_disk.target['dev'], error_type)
                if output.stdout.strip() == expect_result:
                    logging.info("Get expect result: %s", expect_result)
                else:
                    test.fail("Failed to get expect result, get %s" %
                              output.stdout.strip())
            else:
                test.fail("Fail to get domblkerror info:%s" % output.stderr)
    finally:
        logging.info("Do clean steps")
        if session:
            session.close()
        if error_type == "unspecified error":
            if nfs_service is not None:
                nfs_service.start()
            vm.destroy()
            if os.path.isfile("%s.bak" % export_file):
                shutil.move("%s.bak" % export_file, export_file)
            res = libvirt.setup_or_cleanup_nfs(is_setup=False,
                                               mount_dir=nfs_dir,
                                               export_dir=img_dir,
                                               restore_selinux=selinux_bak)
            if selinux_bool:
                selinux_bool.cleanup(keep_authorized_keys=True)
        elif error_type == "no space":
            vm.destroy()
            if _pool_vol:
                _pool_vol.cleanup_pool(pool_name, "fs", pool_target, img_name)
        vmxml_backup.sync()
        data_dir.clean_tmp_files()
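
Note: both branches above rely on avocado-vt's utils_misc.wait_for to poll the domain state until it turns "paused". A minimal, self-contained sketch of that polling pattern (plain Python for illustration, not the actual avocado-vt helper):

import time

def wait_for(condition, timeout, step=1.0):
    """Poll `condition` until it returns a truthy value or `timeout` expires.

    Returns the truthy value on success, None on timeout -- matching how
    the test above treats a falsy result as "guest never paused".
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = condition()
        if result:
            return result
        time.sleep(step)
    return None

# Usage analogous to the test: poll a state getter until it reports "paused"
states = iter(["running", "running", "paused"])
if wait_for(lambda: next(states) == "paused", timeout=10):
    print("domain paused as expected")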
Example #2
def run(test, params, env):
    """
    Test migration with glusterfs.
    """
    def create_or_clean_backend_dir(g_uri,
                                    params,
                                    session=None,
                                    is_clean=False):
        """
        Create/cleanup backend directory

        :params g_uri: glusterfs uri
        :params params: the parameters to be checked
        :params session: VM/remote session object
        :params is_cleanup: True for cleanup backend directory;
                            False for create one.
        :return: gluster_img if is_clean is equal to True
        """
        mount_point = params.get("gluster_mount_dir")
        is_symlink = params.get("gluster_create_symlink") == "yes"
        symlink_name = params.get("gluster_symlink")
        gluster_img = None
        if not is_clean:
            if not utils_misc.check_exists(mount_point, session):
                utils_misc.make_dirs(mount_point, session)

            if gluster.glusterfs_is_mounted(mount_point, session):
                gluster.glusterfs_umount(g_uri, mount_point, session)
            gluster.glusterfs_mount(g_uri, mount_point, session)

            gluster_img = os.path.join(mount_point, disk_img)
            if is_symlink:
                utils_misc.make_symlink(mount_point, symlink_name)
                utils_misc.make_symlink(mount_point, symlink_name,
                                        remote_session)
                gluster_img = os.path.join(symlink_name, disk_img)
            return gluster_img
        else:
            if is_symlink:
                utils_misc.rm_link(symlink_name, session)

            gluster.glusterfs_umount(g_uri, mount_point, session)
            if utils_misc.check_exists(mount_point, session):
                utils_misc.safe_rmdir(gluster_mount_dir, session=session)

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    virsh_options = params.get("virsh_options", "")

    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    gluster_mount_dir = params.get("gluster_mount_dir")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")
    host_ip = params.get("gluster_server_ip", "")
    migrate_vm_back = params.get("migrate_vm_back", "no") == "yes"

    selinux_local = params.get('set_sebool_local', 'yes') == "yes"
    selinux_remote = params.get('set_sebool_remote', 'no') == "yes"
    sebool_fusefs_local = params.get('set_sebool_fusefs_local', 'yes')
    sebool_fusefs_remote = params.get('set_sebool_fusefs_remote', 'yes')
    test_dict = dict(params)
    test_dict["local_boolean_varible"] = "virt_use_fusefs"
    test_dict["remote_boolean_varible"] = "virt_use_fusefs"
    remote_dargs = {
        'server_ip': server_ip,
        'server_user': server_user,
        'server_pwd': server_pwd,
        'file_path': "/etc/libvirt/libvirt.conf"
    }

    remove_pkg = False
    seLinuxBool = None
    seLinuxfusefs = None
    gluster_uri = None
    mig_result = None
    remove_dict = {}
    remote_libvirt_file = None
    src_libvirt_file = None

    # Make sure all parameters are assigned valid values
    migrate_test = migration.MigrationTest()
    migrate_test.check_parameters(params)
    extra_args = migrate_test.update_virsh_migrate_extra_args(params)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    # Enable --postcopy if requested
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (virsh_options, postcopy_options)
        func_name = virsh.migrate_postcopy

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Back up xml file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Configure selinux
        if selinux_local or selinux_remote:
            seLinuxBool = utils_misc.SELinuxBoolean(params)
            seLinuxBool.setup()
            if sebool_fusefs_local or sebool_fusefs_remote:
                seLinuxfusefs = utils_misc.SELinuxBoolean(test_dict)
                seLinuxfusefs.setup()

        # Setup glusterfs
        disk_img = "gluster.%s" % disk_format
        params['disk_img'] = disk_img
        host_ip = gluster.setup_or_cleanup_gluster(is_setup=True, **params)
        logging.debug("host ip: %s ", host_ip)

        # Check if gluster server is deployed locally
        if not host_ip:
            logging.debug("Enable port 24007 and 49152:49216")
            migrate_test.migrate_pre_setup(src_uri, params, ports="24007")
            migrate_test.migrate_pre_setup(src_uri, params)
            gluster_uri = "{}:{}".format(client_ip, vol_name)
        else:
            gluster_uri = "{}:{}".format(host_ip, vol_name)

        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")

        if gluster_mount_dir:
            # If the package 'glusterfs-fuse' is not installed on the target,
            # 'mount -t glusterfs' will fail there
            pkg_name = 'glusterfs-fuse'
            logging.debug("Check if glusterfs-fuse is installed")
            pkg_mgr = utils_package.package_manager(remote_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("glusterfs-fuse will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)
                else:
                    remove_pkg = True

            gluster_img = create_or_clean_backend_dir(gluster_uri, params)
            create_or_clean_backend_dir(gluster_uri, params, remote_session)

            # Get the image path
            image_source = vm.get_first_disk_devices()['source']
            image_info = utils_misc.get_image_info(image_source)
            if image_info["format"] == disk_format:
                disk_cmd = "cp -f %s %s" % (image_source, gluster_img)
            else:
                # Convert the disk format
                disk_cmd = ("qemu-img convert -f %s -O %s %s %s" %
                            (image_info["format"], disk_format, image_source,
                             gluster_img))
            process.run("%s; chmod a+rw %s" % (disk_cmd, gluster_mount_dir),
                        shell=True)

            logging.debug("Gluster Image is %s", gluster_img)
            gluster_backend_disk = {'disk_source_name': gluster_img}
            # Update disk xml with gluster image in backend dir
            libvirt.set_vm_disk(vm, gluster_backend_disk)
        remote_session.close()

        vm_xml_cxt = virsh.dumpxml(vm_name).stdout_text.strip()
        logging.debug("The VM XML with gluster disk source: \n%s", vm_xml_cxt)

        vm.wait_for_login().close()
        migrate_test.ping_vm(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        vms = [vm]
        migrate_test.do_migration(vms,
                                  None,
                                  dest_uri,
                                  'orderly',
                                  options,
                                  thread_timeout=900,
                                  ignore_status=True,
                                  virsh_opt=virsh_options,
                                  extra_opts=extra,
                                  **extra_args)
        migrate_test.ping_vm(vm, params, dest_uri)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migrate_test.migrate_pre_setup(src_uri, params)
            remove_dict = {"do_search": ('{"%s": "ssh:/"}' % src_uri)}
            remote_libvirt_file = libvirt_config\
                .remove_key_for_modular_daemon(remove_dict, remote_dargs)

            cmd = "virsh migrate %s %s %s %s" % (vm_name, options,
                                                 virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)

            if cmd_result.exit_status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd,
                                      params,
                                      runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s" %
                          (cmd, cmd_result))

    finally:
        logging.info("Recover test environment")
        migrate_test.cleanup_vm(vm, dest_uri)

        orig_config_xml.sync()

        if src_libvirt_file:
            src_libvirt_file.restore()
        if remote_libvirt_file:
            del remote_libvirt_file

        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True)

        # Clean up SELinux configuration
        if seLinuxBool:
            seLinuxBool.cleanup()
            if seLinuxfusefs:
                seLinuxfusefs.cleanup()

        # Disable ports 24007 and 49152:49216
        if not host_ip:
            logging.debug("Disable 24007 and 49152:49216 in Firewall")
            migrate_test.migrate_pre_setup(src_uri,
                                           params,
                                           cleanup=True,
                                           ports="24007")
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True)

        gluster.setup_or_cleanup_gluster(False, **params)

        # Cleanup backend directory/symlink
        if gluster_mount_dir and gluster_uri:
            remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            create_or_clean_backend_dir(gluster_uri, params, is_clean=True)
            create_or_clean_backend_dir(gluster_uri, params, remote_session,
                                        True)
            if remove_pkg:
                pkg_mgr = utils_package.package_manager(
                    remote_session, pkg_name)
                if pkg_mgr.is_installed(pkg_name):
                    logging.debug("glusterfs-fuse will be uninstalled")
                    if not pkg_mgr.remove():
                        logging.error("Package '%s' un-installation fails",
                                      pkg_name)
            remote_session.close()
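
Note: create_or_clean_backend_dir above is deliberately symmetric: setup unmounts any stale mount before mounting (so reruns are idempotent) and cleanup undoes each step in reverse order. A compact sketch of the same pattern with plain shell calls; the mount point and commands are illustrative, not the gluster utility API used by the test:

import os
import subprocess

def setup_backend(volume_uri, mount_point="/mnt/gluster"):
    """Idempotently mount a glusterfs volume at mount_point."""
    os.makedirs(mount_point, exist_ok=True)
    # Unmount first so a stale mount from a previous run cannot linger
    if os.path.ismount(mount_point):
        subprocess.run(["umount", mount_point], check=True)
    subprocess.run(["mount", "-t", "glusterfs", volume_uri, mount_point],
                   check=True)
    return mount_point

def cleanup_backend(mount_point="/mnt/gluster"):
    """Undo setup_backend, step by step in reverse order."""
    if os.path.ismount(mount_point):
        subprocess.run(["umount", mount_point], check=True)
    if os.path.isdir(mount_point):
        os.rmdir(mount_point)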
Example #3
def run(test, params, env):
    """
    Test virsh migrate-setmaxdowntime command.

    1) Prepare migration environment
    2) Start migration and set migrate-maxdowntime
    3) Clean up environment (migrated vm on destination)
    4) Check result
    """
    dest_uri = params.get("virsh_migrate_dest_uri",
                          "qemu+ssh://MIGRATE_EXAMPLE/system")
    src_uri = params.get("virsh_migrate_src_uri",
                         "qemu+ssh://MIGRATE_EXAMPLE/system")
    if dest_uri.count('///') or dest_uri.count('MIGRATE_EXAMPLE'):
        raise error.TestNAError("Set your destination uri first.")
    if src_uri.count('MIGRATE_EXAMPLE'):
        raise error.TestNAError("Set your source uri first.")
    if src_uri == dest_uri:
        raise error.TestNAError("You should not set dest uri same as local.")
    vm_ref = params.get("setmmdt_vm_ref", "domname")
    pre_vm_state = params.get("pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", "no")
    do_migrate = "yes" == params.get("do_migrate", "yes")
    migrate_maxdowntime = params.get("migrate_maxdowntime", 1.000)
    if (migrate_maxdowntime == ""):
        downtime = ""
    else:
        downtime = int(float(migrate_maxdowntime)) * 1000
    extra = params.get("setmmdt_extra")

    # A delay between threads
    delay_time = int(params.get("delay_time", 1))
    # timeout of threads
    thread_timeout = 180

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()

    grep_str_local = params.get("grep_str_from_local_libvirt_log", "")

    # For safety reasons, back up the original guest xml
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        raise error.TestError("Backing up xmlfile failed.")

    # Params to configure libvirtd.conf
    log_file = "/var/log/libvirt/libvirtd.log"
    log_level = "1"
    log_filters = '"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"'
    libvirtd_conf_dict = {
        "log_level": log_level,
        "log_filters": log_filters,
        "log_outputs": '"%s:file:%s"' % (log_level, log_file)
    }

    # Update libvirtd config with new parameters
    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd_conf = config_libvirt(libvirtd_conf_dict)
    libvirtd.restart()

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Params to setup SSH connection
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    # Params to enable SELinux boolean on remote host
    params["remote_boolean_varible"] = "virt_use_nfs"
    params["remote_boolean_value"] = "on"
    params["set_sebool_remote"] = "yes"

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Configure ssh auto-login for the remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri}
    migrate_dargs = {'debug': True, 'ignore_status': True}

    seLinuxBool = None
    nfs_client = None
    local_selinux_bak = ""

    try:
        # Update the disk using shared storage
        libvirt.set_vm_disk(vm, params)

        # Backup the SELinux status on local host for recovering
        local_selinux_bak = params.get("selinux_status_bak", "")

        # Configure NFS client on remote host
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

        logging.info("Enable virt NFS SELinux boolean on target host")
        seLinuxBool = utils_misc.SELinuxBoolean(params)
        seLinuxBool.setup()

        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        domid = vm.get_id()

        # Confirm how to reference a VM.
        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid

        # Prepare vm state
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shutoff":
            vm.destroy(gracefully=False)
            # Ensure VM in 'shut off' status
            utils_misc.wait_for(lambda: vm.state() == "shut off", 30)

        # Setting max migration downtime must happen during migration
        # Use threads for synchronization
        threads = []
        if do_migrate:
            threads.append(
                threading.Thread(target=thread_func_live_migration,
                                 args=(vm, dest_uri, migrate_dargs)))

        threads.append(
            threading.Thread(target=thread_func_setmmdt,
                             args=(vm_ref, downtime, extra, setmmdt_dargs)))
        for thread in threads:
            thread.start()
            # Migration must be executing before setting maxdowntime
            time.sleep(delay_time)
        # Wait until the threads finish
        for thread in threads:
            thread.join(thread_timeout)

        if (status_error is False or do_migrate is False):
            logging.debug("To match the expected pattern '%s' ...",
                          grep_str_local)
            cmd = "grep -E '%s' %s" % (grep_str_local, log_file)
            cmdResult = process.run(cmd, shell=True, verbose=False)
            logging.debug(cmdResult)

    finally:
        # Clean up.
        if do_migrate:
            logging.debug("Cleanup VM on remote host...")
            cleanup_dest(vm, src_uri, dest_uri)

        if orig_config_xml:
            logging.debug("Recover VM XML...")
            orig_config_xml.sync()

        if seLinuxBool:
            logging.info("Recover NFS SELinux boolean on remote host...")
            seLinuxBool.cleanup(True)

        if nfs_client:
            logging.info("Cleanup NFS client environment...")
            nfs_client.cleanup()

        logging.info("Remove the NFS image...")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)

        logging.info("Cleanup NFS server environment...")
        exp_dir = params.get("export_dir")
        mount_dir = params.get("mnt_path_name")
        libvirt.setup_or_cleanup_nfs(False,
                                     export_dir=exp_dir,
                                     mount_dir=mount_dir,
                                     restore_selinux=local_selinux_bak)

        # Recover libvirtd service configuration on the local host
        if libvirtd_conf:
            logging.debug("Recover local libvirtd configuration...")
            libvirtd_conf.restore()
            libvirtd.restart()
            os.remove(log_file)

    # Check results.
    if status_error:
        if ret_setmmdt:
            if not do_migrate and libvirt_version.version_compare(1, 2, 9):
                # https://bugzilla.redhat.com/show_bug.cgi?id=1146618
                # Commit fe808d9 fix it and allow setting migration
                # max downtime any time since libvirt-1.2.9
                logging.info("Libvirt version is newer than 1.2.9,"
                             "Allow set maxdowntime while VM isn't migrating")
            else:
                raise error.TestFail("virsh migrate-setmaxdowntime succeed "
                                     "but not expected.")
    else:
        if do_migrate and not ret_migration:
            raise error.TestFail("Migration failed.")

        if not ret_setmmdt:
            raise error.TestFail("virsh migrate-setmaxdowntime failed.")
Example #4
def run(test, params, env):
    """
    Test command: virsh blockcopy.

    This command can copy a disk backing image chain to dest.
    1. Positive testing
        1.1 Copy a disk to a new image file.
        1.2 Reuse existing destination copy.
        1.3 Valid blockcopy timeout and bandwidth test.
    2. Negative testing
        2.1 Copy a disk to a non-existent directory.
        2.2 Copy a disk with invalid options.
        2.3 Do block copy for a persistent domain.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    target = params.get("target_disk", "")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    disk_type = params.get("disk_type")
    pool_name = params.get("pool_name")
    image_size = params.get("image_size")
    emu_image = params.get("emulated_image")
    copy_to_nfs = "yes" == params.get("copy_to_nfs", "no")
    mnt_path_name = params.get("mnt_path_name")
    options = params.get("blockcopy_options", "")
    bandwidth = params.get("blockcopy_bandwidth", "")
    bandwidth_byte = "yes" == params.get("bandwidth_byte", "no")
    reuse_external = "yes" == params.get("reuse_external", "no")
    persistent_vm = params.get("persistent_vm", "no")
    status_error = "yes" == params.get("status_error", "no")
    active_error = "yes" == params.get("active_error", "no")
    active_snap = "yes" == params.get("active_snap", "no")
    active_save = "yes" == params.get("active_save", "no")
    check_state_lock = "yes" == params.get("check_state_lock", "no")
    check_finish_job = "yes" == params.get("check_finish_job", "yes")
    with_shallow = "yes" == params.get("with_shallow", "no")
    with_blockdev = "yes" == params.get("with_blockdev", "no")
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    bug_url = params.get("bug_url", "")
    timeout = int(params.get("timeout", 1200))
    relative_path = params.get("relative_path")
    rerun_flag = 0
    blkdev_n = None
    back_n = 'blockdev-backing-iscsi'
    snapshot_external_disks = []
    snapshots_take = int(params.get("snapshots_take", '0'))
    external_disk_only_snapshot = "yes" == params.get(
        "external_disk_only_snapshot", "no")
    enable_iscsi_auth = "yes" == params.get("enable_iscsi_auth", "no")
    selinux_local = "yes" == params.get("set_sebool_local", "no")

    # Set selinux
    if selinux_local:
        selinux_bool = utils_misc.SELinuxBoolean(params)
        selinux_bool.setup()

    # Skip/Fail early
    if with_blockdev and not libvirt_version.version_compare(1, 2, 13):
        raise exceptions.TestSkipError("--blockdev option not supported in "
                                       "current version")
    if not target:
        raise exceptions.TestSkipError("Require target disk to copy")
    if setup_libvirt_polkit and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("API acl test not supported in current"
                                       " libvirt version")
    if copy_to_nfs and not libvirt_version.version_compare(1, 1, 1):
        raise exceptions.TestSkipError("Bug will not fix: %s" % bug_url)
    if bandwidth_byte and not libvirt_version.version_compare(1, 3, 3):
        raise exceptions.TestSkipError("--bytes option not supported in "
                                       "current version")
    if relative_path == "yes" and not libvirt_version.version_compare(3, 0, 0):
        test.cancel(
            "Forbid using relative path or file name only is added since libvirt-3.0.0"
        )

    if "--transient-job" in options and not libvirt_version.version_compare(
            4, 5, 0):
        test.cancel(
            "--transient-job option is supported until libvirt 4.5.0 version")

    # Check the source disk
    if vm_xml.VMXML.check_disk_exist(vm_name, target):
        logging.debug("Find %s in domain %s", target, vm_name)
    else:
        raise exceptions.TestFail("Can't find %s in domain %s" %
                                  (target, vm_name))

    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    tmp_dir = data_dir.get_data_dir()

    # Prepare dest path params
    dest_path = params.get("dest_path", "")
    dest_format = params.get("dest_format", "")
    # Ugh... this piece of chicanery brought to you by the QemuImg which
    # will "add" the 'dest_format' extension during the check_format code.
    # So if we create the file with the extension and then remove it when
    # doing the check_format later, then we avoid erroneous failures.
    dest_extension = ""
    if dest_format != "":
        dest_extension = ".%s" % dest_format

    # Prepare for --reuse-external option
    if reuse_external:
        options += "--reuse-external --wait"
        # Set rerun_flag=1 to do blockcopy twice; the file created the first
        # time can be reused the second time if no dest_path is given.
        # This makes sure the image size equals the original disk size
        if dest_path == "/path/non-exist":
            if os.path.exists(dest_path) and not os.path.isdir(dest_path):
                os.remove(dest_path)
        else:
            rerun_flag = 1

    # Prepare other options
    if dest_format == "raw":
        options += " --raw"
    if with_blockdev:
        options += " --blockdev"
    if bandwidth:
        options += " --bandwidth %s" % bandwidth
    if bandwidth_byte:
        options += " --bytes"
    if with_shallow:
        options += " --shallow"

    # Prepare acl options
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    extra_dict = {
        'uri': uri,
        'unprivileged_user': unprivileged_user,
        'debug': True,
        'ignore_status': True,
        'timeout': timeout
    }

    libvirtd_utl = utils_libvirtd.Libvirtd('virtqemud')
    libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(),
                                     "libvirt_daemons.log")
    libvirtd_conf_dict = {
        "log_filter": '"3:json 1:libvirt 1:qemu"',
        "log_outputs": '"1:file:%s"' % libvirtd_log_path
    }
    logging.debug("the libvirtd conf file content is :\n %s" %
                  libvirtd_conf_dict)
    libvirtd_conf = utl.customize_libvirt_config(libvirtd_conf_dict)

    def check_format(dest_path, dest_extension, expect):
        """
        Check the image format

        :param dest_path: Path of the copy to create
        :param dest_extension: Extension that QemuImg appends to the path
        :param expect: Expected image format
        """
        # And now because the QemuImg will add the extension for us
        # we have to remove it here (slice off the suffix; str.strip
        # would treat the extension as a character set)
        path_noext = dest_path
        if dest_extension and dest_path.endswith(dest_extension):
            path_noext = dest_path[:-len(dest_extension)]
        params['image_name'] = path_noext
        params['image_format'] = expect
        image = qemu_storage.QemuImg(params, "/", path_noext)
        if image.get_format() == expect:
            logging.debug("%s format is %s", dest_path, expect)
        else:
            raise exceptions.TestFail("%s format is not %s" %
                                      (dest_path, expect))

    def _blockjob_and_libvirtd_chk(cmd_result):
        """
        Raise TestFail when blockcopy fails with a block-job-complete error
        or hangs on the state change lock.
        This verifies a specific bug, so status_error is ignored here.
        """
        failure_msg = ""
        err_msg = "internal error: unable to execute QEMU command"
        err_msg += " 'block-job-complete'"
        if err_msg in cmd_result.stderr:
            failure_msg += "Virsh cmd error happened: %s\n" % err_msg
        err_pattern = "Timed out during operation: cannot acquire"
        err_pattern += " state change lock"
        ret = chk_libvirtd_log(libvirtd_log_path, err_pattern, "error")
        if ret:
            failure_msg += "Libvirtd log error happened: %s\n" % err_pattern
        if failure_msg:
            if not libvirt_version.version_compare(1, 3, 2):
                bug_url_ = "https://bugzilla.redhat.com/show_bug.cgi?id=1197592"
                failure_msg += "Hit on bug: %s " % bug_url_
            test.fail(failure_msg)

    def _make_snapshot(snapshot_numbers_take):
        """
        Make external disk snapshots

        :param snapshot_numbers_take: number of snapshots to take
        """
        for count in range(0, snapshot_numbers_take):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "blockcopy_snap"
            snap_xml.snap_name = snapshot_name + "_%s" % count
            snap_xml.description = "blockcopy snapshot"

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            # Remove non-storage disks such as 'cdrom'; build a new list
            # rather than mutating the one being iterated
            disks = [disk for disk in disks if disk.device == 'disk']
            new_disks = []
            src_disk_xml = disks[0]
            disk_xml = snap_xml.SnapDiskXML()
            disk_xml.xmltreefile = src_disk_xml.xmltreefile
            del disk_xml.device
            del disk_xml.address
            disk_xml.snapshot = "external"
            disk_xml.disk_name = disk_xml.target['dev']

            # Only qcow2 works as the external snapshot file format,
            # so update it here
            driver_attr = disk_xml.driver
            driver_attr.update({'type': 'qcow2'})
            disk_xml.driver = driver_attr

            new_attrs = disk_xml.source.attrs
            if 'file' in disk_xml.source.attrs:
                new_file = os.path.join(tmp_dir,
                                        "blockcopy_shallow_%s.snap" % count)
                snapshot_external_disks.append(new_file)
                new_attrs.update({'file': new_file})
                hosts = None
            elif ('dev' in disk_xml.source.attrs
                  or 'name' in disk_xml.source.attrs
                  or 'pool' in disk_xml.source.attrs):
                if (disk_xml.type_name == 'block'
                        or disk_source_protocol == 'iscsi'):
                    disk_xml.type_name = 'block'
                    if 'name' in new_attrs:
                        del new_attrs['name']
                        del new_attrs['protocol']
                    elif 'pool' in new_attrs:
                        del new_attrs['pool']
                        del new_attrs['volume']
                        del new_attrs['mode']
                    back_path = utl.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=True,
                        image_size="1G",
                        emulated_image=back_n)
                    emulated_iscsi.append(back_n)
                    cmd = "qemu-img create -f qcow2 %s 1G" % back_path
                    process.run(cmd, shell=True)
                    new_attrs.update({'dev': back_path})
                    hosts = None

            new_src_dict = {"attrs": new_attrs}
            if hosts:
                new_src_dict.update({"hosts": hosts})
            disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

            new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)

            if snapshot_result.exit_status != 0:
                raise exceptions.TestFail(snapshot_result.stderr)

    snap_path = ''
    save_path = ''
    emulated_iscsi = []
    nfs_cleanup = False
    try:
        # Prepare dest_path
        tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
        tmp_file += dest_extension
        if not dest_path:
            if enable_iscsi_auth:
                libvirt_secret.clean_up_secrets()
                setup_auth_enabled_iscsi_disk(vm, params)
                dest_path = os.path.join(tmp_dir, tmp_file)
            elif with_blockdev:
                blkdev_n = 'blockdev-iscsi'
                dest_path = utl.setup_or_cleanup_iscsi(is_setup=True,
                                                       is_login=True,
                                                       image_size=image_size,
                                                       emulated_image=blkdev_n)
                emulated_iscsi.append(blkdev_n)
                # Make sure the new disk shows up
                utils_misc.wait_for(lambda: os.path.exists(dest_path), 5)
            else:
                if copy_to_nfs:
                    tmp_dir = "%s/%s" % (tmp_dir, mnt_path_name)
                dest_path = os.path.join(tmp_dir, tmp_file)

        # Domain disk replacement with desired type
        if replace_vm_disk:
            # Calling 'set_vm_disk' is a bad idea as it leaves lots of cleanup
            # jobs after the test, such as pool, volume, nfs, iscsi and so on
            # TODO: remove this function in the future
            if disk_source_protocol == 'iscsi':
                emulated_iscsi.append(emu_image)
            if disk_source_protocol == 'netfs':
                nfs_cleanup = True
            utl.set_vm_disk(vm, params, tmp_dir, test)
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        if with_shallow or external_disk_only_snapshot or enable_iscsi_auth:
            _make_snapshot(snapshots_take)

        # Prepare transient/persistent vm
        if persistent_vm == "no" and vm.is_persistent():
            vm.undefine()
        elif persistent_vm == "yes" and not vm.is_persistent():
            new_xml.define()

        # Run blockcopy command to create destination file
        if rerun_flag == 1:
            options1 = "--wait %s --finish --verbose" % dest_format
            if with_blockdev:
                options1 += " --blockdev"
            if with_shallow:
                options1 += " --shallow"
            cmd_result = virsh.blockcopy(vm_name, target, dest_path, options1,
                                         **extra_dict)
            status = cmd_result.exit_status
            if status != 0:
                raise exceptions.TestFail("Run blockcopy command fail: %s" %
                                          cmd_result.stdout.strip() +
                                          cmd_result.stderr)
            elif not os.path.exists(dest_path):
                raise exceptions.TestFail("Cannot find the created copy")

        if "--transient-job" in options:
            pool = ThreadPool(processes=1)
            async_result = pool.apply_async(
                blockcopy_thread, (vm_name, target, dest_path, options))
            kill_blockcopy_process()
            utl.check_blockjob(vm_name, target)
            return

        # Run the real testing command
        cmd_result = virsh.blockcopy(vm_name, target, dest_path, options,
                                     **extra_dict)

        # check BZ#1197592
        _blockjob_and_libvirtd_chk(cmd_result)
        status = cmd_result.exit_status

        if not libvirtd_utl.is_running():
            raise exceptions.TestFail("Libvirtd service is dead")

        if not status_error:
            if status == 0:
                ret = utils_misc.wait_for(
                    lambda: check_xml(vm_name, target, dest_path, options), 5)
                if not ret:
                    raise exceptions.TestFail("Domain xml not expected after"
                                              " blockcopy")
                if options.count("--bandwidth"):
                    if options.count('--bytes'):
                        bandwidth += 'B'
                    else:
                        bandwidth += 'M'
                    if not (bandwidth
                            in ['0B', '0M']) and not utl.check_blockjob(
                                vm_name, target, "bandwidth", bandwidth):
                        raise exceptions.TestFail("Check bandwidth failed")
                val = options.count("--pivot") + options.count("--finish")
                # Don't wait for the job to finish when using --bytes
                val += options.count('--bytes')
                if val == 0 and check_finish_job:
                    try:
                        finish_job(vm_name, target, timeout)
                    except JobTimeout as excpt:
                        raise exceptions.TestFail("Run command failed: %s" %
                                                  excpt)
                if options.count("--raw") and not with_blockdev:
                    check_format(dest_path, dest_extension, dest_format)
                if active_snap:
                    snap_path = "%s/%s.snap" % (tmp_dir, vm_name)
                    snap_opt = "--disk-only --atomic --no-metadata "
                    snap_opt += "vda,snapshot=external,file=%s" % snap_path
                    ret = virsh.snapshot_create_as(vm_name,
                                                   snap_opt,
                                                   ignore_status=True,
                                                   debug=True)
                    utl.check_exit_status(ret, active_error)
                if active_save:
                    save_path = "%s/%s.save" % (tmp_dir, vm_name)
                    ret = virsh.save(vm_name,
                                     save_path,
                                     ignore_status=True,
                                     debug=True)
                    utl.check_exit_status(ret, active_error)
                if check_state_lock:
                    # Run blockjob --pivot in a subprocess as it will hang
                    # for a while; run blockjob --info again to check the
                    # job state
                    command = "virsh blockjob %s %s --pivot" % (vm_name,
                                                                target)
                    session = aexpect.ShellSession(command)
                    ret = virsh.blockjob(vm_name, target, "--info")
                    err_info = "cannot acquire state change lock"
                    if err_info in ret.stderr:
                        raise exceptions.TestFail("Hit on bug: %s" % bug_url)
                    utl.check_exit_status(ret, status_error)
                    session.close()
            else:
                raise exceptions.TestFail(cmd_result.stdout.strip() +
                                          cmd_result.stderr)
        else:
            if status:
                logging.debug("Expect error: %s", cmd_result.stderr)
            else:
                # Commit id '4c297728' changed how virsh exits when
                # unexpectedly failing due to timeout from a fail (1)
                # to a success (0), so we need to look for a different
                # marker to indicate the copy aborted. "Now in mirroring
                # phase" may appear in stdout and make the stdout check
                # fail, so also check the libvirtd log to confirm.
                if options.count("--timeout") and options.count("--wait"):
                    log_pattern = "Copy aborted"
                    if (re.search(log_pattern, cmd_result.stdout.strip())
                            or chk_libvirtd_log(libvirtd_log_path, log_pattern,
                                                "debug")):
                        logging.debug("Found success a timed out block copy")
                else:
                    raise exceptions.TestFail("Expect fail, but run "
                                              "successfully: %s" % bug_url)
    finally:
        # Recovering the VM may fail unexpectedly; use try/except so the
        # remaining cleanup steps still run
        try:
            # Abort any existing blockjob to avoid possible lock errors
            virsh.blockjob(vm_name, target, '--abort', ignore_status=True)
            vm.destroy(gracefully=False)
            # It may take a long time to shutdown the VM which has
            # blockjob running
            utils_misc.wait_for(
                lambda: virsh.domstate(vm_name, ignore_status=True).
                exit_status, 180)
            if virsh.domain_exists(vm_name):
                if active_snap or with_shallow:
                    option = "--snapshots-metadata"
                else:
                    option = None
                original_xml.sync(option)
            else:
                original_xml.define()
        except Exception as e:
            logging.error(e)
        for disk in snapshot_external_disks:
            if os.path.exists(disk):
                os.remove(disk)
        # Clean up libvirt pool, which may be created by 'set_vm_disk'
        if disk_type == 'volume':
            virsh.pool_destroy(pool_name, ignore_status=True, debug=True)
        # Restore libvirtd conf and restart libvirtd
        libvirtd_conf.restore()
        libvirtd_utl.restart()
        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
        # Clean up NFS
        try:
            if nfs_cleanup:
                utl.setup_or_cleanup_nfs(is_setup=False)
        except Exception as e:
            logging.error(e)
        # Clean up iSCSI
        try:
            for iscsi_n in list(set(emulated_iscsi)):
                utl.setup_or_cleanup_iscsi(is_setup=False,
                                           emulated_image=iscsi_n)
                # iscsid will be restarted, so give it a break before next loop
                time.sleep(5)
        except Exception as e:
            logging.error(e)
        if os.path.exists(dest_path):
            os.remove(dest_path)
        if os.path.exists(snap_path):
            os.remove(snap_path)
        if os.path.exists(save_path):
            os.remove(save_path)
        # Restart virtlogd service to release VM log file lock
        try:
            path.find_command('virtlogd')
            process.run('systemctl reset-failed virtlogd')
            process.run('systemctl restart virtlogd')
        except path.CmdNotFoundError:
            pass
        if selinux_local:
            selinux_bool.cleanup(keep_authorized_keys=True)
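
Note on check_format above: str.strip(chars) treats its argument as a character set and removes matching characters from both ends, so the common dest_path.strip(dest_extension) idiom can mangle file names whose leading characters happen to occur in the extension; that is why the code slices the suffix off instead. A small illustrative helper and a demonstration of the pitfall:

def remove_suffix(path, suffix):
    """Remove suffix from the end of path, and only from the end."""
    if suffix and path.endswith(suffix):
        return path[:-len(suffix)]
    return path

# str.strip() removes members of a character *set* from both ends:
assert "raw_copy.raw".strip(".raw") == "_copy"
assert remove_suffix("raw_copy.raw", ".raw") == "raw_copy"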
Example #5
def run(test, params, env):
    """
    Test migration with glusterfs.
    """
    def create_or_clean_backend_dir(g_uri, params, session=None,
                                    is_clean=False):
        """
        Create/cleanup backend directory

        :params g_uri: glusterfs uri
        :params params: the parameters to be checked
        :params session: VM/remote session object
        :params is_cleanup: True for cleanup backend directory;
                            False for create one.
        :return: gluster_img if is_clean is equal to True
        """
        mount_point = params.get("gluster_mount_dir")
        is_symlink = params.get("gluster_create_symlink") == "yes"
        symlink_name = params.get("gluster_symlink")
        gluster_img = None
        if not is_clean:
            if not utils_misc.check_exists(mount_point, session):
                utils_misc.make_dirs(mount_point, session)

            if gluster.glusterfs_is_mounted(mount_point, session):
                gluster.glusterfs_umount(g_uri, mount_point, session)
            gluster.glusterfs_mount(g_uri, mount_point, session)

            gluster_img = os.path.join(mount_point, disk_img)
            if is_symlink:
                utils_misc.make_symlink(mount_point, symlink_name)
                utils_misc.make_symlink(mount_point, symlink_name, remote_session)
                gluster_img = os.path.join(symlink_name, disk_img)
            return gluster_img
        else:
            if is_symlink:
                utils_misc.rm_link(symlink_name, session)

            gluster.glusterfs_umount(g_uri, mount_point, session)
            if utils_misc.check_exists(mount_point, session):
                utils_misc.safe_rmdir(gluster_mount_dir, session=session)

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters

        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line

        :return: CmdResult object
        """
        # Migrate the guest.
        virsh_args.update({"ignore_status": True})
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if the test fails
        """
        if not result:
            test.error("No migration result is returned.")
        logging.info("Migration out: %s", result.stdout_text.strip())
        logging.info("Migration error: %s", result.stderr_text.strip())

        if status_error:  # Migration should fail
            if err_msg:   # Special error messages are expected
                if not re.search(err_msg, result.stderr_text.strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" % (err_msg,
                                               result.stderr_text.strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failed as expected")
                else:
                    test.fail("Migration succeeded unexpectedly")
        else:
            if int(result.exit_status) != 0:
                test.fail(result.stderr_text.strip())

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    virsh_options = params.get("virsh_options", "--verbose --live")

    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    gluster_mount_dir = params.get("gluster_mount_dir")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")
    host_ip = params.get("gluster_server_ip", "")
    migr_vm_back = params.get("migrate_vm_back", "no") == "yes"

    selinux_local = params.get('set_sebool_local', 'yes') == "yes"
    selinux_remote = params.get('set_sebool_remote', 'no') == "yes"
    sebool_fusefs_local = params.get('set_sebool_fusefs_local', 'yes') == "yes"
    sebool_fusefs_remote = params.get('set_sebool_fusefs_remote', 'yes') == "yes"
    test_dict = dict(params)
    test_dict["local_boolean_varible"] = "virt_use_fusefs"
    test_dict["remote_boolean_varible"] = "virt_use_fusefs"

    remove_pkg = False
    seLinuxBool = None
    seLinuxfusefs = None
    gluster_uri = None
    mig_result = None

    # Make sure all of the parameters are assigned valid values
    check_parameters(test, params)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
                                       params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
                                       params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
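    # complete_uri() typically yields a URI of the form
    # 'qemu+ssh://<host>/system' (assumption: the exact scheme depends on
    # the transport configured for the test)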

    # For --postcopy enable
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        virsh_options = "%s %s" % (virsh_options, postcopy_options)
        params['virsh_options'] = virsh_options
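        # e.g. with postcopy_options '--postcopy', virsh_options becomes
        # '--verbose --live --postcopy'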

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Back up xml file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    migrate_setup = libvirt.MigrationTest()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Configure selinux
        if selinux_local or selinux_remote:
            seLinuxBool = utils_misc.SELinuxBoolean(params)
            seLinuxBool.setup()
            if sebool_fusefs_local or sebool_fusefs_remote:
                seLinuxfusefs = utils_misc.SELinuxBoolean(test_dict)
                seLinuxfusefs.setup()
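                # 'virt_use_fusefs' allows confined QEMU processes to
                # access files on FUSE filesystems such as this mount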

        # Setup glusterfs and disk xml.
        disk_img = "gluster.%s" % disk_format
        params['disk_img'] = disk_img
        libvirt.set_vm_disk(vm, params)

        vm_xml_cxt = virsh.dumpxml(vm_name).stdout_text.strip()
        logging.debug("The VM XML with gluster disk source: \n%s", vm_xml_cxt)

        # Check if gluster server is deployed locally
        if not host_ip:
            logging.debug("Enable port 24007 and 49152:49216")
            migrate_setup.migrate_pre_setup(src_uri, params, ports="24007")
            migrate_setup.migrate_pre_setup(src_uri, params)
            gluster_uri = "{}:{}".format(client_ip, vol_name)
        else:
            gluster_uri = "{}:{}".format(host_ip, vol_name)

        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")

        if gluster_mount_dir:
            # If the package 'glusterfs-fuse' is missing on the target,
            # 'mount -t glusterfs' fails there, so install it on demand
            pkg_name = 'glusterfs-fuse'
            logging.debug("Check if glusterfs-fuse is installed")
            pkg_mgr = utils_package.package_manager(remote_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("glusterfs-fuse will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)
                else:
                    remove_pkg = True

            gluster_img = create_or_clean_backend_dir(gluster_uri, params)
            create_or_clean_backend_dir(gluster_uri, params, remote_session)

            logging.debug("Gluster Image is %s", gluster_img)
            gluster_backend_disk = {'disk_source_name': gluster_img}
            # Update disk xml with gluster image in backend dir
            libvirt.set_vm_disk(vm, gluster_backend_disk)
        remote_session.close()

        mig_result = do_migration(vm, dest_uri, options, extra)
        check_migration_res(mig_result)

        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
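            # Roles are deliberately swapped here so the migration target
            # can SSH back to the source host for the return migration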
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre-migration setup for the local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name,
                                              virsh_options, src_uri)
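            # e.g. (hypothetical URI):
            #   virsh migrate <vm_name> --verbose --live \
            #     qemu+ssh://<source_host>/system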
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)

            if cmd_result.exit_status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))

    finally:
        logging.info("Recovery test environment")
        orig_config_xml.sync()

        # Clean up the pre-migration setup on the local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migrate_setup.migrate_pre_setup(src_uri, params,
                                            cleanup=True)

        # Clean up SELinux configuration
        if seLinuxBool:
            seLinuxBool.cleanup()
            if seLinuxfusefs:
                seLinuxfusefs.cleanup()

        # Disable ports 24007 and 49152:49216
        if not host_ip:
            logging.debug("Disable 24007 and 49152:49216 in Firewall")
            migrate_setup.migrate_pre_setup(src_uri, params,
                                            cleanup=True, ports="24007")
            migrate_setup.migrate_pre_setup(src_uri, params,
                                            cleanup=True)

        gluster.setup_or_cleanup_gluster(False, **params)

        # Cleanup backend directory/symlink
        if gluster_mount_dir and gluster_uri:
            remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            create_or_clean_backend_dir(gluster_uri, params, is_clean=True)
            create_or_clean_backend_dir(gluster_uri, params, remote_session,
                                        True)
            if remove_pkg:
                pkg_mgr = utils_package.package_manager(remote_session,
                                                        pkg_name)
                if pkg_mgr.is_installed(pkg_name):
                    logging.debug("glusterfs-fuse will be uninstalled")
                    if not pkg_mgr.remove():
                        logging.error("Package '%s' un-installation fails", pkg_name)
            remote_session.close()