Example #1
def do_stress_migration(vms, srcuri, desturi, migration_type, test, params,
                        thread_timeout=60):
    """
    Migrate vms with stress.

    :param vms: list of VM objects to be migrated
    :param srcuri: connect uri for source machine
    :param desturi: connect uri for destination machine
    :param migration_type: type of migration to be performed
    :param test: test object
    :param params: test parameters dict
    :param thread_timeout: default timeout for migration thread

    :raise: test.fail if migration fails
    """
    migrate_setup = migration.MigrationTest()
    options = params.get("migrate_options")
    ping_count = int(params.get("ping_count", 10))
    migrate_times = 1
    migrate_back = params.get("virsh_migrate_back", "no") == "yes"
    if migrate_back:
        migrate_times = int(params.get("virsh_migrate_there_and_back", 1))
    uptime = {}

    for vm in vms:
        uptime[vm.name] = vm.uptime()
        logging.info("uptime of VM %s: %s", vm.name, uptime[vm.name])
        migrate_setup.ping_vm(vm, params, ping_count=ping_count)
    logging.debug("Starting migration...")
    migrate_options = ("%s --timeout %s"
                       % (options, params.get("virsh_migrate_timeout", 60)))
    for each_time in range(migrate_times):
        logging.debug("Migrating vms from %s to %s for %s time", srcuri,
                      desturi, each_time + 1)
        try:
            migrate_setup.do_migration(vms, srcuri, desturi, migration_type,
                                       options=migrate_options,
                                       thread_timeout=thread_timeout)
        except Exception as info:
            test.fail(info)

        uptime = migrate_setup.post_migration_check(vms, params, uptime,
                                                    uri=desturi)
        if migrate_back and "cross" not in migration_type:
            migrate_setup.migrate_pre_setup(srcuri, params)
            logging.debug("Migrating back to source from %s to %s for %s time",
                          desturi, srcuri, each_time + 1)
            try:
                migrate_setup.do_migration(vms, desturi, srcuri, migration_type,
                                           options=migrate_options,
                                           thread_timeout=thread_timeout,
                                           virsh_uri=desturi)
            except Exception as info:
                test.fail(info)
            uptime = migrate_setup.post_migration_check(vms, params, uptime)
            migrate_setup.migrate_pre_setup(srcuri, params, cleanup=True)
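
A minimal sketch of how this helper might be driven from a test entry point. The URIs and parameter names below are assumptions mirroring the run() function in Example #5:

# Hypothetical invocation from a run(test, params, env) entry point.
vms = env.get_all_vms()
src_uri = "qemu:///system"
dest_uri = libvirt_vm.complete_uri(params.get("migrate_dest_host"))
do_stress_migration(vms, src_uri, dest_uri,
                    params.get("migration_type", "orderly"),
                    test, params,
                    thread_timeout=int(params.get("thread_timeout", 120)))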
Example #2
def copied_migration(test, vms, vms_ip, params):
    """
    Migrate vms with storage copied.

    :param test: The test object
    :param vms: VM list
    :param vms_ip: Dict for VM's IP
    :param params: The parameters for the migration
    :return: MigrationTest object
    :raise: test.fail if anything goes wrong
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    status_error = params.get("status_error", "no")
    options = "--live %s" % copy_option

    cp_mig = migration.MigrationTest()
    cp_mig.do_migration(vms,
                        None,
                        dest_uri,
                        "orderly",
                        options,
                        timeout,
                        ignore_status=True,
                        status_error=status_error)
    check_ip_failures = []

    if cp_mig.RET_MIGRATION:
        for vm in vms:
            try:
                utils_test.check_dest_vm_network(vm, vms_ip[vm.name],
                                                 remote_host, username,
                                                 password)
            except exceptions.TestFail as detail:
                check_ip_failures.append(str(detail))
    else:
        for vm in vms:
            cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        check_output(test, str(cp_mig.ret), params)
        test.fail("Migrate vms with storage copied failed.")
    if check_ip_failures:
        test.fail("Check IP failed: %s" % check_ip_failures)

    return cp_mig
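
A hedged usage sketch; vm.get_address() is the same accessor Example #10 uses to collect guest IPs before migrating:

# Hypothetical invocation: collect each guest's IP first so the
# destination-side network check has a baseline to compare against.
vms_ip = {vm.name: vm.get_address() for vm in vms}
cp_mig = copied_migration(test, vms, vms_ip, params)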
Example #3
def run_migration_back(params, test):
    """
    Execute migration back from target host to source host

    :param params: dict, test parameters
    :param test: test object
    """
    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    vm_name = params.get("migrate_main_vm")
    options = params.get("virsh_migrate_options", "--live --verbose")
    if migrate_vm_back:
        ssh_connection = utils_conn.SSHConnection(
            server_ip=params.get("client_ip"),
            server_pwd=params.get("local_pwd"),
            client_ip=params.get("server_ip"),
            client_pwd=params.get("server_pwd"))
        try:
            ssh_connection.conn_check()
        except utils_conn.ConnectionError:
            ssh_connection.conn_setup()
            ssh_connection.conn_check()

        # Pre migration setup for local machine
        src_full_uri = libvirt_vm.complete_uri(
            params.get("migrate_source_host"))

        migration_test = migration.MigrationTest()
        migration_test.migrate_pre_setup(src_full_uri, params)
        cmd = "virsh migrate %s %s %s" % (vm_name, options, src_full_uri)
        test.log.debug("Start migration: %s", cmd)
        runner_on_target = remote.RemoteRunner(
            host=params.get("remote_ip"),
            username=params.get("remote_user"),
            password=params.get("remote_pwd"))
        cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
        test.log.info(cmd_result)
        if cmd_result.exit_status:
            destroy_cmd = "virsh destroy %s" % vm_name
            remote.run_remote_cmd(destroy_cmd,
                                  params,
                                  runner_on_target,
                                  ignore_status=False)
            test.fail("Failed to run '%s' on remote: %s" % (cmd, cmd_result))
    else:
        test.log.debug("No need to migrate back")
Example #4
def cleanup_test_default(vm, params, test):
    """
    Cleanup steps by default

    :param vm:  VM object
    :param params: dict, test parameters
    :param test: test object
    """
    test.log.debug("Recover test environment")
    # Clean VM on destination and source
    try:
        migration_test = migration.MigrationTest()
        dest_uri = params.get("virsh_migrate_desturi")

        migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
        if vm.is_alive():
            vm.destroy(gracefully=False)
    except Exception as err:
        test.log.error(err)
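
A sketch of the intended call site, assuming the helper is used as a finally-block cleanup in the same way the run() examples below recover their environment:

try:
    pass  # test body goes here
finally:
    # Always attempt cleanup, even when the test body raised.
    cleanup_test_default(vm, params, test)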
Example #5
def run(test, params, env):
    """
    Test migration under stress.
    """
    vm_names = params.get("vms").split()
    if len(vm_names) < 2:
        test.cancel("Provide enough vms for migration")

    src_uri = "qemu:///system"
    dest_uri = libvirt_vm.complete_uri(params.get("migrate_dest_host",
                                                  "EXAMPLE"))
    if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
        test.cancel("The dest_uri '%s' is invalid" % dest_uri)

    # Migrated vms' instance
    vms = env.get_all_vms()
    params["load_vms"] = list(vms)

    cpu = int(params.get("smp", 1))
    memory = int(params.get("mem")) * 1024
    stress_tool = params.get("stress_tool", "")
    remote_stress = params.get("migration_stress_remote", "no") == "yes"
    host_stress = params.get("migration_stress_host", "no") == "yes"
    vms_stress = params.get("migration_stress_vms", "no") == "yes"
    vm_bytes = params.get("stress_vm_bytes", "128M")
    stress_args = params.get("%s_args" % stress_tool)
    migration_type = params.get("migration_type")
    start_migration_vms = params.get("start_migration_vms", "yes") == "yes"
    thread_timeout = int(params.get("thread_timeout", 120))
    ubuntu_dep = ['build-essential', 'git']
    hstress = rstress = None
    vstress = {}

    # Set vm_bytes for start_cmd
    mem_total = utils_memory.memtotal()
    vm_reserved = len(vms) * memory
    if vm_bytes == "half":
        vm_bytes = (mem_total - vm_reserved) / 2
    elif vm_bytes == "shortage":
        vm_bytes = mem_total - vm_reserved + 524288
    if "vm-bytes" in stress_args:
        params["%s_args" % stress_tool] = stress_args % vm_bytes

    # Ensure stress tool is available in host
    if host_stress:
        # remove package manager installed tool to avoid conflict
        if not utils_package.package_remove(stress_tool):
            logging.error("Existing %s is not removed")
        if "stress-ng" in stress_tool and 'Ubuntu' in utils_misc.get_distro():
            params['stress-ng_dependency_packages_list'] = ubuntu_dep
        try:
            hstress = utils_test.HostStress(stress_tool, params)
            hstress.load_stress_tool()
        except utils_test.StressError as info:
            test.error(info)

    if remote_stress:
        try:
            server_ip = params['remote_ip']
            server_pwd = params['remote_pwd']
            server_user = params.get('remote_user', 'root')
            remote_session = remote.wait_for_login('ssh', server_ip, '22', server_user,
                                                   server_pwd, r"[\#\$]\s*$")
            # remove package manager installed tool to avoid conflict
            if not utils_package.package_remove(stress_tool, session=remote_session):
                logging.error("Existing %s is not removed")
            if("stess-ng" in stress_tool and
               'Ubuntu' in utils_misc.get_distro(session=remote_session)):
                params['stress-ng_dependency_packages_list'] = ubuntu_dep

            rstress = utils_test.HostStress(stress_tool, params, remote_server=True)
            rstress.load_stress_tool()
            remote_session.close()
        except utils_test.StressError as info:
            remote_session.close()
            test.error(info)

    for vm in vms:
        # Keep vm dead for edit
        if vm.is_alive():
            vm.destroy()
        set_cpu_memory(vm.name, cpu, memory)

    try:
        if start_migration_vms:
            for vm in vms:
                vm.start()
                session = vm.wait_for_login()
                # remove package manager installed tool to avoid conflict
                if not utils_package.package_remove(stress_tool, session=session):
                    logging.error("Existing %s is not removed")
                # configure stress in VM
                if vms_stress:
                    if("stress-ng" in stress_tool and
                       'Ubuntu' in utils_misc.get_distro(session=session)):
                        params['stress-ng_dependency_packages_list'] = ubuntu_dep
                    try:
                        vstress[vm.name] = utils_test.VMStress(vm, stress_tool, params)
                        vstress[vm.name].load_stress_tool()
                    except utils_test.StressError as info:
                        session.close()
                        test.error(info)
                session.close()

        do_stress_migration(vms, src_uri, dest_uri, migration_type, test,
                            params, thread_timeout)
    finally:
        logging.debug("Cleanup vms...")
        for vm in vms:
            migration.MigrationTest().cleanup_dest_vm(vm, None, dest_uri)
            # Try to start vms in source once vms in destination are
            # cleaned up
            if not vm.is_alive():
                vm.start()
                vm.wait_for_login()
            stress_obj = vstress.get(vm.name)
            if stress_obj:
                stress_obj.unload_stress()

        if rstress:
            rstress.unload_stress()

        if hstress:
            hstress.unload_stress()
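
The vm_bytes sizing rule above can be read in isolation. A standalone sketch, assuming utils_memory.memtotal() reports MemTotal from /proc/meminfo in KiB:

def compute_vm_bytes(mode, mem_total, vm_reserved):
    """Sketch of the sizing rule used above; all values in KiB."""
    if mode == "half":
        # Use half of the memory left after reserving guest RAM
        return (mem_total - vm_reserved) // 2
    if mode == "shortage":
        # Oversubscribe by 512 MiB to provoke memory pressure
        return mem_total - vm_reserved + 524288
    return mode  # an explicit size string such as "128M"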
Example #6
def run(test, params, env):
    """
    Test storage migration
    1) Do storage migration (copy-storage-all/copy-storage-inc) with
       TLS encryption over the NBD transport
    2) Cancel storage migration with TLS encryption
    3) Copy only the top image for storage migration with backing chain

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def prepare_nfs_backingfile(vm, params):
        """
        Create an image whose backing file resides on an NFS mount

        :param vm: The guest
        :param params: the parameters used
        """
        mnt_path_name = params.get("nfs_mount_dir", "nfs-mount")
        exp_opt = params.get("export_options", "rw,no_root_squash,fsid=0")
        exp_dir = params.get("export_dir", "nfs-export")
        backingfile_img = params.get("source_dist_img", "nfs-img")
        disk_format = params.get("disk_format", "qcow2")
        img_name = params.get("img_name", "test.img")
        precreation = "yes" == params.get("precreation", "yes")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        disk_xml = vmxml.devices.by_device_tag('disk')[0]
        src_disk_format = disk_xml.xmltreefile.find('driver').get('type')
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        disk_img = os.path.join(os.path.dirname(blk_source), img_name)

        res = libvirt.setup_or_cleanup_nfs(True, mnt_path_name, is_mount=True,
                                           export_options=exp_opt,
                                           export_dir=exp_dir)
        mnt_path = res["mount_dir"]
        params["selinux_status_bak"] = res["selinux_status_bak"]

        if vm.is_alive():
            vm.destroy(gracefully=False)

        disk_cmd = ("qemu-img convert -f %s -O %s %s %s/%s" %
                    (src_disk_format, disk_format,
                     blk_source, mnt_path, backingfile_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append("%s/%s" % (mnt_path, backingfile_img))
        logging.debug("Create a local image backing on NFS.")
        disk_cmd = ("qemu-img create -f %s -b %s/%s %s" %
                    (disk_format, mnt_path, backingfile_img, disk_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append(disk_img)
        if precreation:
            logging.debug("Create an image backing on NFS on remote host.")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append("%s/%s" % (mnt_path, backingfile_img))
            remote_image_list.append(disk_img)
            remote_session.close()

        params.update({'disk_source_name': disk_img,
                       'disk_type': 'file',
                       'disk_source_protocol': 'file'})
        libvirt.set_vm_disk(vm, params)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Local variables
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    virsh_options = params.get("virsh_options", "")
    copy_storage_option = params.get("copy_storage_option")
    extra = params.get("virsh_migrate_extra", "")
    options = params.get("virsh_migrate_options", "--live --verbose")
    backingfile_type = params.get("backingfile_type")
    check_str_local_log = params.get("check_str_local_log", "")
    disk_format = params.get("disk_format", "qcow2")
    log_file = params.get("log_outputs", "/var/log/libvirt/libvirtd.log")
    daemon_conf_dict = eval(params.get("daemon_conf_dict", '{}'))
    cancel_migration = "yes" == params.get("cancel_migration", "no")
    migrate_again = "yes" == params.get("migrate_again", "no")
    precreation = "yes" == params.get("precreation", "yes")
    tls_recovery = "yes" == params.get("tls_auto_recovery", "yes")
    func_params_exists = "yes" == params.get("func_params_exists", "no")
    status_error = "yes" == params.get("status_error", "no")

    local_image_list = []
    remote_image_list = []
    tls_obj = None

    func_name = None
    daemon_conf = None
    mig_result = None
    remote_session = None
    vm_session = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
                                       params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra = "{} {}".format(extra, copy_storage_option)

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})
    if cancel_migration:
        func_name = migration_test.do_cancel

    # For safety reasons, back up the VM XML before making changes.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        if backingfile_type == "nfs":
            prepare_nfs_backingfile(vm, params)

        if extra.count("copy-storage-all") and precreation:
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (disk_format, blk_source, vsize))
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append(blk_source)
            remote_session.close()

        # Update libvirtd configuration
        if daemon_conf_dict:
            if os.path.exists(log_file):
                os.remove(log_file)
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

        if extra.count("--tls"):
            tls_obj = TLSConnection(params)
            if tls_recovery:
                tls_obj.auto_recover = True
                tls_obj.conn_setup()

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))
        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        migration_test.ping_vm(vm, params)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True, virsh_opt=virsh_options,
                                    extra_opts=extra, func=func_name,
                                    **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if migrate_again and status_error:
            logging.debug("Sleeping 10 seconds before rerun migration")
            time.sleep(10)
            if cancel_migration:
                func_name = None
            params["status_error"] = "no"
            migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                        options, thread_timeout=900,
                                        ignore_status=True,
                                        virsh_opt=virsh_options,
                                        extra_opts=extra, func=func_name,
                                        **extra_args)

            mig_result = migration_test.ret
            migration_test.check_result(mig_result, params)
        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        except Exception as err:
            logging.error(err)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()

        if daemon_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None, is_recover=True,
                                             config_object=daemon_conf)
        if tls_obj:
            logging.debug("Clean up local objs")
            del tls_obj
        for source_file in local_image_list:
            libvirt.delete_local_disk("file", path=source_file)
        for img in remote_image_list:
            remote.run_remote_cmd("rm -rf %s" % img, params)

        if remote_session:
            remote_session.close()
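
A hypothetical sanity check that could follow prepare_nfs_backingfile(): dump the top image's metadata so the backing chain is visible in the test logs (disk_img is the chain's top image created inside that helper):

result = process.run("qemu-img info %s" % disk_img,
                     ignore_status=True, verbose=True)
logging.debug("Backing chain of %s:\n%s", disk_img, result.stdout_text)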
Example #7
def run(test, params, env):
    """
    Run the test

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    libvirt_version.is_libvirt_feature_supported(params)

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    extra = params.get("virsh_migrate_extra")
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    options = params.get("virsh_migrate_options",
                         "--live --p2p --persistent --verbose")
    virsh_options = params.get("virsh_options", "")
    stress_package = params.get("stress_package")
    action_during_mig = params.get("action_during_mig")
    migrate_speed = params.get("migrate_speed")
    migrate_speed_again = params.get("migrate_speed_again")
    migrate_again = "yes" == params.get("migrate_again", "no")
    vm_state_after_abort = params.get("vm_state_after_abort")
    return_port = "yes" == params.get("return_port", "no")
    params['server_pwd'] = params.get("migrate_dest_pwd")
    params['server_ip'] = params.get("migrate_dest_host")
    params['server_user'] = params.get("remote_user", "root")
    is_storage_migration = "--copy-storage-all" in extra
    setup_tls = "yes" == params.get("setup_tls", "no")
    qemu_conf_dest = params.get("qemu_conf_dest", "{}")
    migrate_tls_force_default = "yes" == params.get("migrate_tls_force_default", "no")
    poweroff_src_vm = "yes" == params.get("poweroff_src_vm", "no")
    check_port = "yes" == params.get("check_port", "no")
    server_ip = params.get("migrate_dest_host")
    server_user = params.get("remote_user", "root")
    server_pwd = params.get("migrate_dest_pwd")
    server_params = {'server_ip': server_ip,
                     'server_user': server_user,
                     'server_pwd': server_pwd}
    qemu_conf_list = eval(params.get("qemu_conf_list", "[]"))
    qemu_conf_path = params.get("qemu_conf_path")
    min_port = params.get("min_port")

    vm_session = None
    qemu_conf_remote = None
    (remove_key_local, remove_key_remote) = (None, None)

    # For safety reasons, back up the VM XML before making changes.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    local_both_conf_obj = None
    remote_file_list = []
    conn_obj_list = []

    try:
        # Setup default value for migrate_tls_force
        if migrate_tls_force_default:
            value_list = ["migrate_tls_force"]
            # Setup migrate_tls_force default value on remote
            server_params['file_path'] = "/etc/libvirt/qemu.conf"
            remove_key_remote = libvirt_config.remove_key_in_conf(value_list,
                                                                  "qemu",
                                                                  remote_params=server_params)
            # Setup migrate_tls_force default value on local
            remove_key_local = libvirt_config.remove_key_in_conf(value_list,
                                                                 "qemu")

        if check_port:
            server_params['file_path'] = qemu_conf_path
            remove_key_remote = libvirt_config.remove_key_in_conf(qemu_conf_list,
                                                                  "qemu",
                                                                  remote_params=server_params)

        # Update only remote qemu conf
        if qemu_conf_dest:
            qemu_conf_remote = libvirt_remote.update_remote_file(
                server_params, qemu_conf_dest, "/etc/libvirt/qemu.conf")
        # Update local or both sides configuration files
        local_both_conf_obj = update_local_or_both_conf_file(params)
        # Setup TLS
        if setup_tls:
            conn_obj_list.append(migration_base.setup_conn_obj('tls',
                                                               params,
                                                               test))
        # Update guest disk xml
        if not is_storage_migration:
            libvirt.set_vm_disk(vm, params)
        else:
            remote_file_list.append(libvirt_disk.create_remote_disk_by_same_metadata(vm,
                                                                                     params))
        if check_port:
            # Create a remote runner
            runner_on_target = remote_old.RemoteRunner(host=server_ip,
                                                       username=server_user,
                                                       password=server_pwd)
            cmd = "nc -l -p %s &" % min_port
            remote_old.run_remote_cmd(cmd, params, runner_on_target, ignore_status=False)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        vm_session = vm.wait_for_login()
        if action_during_mig:
            if poweroff_src_vm:
                params.update({'vm_session': vm_session})
            action_during_mig = migration_base.parse_funcs(action_during_mig,
                                                           test, params)

        if stress_package:
            migration_test.run_stress_in_vm(vm, params)
        mode = ('both' if postcopy_options and '--postcopy' in postcopy_options
                else 'precopy')
        if migrate_speed:
            migration_test.control_migrate_speed(vm_name,
                                                 int(migrate_speed),
                                                 mode)
        # Execute migration process
        migration_base.do_migration(vm, migration_test, None, dest_uri,
                                    options, virsh_options, extra,
                                    action_during_mig,
                                    extra_args)

        func_returns = dict(migration_test.func_ret)
        migration_test.func_ret.clear()
        logging.debug("Migration returns function results:%s", func_returns)
        if return_port:
            port_used = get_used_port(func_returns)
        if check_port:
            port_used = get_used_port(func_returns)
            if int(port_used) != int(min_port) + 1:
                test.fail("Wrong port for migration.")

        if vm_state_after_abort:
            check_vm_state_after_abort(vm_name, vm_state_after_abort,
                                       bk_uri, dest_uri, test)

        if migrate_again:
            if not vm.is_alive():
                vm.start()
            vm_session = vm.wait_for_login()
            action_during_mig = migration_base.parse_funcs(params.get('action_during_mig_again'),
                                                           test, params)
            extra_args['status_error'] = params.get("migrate_again_status_error", "no")

            if params.get("virsh_migrate_extra_mig_again"):
                extra = params.get("virsh_migrate_extra_mig_again")

            if params.get('scp_list_client_again'):
                params['scp_list_client'] = params.get('scp_list_client_again')
                # Recreate tlsconnection object using new parameter values
                conn_obj_list.append(migration_base.setup_conn_obj('tls',
                                                                   params,
                                                                   test))

            if migrate_speed_again:
                migration_test.control_migrate_speed(vm_name,
                                                     int(migrate_speed_again),
                                                     mode)

            migration_base.do_migration(vm, migration_test, None, dest_uri,
                                        options, virsh_options,
                                        extra, action_during_mig,
                                        extra_args)
            if return_port:
                func_returns = dict(migration_test.func_ret)
                logging.debug("Migration returns function "
                              "results:%s", func_returns)
                port_second = get_used_port(func_returns)
                if port_used != port_second:
                    test.fail("Expect same port '{}' is used as previous one, "
                              "but found new one '{}'".format(port_used,
                                                              port_second))
                else:
                    logging.debug("Same port '%s' was used as "
                                  "expected", port_second)
        if int(migration_test.ret.exit_status) == 0:
            migration_test.post_migration_check([vm], params, uri=dest_uri)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        if vm_session:
            vm_session.close()
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)
        # Restore remote qemu conf and restart libvirtd
        if qemu_conf_remote:
            logging.debug("Recover remote qemu configurations")
            del qemu_conf_remote
        # Restore local or both sides conf and restart libvirtd

        recover_config_file(local_both_conf_obj, params)
        if remove_key_remote:
            del remove_key_remote
        if remove_key_local:
            libvirt.customize_libvirt_config(None,
                                             config_object=remove_key_local,
                                             config_type='qemu')
        # Clean up connection object, like TLS
        migration_base.cleanup_conn_obj(conn_obj_list, test)

        for one_file in remote_file_list:
            if one_file:
                remote_old.run_remote_cmd("rm -rf %s" % one_file, params)

        orig_config_xml.sync()
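
A condensed sketch of the port-check idea used above: occupy the lowest migration port on the target so libvirt is expected to fall back to the next one. server_ip, server_user, server_pwd and min_port are the variables defined in the test:

runner_on_target = remote_old.RemoteRunner(host=server_ip,
                                           username=server_user,
                                           password=server_pwd)
remote_old.run_remote_cmd("nc -l -p %s &" % min_port, params,
                          runner_on_target, ignore_status=False)
# After migration, the port reported by get_used_port() is expected
# to be min_port + 1.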
Example #8
    def __init__(self, test, env, params, *args, **dargs):
        """
        Init params and other necessary variables
        """
        for k, v in iteritems(dict(*args, **dargs)):
            params[k] = v
        self.params = params
        self.test = test
        self.env = env

        # Check whether there are unset parameters
        for v in list(itervalues(self.params)):
            if isinstance(v, string_types) and v.count("EXAMPLE"):
                self.test.cancel("Please set real value for %s" % v)

        # Initialize the params
        self.uri_type = params.get("uri_type", "qemu_system")
        self.migrate_desturi_proto = params.get("migrate_desturi_proto", "tcp")
        self.virsh_migrate_options = params.get("virsh_migrate_options",
                                                "--live --p2p")
        self.tunnelled = params.get("tunnelled", False)
        self.compressed = params.get("compressed", False)
        self.auto_converge = params.get("auto_converge", False)
        self.postcopy = params.get("postcopy", False)
        self.native_tls = params.get("native_tls", False)
        self.persistdest = params.get("persistdest", False)
        self.undefinesource = params.get("undefinesource", False)
        self.migrate_thread_timeout = int(
            params.get("migrate_thread_timeout", "900"))
        self.migrate_vm_back = params.get("migrate_vm_back", "yes")
        self.migrate_main_vm_name = params.get("migrate_main_vm",
                                               "avocado-vt-vm1")
        self.migrate_vms_name = params.get("migrate_vms", "")
        self.storage_type = params.get("storage_type")
        self.nfs_mount_dir = params.get("nfs_mount_dir")
        self.local_ip = params.get("local_ip")
        self.remote_ip = params.get('remote_ip')
        self.remote_user = params.get('remote_user')
        self.remote_pwd = params.get('remote_pwd')
        self.migrate_source_host_cn = params.get("migrate_source_host_cn")
        self.migrate_source_host = params.get("migrate_source_host")
        self.migrate_source_pwd = params.get('migrate_source_pwd')
        self.migrate_dest_host_cn = params.get("migrate_dest_host_cn")
        self.migrate_dest_host = params.get("migrate_dest_host")
        self.migrate_dest_pwd = params.get('migrate_dest_pwd')
        self.selinux_state = params.get("selinux_state", "enforcing")

        # Set libvirtd remote access port
        remote_port_dict = {'tls': '16514', 'tcp': '16509'}
        self.remote_port = remote_port_dict.get(self.migrate_desturi_proto)

        # Set migration src and dest uri
        self.dest_uri = get_uri(
            self.uri_type, self.migrate_desturi_proto,
            (self.migrate_dest_host_cn
             if self.migrate_dest_host_cn else self.migrate_dest_host))
        self.src_uri = get_uri(self.uri_type)
        self.src_uri_full = get_uri(
            self.uri_type, self.migrate_desturi_proto,
            (self.migrate_source_host_cn
             if self.migrate_source_host_cn else self.migrate_source_host))

        # Set virsh migrate options
        self.virsh_migrate_options += self._extra_migrate_options()

        # Set migrate flags
        self.migrate_flags = self._migrate_flags()

        # Remote dict
        self.remote_dict = {
            'server_ip': self.remote_ip,
            'server_user': self.remote_user,
            'server_pwd': self.remote_pwd
        }

        # Create a session to remote host
        self.remote_session = remote.wait_for_login('ssh', self.remote_ip,
                                                    '22', self.remote_user,
                                                    self.remote_pwd,
                                                    r"[\#\$]\s*$")
        # Get vm objects
        self.vms = []
        self.main_vm = env.get_vm(self.migrate_main_vm_name.strip())
        if not self.main_vm:
            self.test.error("Can't get vm object for vm: %s",
                            self.migrate_main_vm_name)
        self.vms.append(self.main_vm)
        for vm_name in self.migrate_vms_name.split():
            if vm_name != self.migrate_main_vm_name.strip():
                vm = env.get_vm(vm_name)
                if not vm:
                    self.test.error("Can't get vm object for vm: %s", vm_name)
                self.vms.append(vm)

        # Get a migration object
        self.obj_migration = migration.MigrationTest()

        # Variable: vm xml backup for vms recovery
        self.vm_xml_backup = []

        # Variable: conf files objects to be restored in cleanup()
        self.local_conf_objs = []
        self.remote_conf_objs = []

        # Variable: objects(ssh, tls and tcp, etc) to be cleaned up in cleanup()
        self.objs_list = []

        # Variable: enabled firewalld port lists on local host
        self.opened_ports_local = []

        # Variable: enabled firewalld port lists on remote host
        self.opened_ports_remote = []
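
A hedged instantiation sketch; the class name is not visible in the snippet above, so MigrationBase is an assumption:

# Hypothetical: extra keyword arguments are merged into params by __init__.
migration_obj = MigrationBase(test, env, params,
                              migrate_desturi_proto="tls")
logging.debug("Computed dest uri: %s", migration_obj.dest_uri)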
Example #9
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection
    """
    test_dict = dict(params)
    vm_name = test_dict.get("main_vm")
    vm = env.get_vm(vm_name)
    start_vm = test_dict.get("start_vm", "no")

    # Server and client parameters
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    client_ip = test_dict.get("client_ip")
    client_user = test_dict.get("client_user")
    client_pwd = test_dict.get("client_pwd")
    server_cn = test_dict.get("server_cn")
    client_cn = test_dict.get("client_cn")
    target_ip = test_dict.get("target_ip", "")
    # Generate remote IP
    if target_ip == "":
        if server_cn:
            target_ip = server_cn
        elif server_ip:
            target_ip = server_ip
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    # Ceph disk parameters
    driver = test_dict.get("test_driver", "qemu")
    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    source_type = test_dict.get("vm_disk_source_type", "file")
    virsh_options = test_dict.get("virsh_options", "--verbose --live")
    vol_name = test_dict.get("vol_name")
    disk_src_protocol = params.get("disk_source_protocol")
    source_file = test_dict.get("disk_source_file")
    disk_format = test_dict.get("disk_format", "qcow2")
    mon_host = params.get("mon_host")
    ceph_key_opt = ""
    attach_disk = False
    # Disk XML file
    disk_xml = None
    # Define ceph_disk conditional variable
    ceph_disk = "yes" == test_dict.get("ceph_disk")

    # For --postcopy enable
    postcopy_options = test_dict.get("postcopy_options")
    if postcopy_options and not virsh_options.count(postcopy_options):
        virsh_options = "%s %s" % (virsh_options, postcopy_options)
        test_dict['virsh_options'] = virsh_options

    # For bi-directional and tls reverse test
    uri_port = test_dict.get("uri_port", ":22")
    uri_path = test_dict.get("uri_path", "/system")
    src_uri = test_dict.get("migration_source_uri", "qemu:///system")
    uri = "%s%s%s://%s%s%s" % (driver, plus, transport, target_ip, uri_port,
                               uri_path)
    test_dict["desuri"] = uri

    # Make sure all parameters are assigned valid values
    check_parameters(test, test_dict)
    # Verify an SSH login to the remote host works before going further
    remote_session = remote.wait_for_login('ssh', server_ip, '22', server_user,
                                           server_pwd, r"[\#\$]\s*$")
    remote_session.close()

    # Set up remote ssh key and remote /etc/hosts file for bi-directional migration
    migrate_vm_back = "yes" == test_dict.get("migrate_vm_back", "no")
    if migrate_vm_back:
        ssh_key.setup_remote_ssh_key(server_ip, server_user, server_pwd)
        ssh_key.setup_remote_known_hosts_file(client_ip, server_ip,
                                              server_user, server_pwd)
    # Reset Vm state if needed
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Setup migration context
    migrate_setup = migration.MigrationTest()
    migrate_setup.migrate_pre_setup(test_dict["desuri"], params)

    # Install ceph-common on remote host machine.
    remote_ssh_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r"[\#\$]\s*$")
    if not utils_package.package_install(["ceph-common"], remote_ssh_session):
        test.error("Failed to install required packages on remote host")
    remote_ssh_session.close()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # Record the initial SELinux enforcing status on both hosts
        LOCAL_SELINUX_ENFORCING_STATUS = utils_selinux.get_status()
        logging.info("previous local enforcing status: %s",
                     LOCAL_SELINUX_ENFORCING_STATUS)
        cmd_result = remote.run_remote_cmd('getenforce', params,
                                           runner_on_target)
        REMOTE_SELINUX_ENFORCING_STATUS = cmd_result.stdout_text
        logging.info("previous remote enforce :%s",
                     REMOTE_SELINUX_ENFORCING_STATUS)

        if ceph_disk:
            logging.info(
                "Put local SELinux in permissive mode when testing ceph migration"
            )
            utils_selinux.set_status("permissive")

            logging.info("Put remote SELinux in permissive mode")
            cmd = "setenforce permissive"
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            if status:
                test.error("Failed to set SELinux in permissive mode")

            # Prepare ceph disk.
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            test_dict['key_file'] = key_file
            test_dict['first_disk'] = vm.get_first_disk_devices()
            ceph_key_opt, secret_uuid = prepare_ceph_disk(
                test_dict, remote_virsh_dargs, test, runner_on_target)
            host_ip = test_dict.get('mon_host')
            disk_image = test_dict.get('disk_img')

            # Build auth information.
            auth_attrs = {}
            auth_attrs['auth_user'] = params.get("auth_user")
            auth_attrs['secret_type'] = params.get("secret_type")
            auth_attrs['secret_uuid'] = secret_uuid
            build_disk_xml(vm_name,
                           disk_format,
                           host_ip,
                           disk_src_protocol,
                           vol_name,
                           disk_image,
                           auth=auth_attrs)

            vm_xml_cxt = process.run("virsh dumpxml %s" % vm_name,
                                     shell=True).stdout_text
            logging.debug("The VM XML with ceph disk source: \n%s", vm_xml_cxt)
            try:
                if vm.is_dead():
                    vm.start()
            except virt_vm.VMStartError as e:
                test.fail("Failed to start VM %s: %s" % (vm_name, e))

        # Ensure the same VM name doesn't exist on remote host before migrating.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        # Trigger migration
        migrate_vm(test, test_dict)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()
            # Pre migration setup for local machine
            migrate_setup.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            status, output = (cmd_result.exit_status,
                              cmd_result.stdout_text.strip())
            logging.info(output)
            if status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd, params, runner_on_target)
                test.fail("Failed to run '%s' on remote: %s" % (cmd, output))
    finally:
        logging.info("Recovery test environment")
        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            migrate_setup.migrate_pre_setup(src_uri, params, cleanup=True)
        # Ensure the VM is cleaned up on the remote host even if migration failed.
        destroy_vm_cmd = "virsh destroy %s" % vm_name
        remote.run_remote_cmd(destroy_vm_cmd, params, runner_on_target)

        logging.info("Recovery VM XML configuration")
        vmxml_backup.sync()
        logging.debug("The current VM XML:\n%s", vmxml_backup.xmltreefile)

        # Clean up ceph environment.
        if disk_src_protocol == "rbd":
            # Clean up secret
            secret_list = get_secret_list()
            if secret_list:
                for secret_uuid in secret_list:
                    virsh.secret_undefine(secret_uuid)
            # Clean up leftover secrets on the remote host if the test involves ceph auth.
            client_name = test_dict.get('client_name')
            client_key = test_dict.get("client_key")
            if client_name and client_key:
                try:
                    remote_virsh = virsh.VirshPersistent(**remote_virsh_dargs)
                    remote_dirty_secret_list = get_secret_list(remote_virsh)
                    for dirty_secret_uuid in remote_dirty_secret_list:
                        remote_virsh.secret_undefine(dirty_secret_uuid)
                except (process.CmdError, remote.SCPError) as detail:
                    test.error(detail)
                finally:
                    remote_virsh.close_session()
            # Delete the disk if it exists.
            disk_src_name = "%s/%s" % (vol_name, test_dict.get('disk_img'))
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, ceph_key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        if LOCAL_SELINUX_ENFORCING_STATUS:
            logging.info("Restore SELinux in original mode")
            utils_selinux.set_status(LOCAL_SELINUX_ENFORCING_STATUS)
        if REMOTE_SELINUX_ENFORCING_STATUS:
            logging.info("Put remote SELinux in original mode")
            cmd = "yes yes | setenforce %s" % REMOTE_SELINUX_ENFORCING_STATUS
            remote.run_remote_cmd(cmd, params, runner_on_target)

        # Remove known hosts on local host
        cmd = "ssh-keygen -R  %s" % server_ip
        process.run(cmd, ignore_status=True, shell=True)

        # Remove known hosts on remote host
        cmd = "ssh-keygen -R  %s" % client_ip
        remote.run_remote_cmd(cmd, params, runner_on_target)
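
The SELinux handling above records the current mode before changing it and restores it in the finally block. A minimal sketch of that save/restore pattern using the same helpers:

saved_status = utils_selinux.get_status()
try:
    utils_selinux.set_status("permissive")
    # ... run the ceph-backed migration steps ...
finally:
    utils_selinux.set_status(saved_status)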
Example #10
def copied_migration(test, vm, params, blockjob_type=None, block_target="vda"):
    """
    Migrate vms with storage copied under some stress.
    And during it, some qemu-monitor-command will be sent.
    """
    dest_uri = params.get("migrate_dest_uri")
    remote_host = params.get("migrate_dest_host")
    copy_option = params.get("copy_storage_option", "")
    username = params.get("remote_user")
    password = params.get("migrate_dest_pwd")
    timeout = int(params.get("thread_timeout", 1200))
    options = "--live %s --unsafe" % copy_option

    # Get vm ip for remote checking
    if vm.is_dead():
        vm.start()
    vm.wait_for_login()
    vms_ip = {}
    vms_ip[vm.name] = vm.get_address()
    logging.debug("VM %s IP: %s", vm.name, vms_ip[vm.name])

    # Start to load stress
    stress_type = params.get("migrate_stress_type")
    if stress_type == "cpu":
        params['stress_args'] = "--cpu 2 --quiet --timeout 60"
    elif stress_type == "memory":
        params['stress_args'] = "--vm 2 --vm-bytes 256M --vm-keep --timeout 60"
    if stress_type is not None:
        utils_test.load_stress("stress_in_vms", params=params, vms=[vm])

    cp_mig = migration.MigrationTest()
    migration_thread = threading.Thread(target=cp_mig.thread_func_migration,
                                        args=(vm, dest_uri, options))
    migration_thread.start()
    # Wait for migration launched
    time.sleep(5)
    job_ret = virsh.domjobinfo(vm.name, debug=True)
    if job_ret.exit_status:
        test.error("Prepare migration for blockjob failed.")

    # Execute some qemu monitor commands
    pause_cmd = "block-job-pause %s" % block_target
    resume_cmd = "block-job-resume %s" % block_target
    cancel_cmd = "block-job-cancel %s" % block_target
    complete_cmd = "block-job-complete %s" % block_target

    blockjob_failures = []
    try:
        if blockjob_type == "cancel":
            virsh.qemu_monitor_command(vm.name,
                                       cancel_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "pause_resume":
            virsh.qemu_monitor_command(vm.name,
                                       pause_cmd,
                                       debug=True,
                                       ignore_status=False)
            # TODO: Check whether it is paused.
            virsh.qemu_monitor_command(vm.name,
                                       resume_cmd,
                                       debug=True,
                                       ignore_status=False)
        elif blockjob_type == "complete":
            virsh.qemu_monitor_command(vm.name,
                                       complete_cmd,
                                       debug=True,
                                       ignore_status=False)
    except process.CmdError as detail:
        blockjob_failures.append(str(detail))

    # Job info FYI
    virsh.domjobinfo(vm.name, debug=True)

    if blockjob_failures:
        timeout = 30

    migration_thread.join(timeout)
    if migration_thread.is_alive():
        logging.error("Migrate %s timeout.", migration_thread)
        cp_mig.RET_LOCK.acquire()
        cp_mig.RET_MIGRATION = False
        cp_mig.RET_LOCK.release()

    if blockjob_failures:
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        test.fail("Run qemu monitor command failed %s" % blockjob_failures)

    check_ip_failures = []
    if cp_mig.RET_MIGRATION:
        try:
            utils_test.check_dest_vm_network(vm, vms_ip[vm.name], remote_host,
                                             username, password)
        except exceptions.TestFail as detail:
            check_ip_failures.append(str(detail))
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        if blockjob_type in ["cancel", "complete"]:
            test.fail("Storage migration passed even after " "cancellation.")
    else:
        cp_mig.cleanup_dest_vm(vm, None, dest_uri)
        if blockjob_type in ["cancel", "complete"]:
            logging.error("Expected Migration Error for %s", blockjob_type)
            return
        else:
            test.fail("Command blockjob does not work well under "
                      "storage copied migration.")

    if check_ip_failures:
        test.fail("Check IP failed: %s" % check_ip_failures)
Example #11
    def verify_migration_speed(test, params, env):
        """
        Check if migration speed is effective with twice migration.
        """
        vms = env.get_all_vms()
        src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
        dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")

        if not vms:
            test.cancel("Please provide migrate_vms for test.")

        if src_uri.count('///') or src_uri.count('EXAMPLE'):
            test.cancel("The src_uri '%s' is invalid" % src_uri)

        if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
            test.cancel("The dest_uri '%s' is invalid" % dest_uri)

        remote_host = params.get("migrate_dest_host")
        username = params.get("migrate_dest_user", "root")
        password = params.get("migrate_dest_pwd")
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        # Check migrated vms' state
        for vm in vms:
            if vm.is_dead():
                vm.start()

        load_vm_names = params.get("load_vms").split()
        # vms for load
        load_vms = []
        for vm_name in load_vm_names:
            load_vms.append(libvirt_vm.VM(vm_name, params, test.bindir,
                                          env.get("address_cache")))
        params["load_vms"] = load_vms

        bandwidth = int(params.get("bandwidth", "4"))
        stress_type = params.get("stress_type", "load_vms_booting")
        migration_type = params.get("migration_type", "orderly")
        thread_timeout = int(params.get("thread_timeout", "60"))
        delta = float(params.get("allowed_delta", "0.1"))
        virsh_migrate_timeout = int(params.get("virsh_migrate_timeout", "60"))
        # virsh migrate options
        virsh_migrate_options = "--live --unsafe --timeout %s" % virsh_migrate_timeout
        # Migrate vms to remote host
        mig_first = migration.MigrationTest()
        virsh_dargs = {"debug": True}
        for vm in vms:
            set_get_speed(vm.name, bandwidth, virsh_dargs=virsh_dargs)
            vm.wait_for_login()
        utils_test.load_stress(stress_type, params=params, vms=vms)
        mig_first.do_migration(vms, src_uri, dest_uri, migration_type,
                               options=virsh_migrate_options, thread_timeout=thread_timeout)
        for vm in vms:
            mig_first.cleanup_dest_vm(vm, None, dest_uri)
            # Keep it clean for second migration
            if vm.is_alive():
                vm.destroy()

        # Migrate vms again with new bandwidth
        second_bandwidth = params.get("second_bandwidth", "times")
        if second_bandwidth == "half":
            second_bandwidth = bandwidth // 2
            speed_times = 2
        elif second_bandwidth == "times":
            second_bandwidth = bandwidth * 2
            speed_times = 0.5
        elif second_bandwidth == "same":
            second_bandwidth = bandwidth
            speed_times = 1

        # Migrate again
        for vm in vms:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
            set_get_speed(vm.name, second_bandwidth, virsh_dargs=virsh_dargs)
        utils_test.load_stress(stress_type, params=params, vms=vms)
        mig_second = migration.MigrationTest()
        mig_second.do_migration(vms, src_uri, dest_uri, migration_type,
                                options=virsh_migrate_options, thread_timeout=thread_timeout)
        for vm in vms:
            mig_second.cleanup_dest_vm(vm, None, dest_uri)

        # Compare migration times between the two runs
        fail_info = []

        for vm in vms:
            first_time = mig_first.mig_time[vm.name]
            second_time = mig_second.mig_time[vm.name]
            logging.debug("Migration time for %s:\n"
                          "Time with Bandwidth '%s' first: %s\n"
                          "Time with Bandwidth '%s' second: %s", vm.name,
                          bandwidth, first_time, second_bandwidth, second_time)
            shift = float(abs(first_time * speed_times - second_time)) / float(second_time)
            logging.debug("Shift:%s", shift)
            if delta < shift:
                fail_info.append("Spent time for migrating %s is intolerable." % vm.name)

        # Check the collected speed results
        if fail_info:
            test.fail(fail_info)
Example #12
def run(test, params, env):
    """
    Run the test

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    libvirt_version.is_libvirt_feature_supported(params)

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    extra = params.get("virsh_migrate_extra")
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    options = params.get("virsh_migrate_options",
                         "--live --p2p --persistent --verbose")
    virsh_options = params.get("virsh_options", "")
    action_during_mig = params.get("action_during_mig")
    migrate_speed = params.get("migrate_speed")
    migrate_again = "yes" == params.get("migrate_again", "no")
    vm_state_after_abort = params.get("vm_state_after_abort")

    kill_service = "yes" == params.get("kill_service", "no")
    expected_image_ownership = params.get("expected_image_ownership")
    service_name = params.get("service_name", "libvirtd")
    service_on_dst = "yes" == params.get("service_on_dst", "no")
    server_ip = params.get("remote_ip")
    server_user = params.get("remote_user", "root")
    server_pwd = params.get("remote_pwd")

    # For safety reasons, back up the VM XML before making changes.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Update guest disk xml
        libvirt.set_vm_disk(vm, params)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        vm.wait_for_login().close()

        if kill_service:
            check_image_ownership(vm_name, expected_image_ownership, test)
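            # On hosts running modular daemons, the service behind the
            # "libvirtd" alias may actually be e.g. virtqemud; resolve the
            # real service name before trying to kill it.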
            if service_name == "libvirtd":
                if service_on_dst:
                    remote_session = remote.wait_for_login(
                        'ssh', server_ip, '22', server_user, server_pwd,
                        r"[\#\$]\s*$")
                    service_name = utils_libvirtd.Libvirtd(
                        session=remote_session).service_name
                    remote_session.close()
                else:
                    service_name = utils_libvirtd.Libvirtd().service_name
                params.update({'service_name': service_name})

        if migrate_speed:
            mode = ('both' if postcopy_options and
                    '--postcopy' in postcopy_options else 'precopy')
            migration_test.control_migrate_speed(vm_name, int(migrate_speed),
                                                 mode)

        if action_during_mig:
            action_during_mig = migration_base.parse_funcs(
                action_during_mig, test, params)

        # Execute migration process
        migration_base.do_migration(vm, migration_test, None, dest_uri,
                                    options, virsh_options, extra,
                                    action_during_mig, extra_args)

        func_returns = dict(migration_test.func_ret)
        migration_test.func_ret.clear()
        logging.debug("Migration returns function results:%s", func_returns)

        if vm_state_after_abort:
            check_vm_state_after_abort(vm_name, vm_state_after_abort, bk_uri,
                                       dest_uri, test)

        if kill_service:
            check_image_ownership(vm_name, expected_image_ownership, test)

        if migrate_again:
            action_during_mig = migration_base.parse_funcs(
                params.get('action_during_mig_again'), test, params)
            extra_args['status_error'] = params.get(
                "migrate_again_status_error", "no")
            migration_base.do_migration(vm, migration_test, None, dest_uri,
                                        options, virsh_options, extra,
                                        action_during_mig, extra_args)
        if int(migration_test.ret.exit_status) == 0:
            migration_test.post_migration_check([vm], params, uri=dest_uri)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)

        orig_config_xml.sync()
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    guest_src_url = params.get("guest_src_url")
    guest_src_path = params.get("guest_src_path",
                                "/var/lib/libvirt/images/guest.img")
    check_disk = "yes" == params.get("check_disk")
    disk_model = params.get("disk_model")
    disk_target = params.get("disk_target", "vda")
    controller_model = params.get("controller_model")

    check_interface = "yes" == params.get("check_interface")
    iface_type = params.get("iface_type", "network")
    iface_model = params.get("iface_model", "virtio")
    iface_params = {'type': iface_type,
                    'model': iface_model,
                    'del_addr': True,
                    'source': '{"network": "default"}'}

    check_memballoon = "yes" == params.get("check_memballoon")
    membal_model = params.get("membal_model")

    check_rng = "yes" == params.get("check_rng")
    rng_model = params.get("rng_model")

    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    status_error = "yes" == params.get("status_error", "no")
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}
    remote_dargs = {'server_ip': server_ip, 'server_user': server_user,
                    'server_pwd': server_pwd,
                    'file_path': "/etc/libvirt/libvirt.conf"}

    xml_check_after_mig = params.get("guest_xml_check_after_mig")

    err_msg = params.get("err_msg")
    vm_session = None
    remote_virsh_session = None
    vm = None
    mig_result = None
    remove_dict = {}
    remote_libvirt_file = None
    src_libvirt_file = None
    # Initialized here so the finally block can reference it safely
    blk_source = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
                                       params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
                                       params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, back up the VM XML before making changes.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # download guest source and update interface model to keep guest up
        if guest_src_url:
            blk_source = download.get_file(guest_src_url, guest_src_path)
            if not blk_source:
                test.error("Fail to download image.")
            params["blk_source_name"] = blk_source
            if (not check_interface) and iface_model:
                iface_dict = {'model': iface_model}
                libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict)
            if not check_disk:
                params["disk_model"] = "virtio-transitional"

        if check_interface:
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)

        if check_memballoon:
            membal_dict = {'membal_model': membal_model}
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.update_memballoon_xml(dom_xml, membal_dict)

        if check_rng:
            rng_dict = {'rng_model': rng_model}
            rng_xml = libvirt.create_rng_xml(rng_dict)
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.add_vm_device(dom_xml, rng_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        migration_test.ping_vm(vm, params)

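        # On modular-daemon hosts, strip the libvirt.conf key that maps the
        # destination URI to an "ssh:/" proxy so the connection URI is not
        # rewritten during migration.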
        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True, virsh_opt=virsh_options,
                                    extra_opts=extra, **extra_args)
        mig_result = migration_test.ret

        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, dest_uri)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            target_guest_dumpxml = (remote_virsh_session.dumpxml(vm_name,
                                                                 debug=True,
                                                                 ignore_status=True)
                                    .stdout_text.strip())
            if check_disk:
                check_str = disk_model if disk_model else controller_model
            if check_interface:
                check_str = iface_model
            if check_memballoon:
                check_str = membal_model
            if check_rng:
                check_str = rng_model

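            # Append the expected model string so the final pattern looks
            # like, e.g., "model type='virtio-transitional'" when searched
            # in the migrated guest's XML.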
            xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
            if not re.search(xml_check_after_mig, target_guest_dumpxml):
                test.fail("Fail to search '%s' in target guest XML:\n%s"
                          % (xml_check_after_mig, target_guest_dumpxml))
            remote_virsh_session.close_session()

        # Execute migration from remote
        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)
            remove_dict = {"do_search": ('{"%s": "ssh:/"}' % src_uri)}
            remote_libvirt_file = libvirt_config\
                .remove_key_for_modular_daemon(remove_dict, remote_dargs)

            cmd = "virsh migrate %s %s %s" % (vm_name,
                                              options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s"
                          % (cmd, cmd_result))

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        migration_test.cleanup_vm(vm, dest_uri)

        logging.info("Recover VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        if src_libvirt_file:
            src_libvirt_file.restore()
        if remote_libvirt_file:
            del remote_libvirt_file

        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params,
                                             cleanup=True)
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
        if guest_src_url and blk_source:
            libvirt.delete_local_disk("file", path=blk_source)
Example #14
def run_migration(vm, params, test):
    """
    Execute migration from source host to target host

    :param vm: vm object
    :param params: dict, test parameters
    :param test: test object
    """
    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    params["disk_type"] = "file"
    virsh_options = params.get("virsh_options", "")
    options = params.get("virsh_migrate_options", "--live --verbose")
    func_params_exists = "yes" == params.get("func_params_exists", "yes")

    func_name = None
    mig_result = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    vm_name = params.get("migrate_main_vm")
    extra_args = {}

    if func_params_exists:
        extra_args.update({'func_params': params})

    remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
    src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
        remove_dict)

    try:
        if not vm.is_alive():
            vm.start()
    except virt_vm.VMStartError as e:
        test.fail("Failed to start VM %s: %s" % (vm_name, str(e)))

    test.log.debug("Guest xml after starting:\n%s",
                   vm_xml.VMXML.new_from_dumpxml(vm_name))

    # Check local guest network connection before migration
    vm.wait_for_login(restart_network=True).close()
    migration_test.ping_vm(vm, params)

    # Execute migration process
    vms = [vm]
    migration_test.do_migration(vms,
                                None,
                                dest_uri,
                                'orderly',
                                options,
                                thread_timeout=900,
                                ignore_status=True,
                                virsh_opt=virsh_options,
                                func=func_name,
                                **extra_args)

    mig_result = migration_test.ret
    if int(mig_result.exit_status) == 0:
        migration_test.post_migration_check([vm], params, uri=dest_uri)

    if src_libvirt_file:
        src_libvirt_file.restore()
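
For context, run_migration() above is a helper meant to be invoked from a test's run() entry point. A minimal driver might look like the sketch below (illustrative only; the names follow this example, and a real test module would add its own setup and cleanup):

def run(test, params, env):
    """Minimal driver for run_migration() above (illustrative sketch)."""
    vm = env.get_vm(params.get("migrate_main_vm"))
    vm.verify_alive()
    run_migration(vm, params, test)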
Example #15
def run(test, params, env):
    """
    Test virsh migrate when disks are virtio-scsi.
    """
    def check_vm_state(vm, state):
        """
        Return True if vm is in the given state.
        """
        try:
            actual_state = vm.state()
        except process.CmdError:
            return False
        return actual_state == state

    def check_disks_in_vm(vm, vm_ip, disks_list=None, runner=None):
        """
        Check that disks attached to vm are writable, either through a
        remote runner or via a local guest session.
        """
        disks_list = disks_list or []
        fail_list = []
        while disks_list:
            disk = disks_list.pop()
            if runner:
                check_cmd = ("ssh %s \"dd if=/dev/urandom of=%s bs=1 "
                             "count=1024\"" % (vm_ip, disk))
                try:
                    logging.debug(runner.run(check_cmd))
                    continue
                except process.CmdError as detail:
                    logging.debug("Remote checking failed:%s", detail)
                    fail_list.append(disk)
            else:
                check_cmd = "dd if=/dev/urandom of=%s bs=1 count=1024" % disk
                session = vm.wait_for_login()
                cs = session.cmd_status(check_cmd)
                if cs:
                    fail_list.append(disk)
                session.close()
        if fail_list:
            test.fail("Checking attached devices failed:%s" % fail_list)

    def get_disk_id(device):
        """
        Return the /dev/disk/by-id path pointing at the given device,
        or None if there is no match.
        """
        output = process.run("ls /dev/disk/by-id/", shell=True).stdout_text
        for line in output.splitlines():
            disk_ids = line.split()
            for disk_id in disk_ids:
                disk = os.path.basename(
                    process.run("readlink /dev/disk/by-id/%s" % disk_id,
                                shell=True).stdout_text.strip())
                if disk == os.path.basename(device):
                    return "/dev/disk/by-id/%s" % disk_id
        return None

    def cleanup_ssh_config(vm):
        session = vm.wait_for_login()
        session.cmd("rm -f ~/.ssh/authorized_keys")
        session.cmd("rm -f ~/.ssh/id_rsa*")
        session.close()

    vm = env.get_vm(params.get("migrate_main_vm"))
    source_type = params.get("disk_source_type", "file")
    device_type = params.get("disk_device_type", "disk")
    disk_format = params.get("disk_format_type", "raw")
    if source_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
        block_device = params.get("disk_block_device", "/dev/EXAMPLE")
        if block_device.count("EXAMPLE"):
            # Prepare host parameters
            local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
            remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
            remote_user = params.get("migrate_dest_user", "root")
            remote_passwd = params.get("migrate_dest_pwd")
            if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
                test.cancel("Config remote or local host first.")
            rdm_params = {
                'remote_ip': remote_host,
                'remote_user': remote_user,
                'remote_pwd': remote_passwd
            }
            rdm = utils_test.RemoteDiskManager(rdm_params)
            # Try to build an iscsi device
            # For local, target is a device name
            target = utlv.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True, emulated_image="emulated-iscsi")
            logging.debug("Created target: %s", target)
            try:
                # Attach this iscsi device both local and remote
                remote_device = rdm.iscsi_login_setup(local_host, target)
            except Exception as detail:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Attach iscsi device on remote failed:%s" % detail)

            # Use id to get same path on local and remote
            block_device = get_disk_id(target)
            if block_device is None:
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Set iscsi device couldn't find id?")

    srcuri = params.get("virsh_migrate_srcuri")
    dsturi = params.get("virsh_migrate_dsturi")
    remote_ip = params.get("remote_ip")
    username = params.get("remote_user", "root")
    host_pwd = params.get("remote_pwd")
    # Connection to remote, init here for cleanup
    runner = None
    # Identify easy configuration mistakes early
    warning_text = ("Migration VM %s URI %s appears problematic; "
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using a "
                    "fully-qualified network-based style.")

    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        test.cancel(warning_text % ('source', srcuri))

    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        test.cancel(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")

    status_error = "yes" == params.get("status_error", "no")
    if source_type == "file" and device_type == "lun":
        status_error = True

    try:
        # For safety and convenience, define a new VM for this test
        new_vm_name = "%s_vsmtest" % vm.name
        mig = migration.MigrationTest()
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

        # Change the disk of the vm to shared disk
        # Detach exist devices
        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm.name,
                                         device,
                                         "--config",
                                         debug=True)
            if s_detach.exit_status:
                test.error("Detach %s failed before test." % device)

        # Attach system image as vda
        # Then added scsi disks will be sda,sdb...
        attach_args = "--subdriver %s --config" % sys_image_fmt
        virsh.attach_disk(vm.name,
                          sys_image_source,
                          "vda",
                          attach_args,
                          debug=True)

        vms = [vm]

        def start_check_vm(vm):
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                if status_error:
                    logging.debug("Expected failure:%s", detail)
                    return None, None
                else:
                    raise
            vm.wait_for_login()

            # Confirm VM can be accessed through network.
            # And this ip will be used on remote after migration
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=60)
            logging.info(o_ping)
            if s_ping != 0:
                test.fail("%s did not respond after several "
                          "seconds with attaching new devices." % vm.name)
            return vm_ip, vm_pwd

        options = "--live --unsafe"
        # Do migration before attaching new devices
        if migrate_in_advance:
            vm_ip, vm_pwd = start_check_vm(vm)
            cleanup_ssh_config(vm)
            mig_thread = threading.Thread(target=mig.thread_func_migration,
                                          args=(vm, dsturi, options))
            mig_thread.start()
            # Make sure migration is running
            time.sleep(2)

        # Attach other disks
        params['added_disk_target'] = "scsi"
        params['target_bus'] = "scsi"
        params['device_type'] = device_type
        params['type_name'] = source_type
        params['added_disk_format'] = disk_format
        if migrate_in_advance:
            params["attach_disk_config"] = "no"
            attach_disk_config = False
        else:
            params["attach_disk_config"] = "yes"
            attach_disk_config = True
        try:
            if source_type == "file":
                utlv.attach_disks(vm, "%s/image" % created_img_path, None,
                                  params)
            else:
                ret = utlv.attach_additional_device(vm.name,
                                                    "sda",
                                                    block_device,
                                                    params,
                                                    config=attach_disk_config)
                if ret.exit_status:
                    test.fail(ret)
        except (exceptions.TestFail, process.CmdError) as detail:
            if status_error:
                logging.debug("Expected failure:%s", detail)
                return
            else:
                raise

        if migrate_in_advance:
            mig_thread.join(60)
            if mig_thread.is_alive():
                mig.RET_LOCK.acquire()
                mig.MIGRATION = False
                mig.RET_LOCK.release()
        else:
            vm_ip, vm_pwd = start_check_vm(vm)

        # Have got expected failures when starting vm, end the test
        if vm_ip is None and status_error:
            return

        # Start checking before migration and go on checking after migration
        disks = []
        for target in list(vm.get_disk_devices().keys()):
            if target != "vda":
                disks.append("/dev/%s" % target)

        checked_count = int(params.get("checked_count", 0))
        disks_before = disks[:(checked_count // 2)]
        disks_after = disks[(checked_count // 2):checked_count]
        logging.debug(
            "Disks to be checked:\nBefore migration:%s\n"
            "After migration:%s", disks_before, disks_after)

        options = "--live --unsafe"
        if not migrate_in_advance:
            cleanup_ssh_config(vm)
            mig.do_migration(vms, None, dsturi, "orderly", options, 120)

        if mig.RET_MIGRATION:
            utils_test.check_dest_vm_network(vm, vm_ip, remote_ip, username,
                                             host_pwd)
            runner = remote.RemoteRunner(host=remote_ip,
                                         username=username,
                                         password=host_pwd)
            # After migration, config autologin to vm
            ssh_key.setup_remote_ssh_key(vm_ip, "root", vm_pwd)
            check_disks_in_vm(vm, vm_ip, disks_after, runner)

            if migrate_in_advance:
                test.fail("Migration before attaching successfully, "
                          "but not expected.")

    finally:
        # Cleanup remote vm
        if srcuri != dsturi:
            mig.cleanup_dest_vm(vm, srcuri, dsturi)
        # Cleanup created vm anyway
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(new_vm_name)

        # Cleanup iscsi device for block if it is necessary
        if source_type == "block":
            if params.get("disk_block_device",
                          "/dev/EXAMPLE").count("EXAMPLE"):
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False,
                                            emulated_image="emulated-iscsi")

        if runner:
            runner.session.close()
        process.run("rm -f %s/*vsmtest" % created_img_path, shell=True)
Example #16
def run(test, params, env):
    """
    Test migration with special network settings
    1) migrate guest with bridge type interface connected to ovs bridge
    2) migrate guest with direct type interface when a macvtap device name
        exists on dest host

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def check_vm_network_accessed(ping_dest, session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param ping_dest: The destination to be ping
        :param session: The session object to the host
        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        status, output = utils_net.ping(ping_dest,
                                        count=10,
                                        timeout=20,
                                        output_func=logging.debug,
                                        session=session)
        if status != 0:
            test.fail("Ping failed, status: %s, output: %s" % (status, output))

    def vm_sync(vmxml, vm_name=None, virsh_instance=virsh):
        """
        A wrapper to sync vm xml on localhost and remote host

        :param vmxml: domain VMXML instance
        :param vm_name: The name of VM
        :param virsh_instance: virsh instance object
        """
        if vm_name and virsh_instance != virsh:
            remote.scp_to_remote(server_ip, '22', server_user,
                                 server_pwd,
                                 vmxml.xml, vmxml.xml)
            if virsh_instance.domain_exists(vm_name):
                if virsh_instance.is_alive(vm_name):
                    virsh_instance.destroy(vm_name, ignore_status=True)
                virsh_instance.undefine(vm_name, ignore_status=True)
            virsh_instance.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

    def update_iface_xml(vm_name, iface_dict, virsh_instance=virsh):
        """
        Update interfaces for guest

        :param vm_name: The name of VM
        :param iface_dict: The interface configurations params
        :param virsh_instance: virsh instance object
        """
        logging.debug("update iface xml")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        vmxml.remove_all_device_by_type('interface')
        vm_sync(vmxml, vm_name, virsh_instance=virsh_instance)
        iface = interface.Interface('network')
        iface.xml = libvirt.modify_vm_iface(vm_name, "get_xml", iface_dict,
                                            virsh_instance=virsh_instance)
        vmxml.add_device(iface)
        vmxml.xmltreefile.write()
        vm_sync(vmxml, vm_name, virsh_instance=virsh_instance)
        logging.debug("VM XML after updating interface: %s" % vmxml)

    def update_net_dict(net_dict, runner=utils_net.local_runner):
        """
        Update network dict

        :param net_dict: The network dict to be updated
        :param runner: Command runner
        :return: Updated network dict
        """
        if net_dict.get("net_name", "") == "direct-macvtap":
            logging.info("Updating network iface name")
            iface_name = utils_net.get_net_if(runner=runner, state="UP")[0]
            net_dict.update({"forward_iface": iface_name})
        else:
            # TODO: support other types
            logging.info("No need to update net_dict. We only support to "
                         "update direct-macvtap type for now.")
        logging.debug("net_dict is %s" % net_dict)
        return net_dict

    def get_remote_direct_mode_vm_mac(vm_name, uri):
        """
        Get mac of remote direct mode VM

        :param vm_name: The name of VM
        :param uri: The uri on destination
        :return: mac
        :raise: test.fail when the result of virsh domiflist is incorrect
        """
        vm_mac = None
        res = virsh.domiflist(
            vm_name, uri=uri, ignore_status=False).stdout_text.strip().split("\n")
        if len(res) < 2:
            test.fail("Unable to get remote VM's mac: %s" % res)
        else:
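            # The MAC address is the last column of the last line of
            # "virsh domiflist" output.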
            vm_mac = res[-1].split()[-1]
        return vm_mac

    def create_fake_tap(remote_session):
        """
        Create a fake macvtap on destination host.

        :param remote_session: The session to the destination host.
        :return: The new tap device
        """
        tap_cmd = "ls /dev/tap* |awk -F 'tap' '{print $NF}'"
        tap_idx = remote_session.cmd_output(tap_cmd).strip()
        if not tap_idx:
            test.fail("Unable to get tap index using %s."
                      % tap_cmd)
        fake_tap_dest = 'tap'+str(int(tap_idx)+1)
        logging.debug("creating a fake tap %s...", fake_tap_dest)
        cmd = "touch /dev/%s" % fake_tap_dest
        remote_session.cmd(cmd)
        return fake_tap_dest

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    libvirt_version.is_libvirt_feature_supported(params)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    restart_dhclient = params.get("restart_dhclient", "dhclient -r; dhclient")
    ping_dest = params.get("ping_dest", "www.baidu.com")
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")

    target_vm_name = params.get("target_vm_name")
    direct_mode = "yes" == params.get("direct_mode", "no")
    check_macvtap_exists = "yes" == params.get("check_macvtap_exists", "no")
    create_fake_tap_dest = "yes" == params.get("create_fake_tap_dest", "no")
    macvtap_cmd = params.get("macvtap_cmd")
    modify_target_vm = "yes" == params.get("modify_target_vm", "no")
    ovs_bridge_name = params.get("ovs_bridge_name")
    network_dict = eval(params.get("network_dict", '{}'))
    iface_dict = eval(params.get("iface_dict", '{}'))
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}
    cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                 'server_pwd': server_pwd}

    virsh_session_remote = None
    libvirtd_conf = None
    mig_result = None
    target_org_xml = None
    target_vm_session = None
    target_vm = None
    exp_macvtap = []
    fake_tap_dest = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    postcopy_options = params.get("postcopy_options")
    action_during_mig = None
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
        action_during_mig = virsh.migrate_postcopy

    # For safety reasons, back up the VM XML before making changes.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs)

        if target_vm_name:
            target_vm = libvirt_vm.VM(target_vm_name, params, vm.root_dir,
                                      vm.address_cache)
            target_vm.connect_uri = dest_uri
            if not virsh_session_remote.domain_exists(target_vm_name):
                test.error("VM %s should be installed on %s."
                           % (target_vm_name, server_ip))
            # Backup guest's xml on remote
            target_org_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                target_vm_name, virsh_instance=virsh_session_remote)
            # Scp original xml to remote for restoration
            remote.scp_to_remote(server_ip, '22', server_user,
                                 server_pwd,
                                 target_org_xml.xml, target_org_xml.xml)
            logging.debug("target xml is %s" % target_org_xml)

        if ovs_bridge_name:
            status, stdout = utils_net.create_ovs_bridge(ovs_bridge_name)
            if status:
                test.fail("Failed to create ovs bridge on local. Status: %s"
                          "Stdout: %s" % (status, stdout))
            status, stdout = utils_net.create_ovs_bridge(
                ovs_bridge_name, session=remote_session)
            if status:
                test.fail("Failed to create ovs bridge on remote. Status: %s"
                          "Stdout: %s" % (status, stdout))
        if network_dict:
            update_net_dict(network_dict, runner=remote_session.cmd)
            libvirt_network.create_or_del_network(
                network_dict, remote_args=remote_virsh_dargs)
            logging.info("dest: network created")
            update_net_dict(network_dict)
            libvirt_network.create_or_del_network(network_dict)
            logging.info("localhost: network created")

        if target_vm_name:
            if modify_target_vm and iface_dict:
                logging.info("Updating remote VM's interface")
                update_iface_xml(target_vm_name, iface_dict,
                                 virsh_instance=virsh_session_remote)
            target_vm.start()
            target_vm_session = target_vm.wait_for_serial_login(timeout=240)
            check_vm_network_accessed(ping_dest, session=target_vm_session)
            if check_macvtap_exists and macvtap_cmd:
                # Get macvtap device's index on remote after target_vm started
                idx = remote_session.cmd_output(macvtap_cmd).strip()
                if not idx:
                    test.fail("Unable to get macvtap index using %s."
                              % macvtap_cmd)
                # Generate the expected macvtap devices' index list
                exp_macvtap = ['macvtap'+idx, 'macvtap'+str(int(idx)+1)]
                if create_fake_tap_dest:
                    fake_tap_dest = create_fake_tap(remote_session)

        remote_session.close()
        # Change domain network xml
        if iface_dict:
            if "mac" not in iface_dict:
                mac = utils_net.generate_mac_address_simple()
                iface_dict.update({'mac': mac})
            else:
                mac = iface_dict["mac"]

            update_iface_xml(vm_name, iface_dict)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            try:
                vm.start()
            except virt_vm.VMStartError as err:
                test.fail("Failed to start VM: %s" % err)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)
        if not utils_package.package_install('dhcp-client', session=vm_session):
            test.error("Failed to install dhcp-client on guest.")
        utils_net.restart_guest_network(vm_session)
        vm_ip = utils_net.get_guest_ip_addr(vm_session, mac)
        logging.debug("VM IP Addr: %s", vm_ip)

        if direct_mode:
            check_vm_network_accessed(ping_dest, session=vm_session)
        else:
            check_vm_network_accessed(vm_ip)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True, virsh_opt=virsh_options,
                                    func=action_during_mig,
                                    extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret

        # Check network accessibility after migration
        if int(mig_result.exit_status) == 0:
            vm.connect_uri = dest_uri
            if vm.serial_console is not None:
                vm.cleanup_serial_console()
            vm.create_serial_console()
            vm_session_after_mig = vm.wait_for_serial_login(timeout=240)
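            # Renew the guest's DHCP lease after migration so it is
            # reachable at its (possibly new) address.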
            vm_session_after_mig.cmd(restart_dhclient)
            check_vm_network_accessed(ping_dest, session=vm_session_after_mig)

            if check_macvtap_exists and macvtap_cmd:
                remote_session = remote.remote_login("ssh", server_ip, "22",
                                                     server_user, server_pwd,
                                                     r'[$#%]')
                # Check macvtap devices' index after migration
                idx = remote_session.cmd_output(macvtap_cmd)
                act_macvtap = ['macvtap'+i for i in idx.strip().split("\n")]
                if act_macvtap != exp_macvtap:
                    test.fail("macvtap devices after migration are incorrect!"
                              " Actual: %s, Expected: %s. "
                              % (act_macvtap, exp_macvtap))
        else:
            if fake_tap_dest:
                res = remote.run_remote_cmd("ls /dev/%s" % fake_tap_dest,
                                            params, runner_on_target)
                libvirt.check_exit_status(res)

        if target_vm_session:
            check_vm_network_accessed(ping_dest, session=target_vm_session)
        # Execute migration from remote
        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)

            cmd = "virsh migrate %s %s %s" % (vm_name, options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s" % (cmd, cmd_result))
            logging.debug("VM is migrated back.")

            vm.connect_uri = bk_uri
            if vm.serial_console is not None:
                vm.cleanup_serial_console()
            vm.create_serial_console()
            vm_session_after_mig_bak = vm.wait_for_serial_login(timeout=240)
            vm_session_after_mig_bak.cmd(restart_dhclient)
            check_vm_network_accessed(ping_dest, vm_session_after_mig_bak)
    finally:
        logging.debug("Recover test environment")
        vm.connect_uri = bk_uri
        migration_test.cleanup_vm(vm, dest_uri)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        if target_vm and target_vm.is_alive():
            target_vm.destroy(gracefully=False)

        if target_org_xml and target_vm_name:
            logging.info("Recovery XML configration for %s.", target_vm_name)
            virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs)
            vm_sync(target_org_xml, vm_name=target_vm_name,
                    virsh_instance=virsh_session_remote)
            virsh_session_remote.close_session()

        if fake_tap_dest:
            remote_session.cmd_output_safe("rm -rf /dev/%s" % fake_tap_dest)

        if network_dict:
            libvirt_network.create_or_del_network(
                network_dict, is_del=True, remote_args=remote_virsh_dargs)
            libvirt_network.create_or_del_network(network_dict, is_del=True)
        if ovs_bridge_name:
            utils_net.delete_ovs_bridge(ovs_bridge_name)
            utils_net.delete_ovs_bridge(ovs_bridge_name, session=remote_session)

        remote_session.close()
        if target_vm_session:
            target_vm_session.close()

        if virsh_session_remote:
            virsh_session_remote.close_session()

        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params,
                                             cleanup=True)
        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        if source_file:
            libvirt.delete_local_disk("file", path=source_file)
Example #17
def run(test, params, env):
    """
    Test migration with special network settings
    1) migrate guest with bridge type interface connected to ovs bridge

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def check_vm_network_accessed(ping_dest, session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param ping_dest: The destination to be ping
        :param session: The session object to the host
        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        status, output = utils_net.ping(ping_dest,
                                        count=10,
                                        timeout=20,
                                        output_func=logging.debug,
                                        session=session)
        if status != 0:
            test.fail("Ping failed, status: %s, output: %s" % (status, output))

    def update_iface_xml(vm_name, iface_dict):
        """
        Update interfaces for guest

        :param vm_name: The name of VM
        :param iface_dict: The interface configurations params
        """
        logging.debug("update iface xml")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml.remove_all_device_by_type('interface')
        vmxml.sync()

        iface = interface.Interface('network')
        iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", iface_dict)
        libvirt.add_vm_device(vmxml, iface)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    func_params_exists = "yes" == params.get("func_params_exists", "no")
    migr_vm_back = "yes" == params.get("migr_vm_back", "no")

    ovs_bridge_name = params.get("ovs_bridge_name")
    network_dict = eval(params.get("network_dict", '{}'))
    iface_dict = eval(params.get("iface_dict", '{}'))
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }
    func_name = None
    libvirtd_conf = None
    mig_result = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
        func_name = virsh.migrate_postcopy

    # For safety reasons, back up the VM XML before making changes.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd, r'[$#%]')
        if ovs_bridge_name:
            status, stdout = utils_net.create_ovs_bridge(ovs_bridge_name)
            if status:
                test.fail("Failed to create ovs bridge on local. Status: %s"
                          "Stdout: %s" % (status, stdout))
            status, stdout = utils_net.create_ovs_bridge(
                ovs_bridge_name, session=remote_session)
            if status:
                test.fail("Failed to create ovs bridge on remote. Status: %s"
                          "Stdout: %s" % (status, stdout))
        if network_dict:
            libvirt_network.create_or_del_network(
                network_dict, remote_args=remote_virsh_dargs)
            libvirt_network.create_or_del_network(network_dict)

        remote_session.close()
        # Change domain network xml
        if iface_dict:
            if "mac" not in iface_dict:
                mac = utils_net.generate_mac_address_simple()
                iface_dict.update({'mac': mac})
            else:
                mac = iface_dict["mac"]

            update_iface_xml(vm_name, iface_dict)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            try:
                vm.start()
            except virt_vm.VMStartError as err:
                test.fail("Failed to start VM: %s" % err)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)
        utils_net.restart_guest_network(vm_session)
        vm_ip = utils_net.get_guest_ip_addr(vm_session, mac)
        logging.debug("VM IP Addr: %s", vm_ip)

        check_vm_network_accessed(vm_ip)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=func_name,
                                    extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if int(mig_result.exit_status) == 0:
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            check_vm_network_accessed(vm_ip, session=remote_session)
            remote_session.close()

        # Execute migration from remote
        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)

            cmd = "virsh migrate %s %s %s" % (vm_name, options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s" %
                          (cmd, cmd_result))
            logging.debug("VM is migrated back.")
            check_vm_network_accessed(vm_ip)
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
        except Exception as err:
            logging.error(err)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd, r'[$#%]')
        if network_dict:
            libvirt_network.create_or_del_network(
                network_dict, is_del=True, remote_args=remote_virsh_dargs)
            libvirt_network.create_or_del_network(network_dict, is_del=True)
        if ovs_bridge_name:
            utils_net.delete_ovs_bridge(ovs_bridge_name)
            utils_net.delete_ovs_bridge(ovs_bridge_name,
                                        session=remote_session)

        remote_session.close()
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params, cleanup=True)
        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        if source_file:
            libvirt.delete_local_disk("file", path=source_file)
Example #18
def run(test, params, env):
    """
    Test command: virsh migrate-setspeed <domain> <bandwidth>
                  virsh migrate-getspeed <domain>.

    1) Prepare test environment.
    2) Try to set the maximum migration bandwidth (in MiB/s)
       for a domain through valid and invalid command.
    3) Recover test environment.
    4) Check result.
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("migrate_main_vm")
    bandwidth = params.get("bandwidth", "default")
    options_extra = params.get("options_extra", "")
    status_error = "yes" == params.get("status_error", "yes")
    virsh_dargs = {'debug': True}
    # Checking uris for migration
    twice_migration = "yes" == params.get("twice_migration", "no")
    if twice_migration:
        src_uri = params.get("migrate_src_uri",
                             "qemu+ssh://EXAMPLE/system")
        dest_uri = params.get("migrate_dest_uri",
                              "qemu+ssh://EXAMPLE/system")
        if src_uri.count('///') or src_uri.count('EXAMPLE'):
            test.cancel("The src_uri '%s' is invalid"
                        % src_uri)
        if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
            test.cancel("The dest_uri '%s' is invalid"
                        % dest_uri)

    bz1083483 = False
    if bandwidth == "zero":
        expected_value = 0
    elif bandwidth == "one":
        expected_value = 1
    elif bandwidth == "negative":
        expected_value = -1
        bz1083483 = True
    elif bandwidth == "default":
        expected_value = DEFAULT
    elif bandwidth == "UINT32_MAX":
        expected_value = UINT32_MiB
    elif bandwidth == "INT64_MAX":
        expected_value = INT64_MiB
    elif bandwidth == "UINT64_MAX":
        expected_value = UINT64_MiB
        bz1083483 = True
    elif bandwidth == "INVALID_VALUE":
        expected_value = INT64_MiB + 1
        bz1083483 = True
    else:
        expected_value = bandwidth

    orig_value = virsh.migrate_getspeed(vm_name).stdout.strip()

    def set_get_speed(vm_name, expected_value, status_error=False,
                      options_extra="", **virsh_dargs):
        """Set speed and check its result"""
        result = virsh.migrate_setspeed(vm_name, expected_value,
                                        options_extra, **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()

        # Check status_error
        if status_error:
            if status == 0 or err == "":
                # Without code for bz1083483 applied, this will succeed
                # when it shouldn't be succeeding.
                if bz1083483 and not libvirt_version.version_compare(1, 2, 4):
                    test.cancel("bz1083483 should result in fail")
                else:
                    test.fail("Expect fail, but run successfully!")

            # no need to perform getspeed if status_error is true
            return
        else:
            if status != 0 or err != "":
                test.fail("Run failed with right "
                          "virsh migrate-setspeed command")

        result = virsh.migrate_getspeed(vm_name, **virsh_dargs)
        status = result.exit_status
        actual_value = result.stdout.strip()
        err = result.stderr.strip()

        if status != 0 or err != "":
            test.fail("Run failed with virsh migrate-getspeed")

        logging.info("The expected bandwidth is %s MiB/s, "
                     "the actual bandwidth is %s MiB/s"
                     % (expected_value, actual_value))

        if int(actual_value) != int(expected_value):
            test.fail("Bandwidth value from getspeed "
                      "is different from expected value "
                      "set by setspeed")

    def verify_migration_speed(test, params, env):
        """
        Check if migration speed is effective with twice migration.
        """
        vms = env.get_all_vms()
        src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
        dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")

        if not vms:
            test.cancel("Please provide migrate_vms for test.")

        if src_uri.count('///') or src_uri.count('EXAMPLE'):
            test.cancel("The src_uri '%s' is invalid" % src_uri)

        if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
            test.cancel("The dest_uri '%s' is invalid" % dest_uri)

        remote_host = params.get("migrate_dest_host")
        username = params.get("migrate_dest_user", "root")
        password = params.get("migrate_dest_pwd")
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        # Check migrated vms' state
        for vm in vms:
            if vm.is_dead():
                vm.start()

        load_vm_names = params.get("load_vms").split()
        # vms for load
        load_vms = []
        for vm_name in load_vm_names:
            load_vms.append(libvirt_vm.VM(vm_name, params, test.bindir,
                                          env.get("address_cache")))
        params["load_vms"] = load_vms

        bandwidth = int(params.get("bandwidth", "4"))
        stress_type = params.get("stress_type", "load_vms_booting")
        migration_type = params.get("migration_type", "orderly")
        thread_timeout = int(params.get("thread_timeout", "60"))
        delta = float(params.get("allowed_delta", "0.1"))
        virsh_migrate_timeout = int(params.get("virsh_migrate_timeout", "60"))
        # virsh migrate options
        virsh_migrate_options = "--live --unsafe --timeout %s" % virsh_migrate_timeout
        # Migrate vms to remote host
        mig_first = migration.MigrationTest()
        virsh_dargs = {"debug": True}
        for vm in vms:
            set_get_speed(vm.name, bandwidth, **virsh_dargs)
            vm.wait_for_login()
        utils_test.load_stress(stress_type, params=params, vms=vms)
        mig_first.do_migration(vms, src_uri, dest_uri, migration_type,
                               options=virsh_migrate_options, thread_timeout=thread_timeout)
        for vm in vms:
            mig_first.cleanup_dest_vm(vm, None, dest_uri)
            # Keep it clean for second migration
            if vm.is_alive():
                vm.destroy()

        # Migrate vms again with new bandwidth
        second_bandwidth = params.get("second_bandwidth", "times")
        if second_bandwidth == "half":
            second_bandwidth = bandwidth / 2
            speed_times = 2
        elif second_bandwidth == "times":
            second_bandwidth = bandwidth * 2
            speed_times = 0.5
        elif second_bandwidth == "same":
            second_bandwidth = bandwidth
            speed_times = 1

        # Migrate again
        for vm in vms:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
            set_get_speed(vm.name, second_bandwidth, **virsh_dargs)
        utils_test.load_stress(stress_type, params=params, vms=vms)
        mig_second = migration.MigrationTest()
        mig_second.do_migration(vms, src_uri, dest_uri, migration_type,
                                options=virsh_migrate_options, thread_timeout=thread_timeout)
        for vm in vms:
            mig_second.cleanup_dest_vm(vm, None, dest_uri)

        fail_info = []
        # Compare migration times below and fail if the bandwidth change
        # did not take effect

        for vm in vms:
            first_time = mig_first.mig_time[vm.name]
            second_time = mig_second.mig_time[vm.name]
            logging.debug("Migration time for %s:\n"
                          "Time with Bandwidth '%s' first: %s\n"
                          "Time with Bandwidth '%s' second: %s", vm.name,
                          bandwidth, first_time, second_bandwidth, second_time)
            shift = float(abs(first_time * speed_times - second_time)) / float(second_time)
            logging.debug("Shift:%s", shift)
            if delta < shift:
                fail_info.append("Migration time for %s deviates more than "
                                 "the allowed delta." % vm.name)

        # Check again for speed result
        if len(fail_info):
            test.fail(fail_info)

    # Run test case
    try:
        set_get_speed(vm_name, expected_value, status_error,
                      options_extra, **virsh_dargs)
        if twice_migration:
            verify_migration_speed(test, params, env)
        else:
            # Set and check the speed once more to confirm the value persists
            set_get_speed(vm_name, expected_value, status_error,
                          options_extra, **virsh_dargs)
    finally:
        # Restore bandwidth to its original value
        virsh.migrate_setspeed(vm_name, orig_value)
        if twice_migration:
            for vm in env.get_all_vms():
                migration.MigrationTest().cleanup_dest_vm(vm, src_uri, dest_uri)
                if vm.is_alive():
                    vm.destroy(gracefully=False)
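
Note: the constants DEFAULT, UINT32_MiB, INT64_MiB and UINT64_MiB used in
Example #18 are defined near the top of the source file and are not part
of this excerpt. A plausible reconstruction (all values are assumptions;
virsh migrate-getspeed reports bandwidth in MiB/s):

# Hypothetical module-level constants for the bandwidth boundary cases
UINT32_MiB = 2 ** 32 - 1                   # UINT32_MAX taken as MiB/s
INT64_MiB = (2 ** 63 - 1) // (1024 ** 2)   # INT64_MAX bytes/s in MiB/s
UINT64_MiB = (2 ** 64 - 1) // (1024 ** 2)  # UINT64_MAX bytes/s in MiB/s
DEFAULT = INT64_MiB                        # assumed getspeed default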
Example #19
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host

        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        if not result:
            test.error("No migration result is returned.")

        logging.info("Migration out: %s", result.stdout_text.strip())
        logging.info("Migration error: %s", result.stderr_text.strip())

        if status_error:  # Migration should fail
            if err_msg:  # Special error messages are expected
                if not re.search(err_msg, result.stderr_text.strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" %
                              (err_msg, result.stderr_text.strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(result.stderr_text.strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    guest_src_url = params.get("guest_src_url")
    guest_src_path = params.get("guest_src_path",
                                "/var/lib/libvirt/images/guest.img")
    check_disk = "yes" == params.get("check_disk")
    disk_model = params.get("disk_model")
    disk_target = params.get("disk_target", "vda")
    controller_model = params.get("controller_model")

    check_interface = "yes" == params.get("check_interface")
    iface_type = params.get("iface_type", "network")
    iface_model = params.get("iface_model", "virtio")
    iface_params = {
        'type': iface_type,
        'model': iface_model,
        'del_addr': True,
        'source': '{"network": "default"}'
    }

    check_memballoon = "yes" == params.get("check_memballoon")
    membal_model = params.get("membal_model")

    check_rng = "yes" == params.get("check_rng")
    rng_model = params.get("rng_model")

    migr_vm_back = "yes" == params.get("migrate_vm_back", "no")
    status_error = "yes" == params.get("status_error", "no")
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    xml_check_after_mig = params.get("guest_xml_check_after_mig")

    err_msg = params.get("err_msg")
    vm_session = None
    remote_virsh_session = None
    vm = None
    mig_result = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, back up the VM XML before making changes.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    migration_test = migration.MigrationTest()
    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # download guest source and update interface model to keep guest up
        if guest_src_url:
            blk_source = download.get_file(guest_src_url, guest_src_path)
            if not blk_source:
                test.error("Fail to download image.")
            params["blk_source_name"] = blk_source
            if (not check_interface) and iface_model:
                iface_dict = {'model': iface_model}
                libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict)
            if not check_disk:
                params["disk_model"] = "virtio-transitional"

        if check_interface:
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)

        if check_memballoon:
            membal_dict = {'membal_model': membal_model}
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.update_memballoon_xml(dom_xml, membal_dict)

        if check_rng:
            rng_dict = {'rng_model': rng_model}
            rng_xml = libvirt.create_rng_xml(rng_dict)
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.add_vm_device(dom_xml, rng_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        check_vm_network_accessed()

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra)
        mig_result = migration_test.ret

        check_migration_res(mig_result)

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = (remote_virsh_session.dumpxml(
                vm_name, debug=True, ignore_status=True).stdout_text.strip())
            if check_disk:
                check_str = disk_model if disk_model else controller_model
            if check_interface:
                check_str = iface_model
            if check_memballoon:
                check_str = membal_model
            if check_rng:
                check_str = rng_model

            xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
            if not re.search(xml_check_after_mig, target_guest_dumpxml):
                test.fail("Fail to search '%s' in target guest XML:\n%s" %
                          (xml_check_after_mig, target_guest_dumpxml))
            remote_virsh_session.close_session()

        # Execute migration from remote
        if migr_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, virsh_options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s" %
                          (cmd, cmd_result))

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        vm.connect_uri = ''
        migration_test.cleanup_dest_vm(vm, src_uri, dest_uri)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params, cleanup=True)
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
        if guest_src_url and 'blk_source' in locals():
            libvirt.delete_local_disk("file", path=blk_source)
Example #20
def run(test, params, env):
    """
    Test migration over unix socket.
    1) Migrate vm over unix socket
    2) Migrate vm over unix socket - libvirt tunnelled(--tunnelled)
    3) Migrate vm over unix socket - enable multifd(--parallel)
    4) Migrate vm with copy storage over unix socket - one disk
    5) Migrate vm with copy storage over unix socket - multiple disks
    6) Abort migration over unix socket and migrate again
    7) Abort migration with copy storage over unix socket, and migrate again
    8) Migrate vm with copy storage over unix socket - multiple disks
        - enable multifd(--parallel)

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def update_disk(vm, params):
        """
        Update disk for testing.

        :param vm: vm object.
        :param params: the parameters used.
        :return: updated images.
        """
        local_image_list = []
        remote_image_list = []
        vm_did_list = []
        # Change the disk of the vm
        if storage_type == "nfs":
            libvirt.set_vm_disk(vm, params)
        else:
            disk_format = params.get("disk_format", "qcow2")
            disk_num = eval(params.get("disk_num", "1"))
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            # Create disk on remote host
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            libvirt_disk.create_disk("file",
                                     disk_format=disk_format,
                                     path=blk_source,
                                     size=vsize,
                                     session=remote_session)
            remote_image_list.append(blk_source)

            for idx in range(2, disk_num + 1):
                disk_path = os.path.join(os.path.dirname(blk_source),
                                         "test%s.img" % str(idx))
                # Create disk on local
                libvirt_disk.create_disk("file",
                                         disk_format=disk_format,
                                         path=disk_path)
                local_image_list.append(disk_path)

                target_dev = 'vd' + chr(idx + ord('a') - 1)
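                # e.g. idx 2 -> 'vdb', idx 3 -> 'vdc', ...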
                new_disk_dict = {"driver_type": disk_format}
                vm_was_running = vm.is_alive()
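                # reset_pci_num() may redefine the guest and leave it shut
                # off (assumption); restart it below if it was running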
                libvirt_pcicontr.reset_pci_num(vm_name)
                if vm_was_running and not vm.is_alive():
                    vm.start()
                    vm.wait_for_login().close()
                result = libvirt.attach_additional_device(
                    vm_name, target_dev, disk_path, new_disk_dict, False)
                libvirt.check_exit_status(result)

                libvirt_disk.create_disk("file",
                                         disk_format=disk_format,
                                         path=disk_path,
                                         session=remote_session)

                remote_image_list.append(disk_path)
                vm_did_list.append(target_dev)

            remote_session.close()
        return local_image_list, remote_image_list, vm_did_list

    def check_socket(params):
        """
        Check sockets' number

        :param params: the parameters used
        :raise: test.fail when command fails
        """
        postcopy_options = params.get("postcopy_options")
        vm_name = params.get("migrate_main_vm")
        exp_num = params.get("expected_socket_num", "2")
        if postcopy_options:
            migration_test.set_migratepostcopy(vm_name)
        cmd = ("netstat -xanp|grep -E \"CONNECTED"
               ".*(desturi-socket|migrateuri-socket)\" | wc -l")
        res = process.run(cmd, shell=True).stdout_text.strip()
        if res != exp_num:
            test.fail("There should be {} connected unix sockets, "
                      "but found {} sockets.".format(exp_num, res))

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    storage_type = params.get("storage_type")
    disk_num = params.get("disk_num")
    desturi_port = params.get("desturi_port", "22222")
    migrateuri_port = params.get("migrateuri_port", "33333")
    disks_uri_port = params.get("disks_uri_port", "44444")
    migrate_again = "yes" == params.get("migrate_again", "no")
    action_during_mig = params.get("action_during_mig")
    if action_during_mig:
        action_during_mig = eval(action_during_mig)
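    # action_during_mig usually evaluates to a callable defined above
    # (e.g. check_socket); do_migration() invokes it via its func=
    # argument while the migration is running.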

    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    mig_result = None
    local_image_list = []
    remote_image_list = []
    vm_did_list = []

    if not libvirt_version.version_compare(6, 6, 0):
        test.cancel("This libvirt version doesn't support "
                    "migration over unix.")

    if storage_type == "nfs":
        # Params for NFS shared storage
        shared_storage = params.get("migrate_shared_storage", "")
        if shared_storage == "":
            default_guest_asset = defaults.get_default_guest_os_info()['asset']
            default_guest_asset = "%s.qcow2" % default_guest_asset
            shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                          default_guest_asset)
            logging.debug("shared_storage:%s", shared_storage)

        # Params to update disk using shared storage
        params["disk_type"] = "file"
        params["disk_source_protocol"] = "netfs"
        params["mnt_path_name"] = params.get("nfs_mount_dir")

    # params for migration connection
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    dest_uri_ssh = libvirt_vm.complete_uri(params.get("migrate_dest_host"))

    unix_obj = None

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)

    # For safety reasons, back up the VM XML before making changes.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        unix_obj = utils_conn.UNIXSocketConnection(params)
        unix_obj.conn_setup()
        unix_obj.auto_recover = True

        local_image_list, remote_image_list, vm_did_list = update_disk(
            vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        vm_session = vm.wait_for_login()

        for did in vm_did_list:
            utils_disk.linux_disk_check(vm_session, did)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=600,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=action_during_mig,
                                    extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret

        if migrate_again:
            logging.debug(
                "Sleeping 10 seconds before rerunning the migration.")
            time.sleep(10)
            if params.get("migrate_again_clear_func", "yes") == "yes":
                action_during_mig = None
            extra_args["status_error"] = params.get(
                "migrate_again_status_error", "no")
            migration_test.do_migration(vms,
                                        None,
                                        dest_uri,
                                        'orderly',
                                        options,
                                        thread_timeout=900,
                                        ignore_status=True,
                                        virsh_opt=virsh_options,
                                        extra_opts=extra,
                                        func=action_during_mig,
                                        **extra_args)

            mig_result = migration_test.ret

        if int(mig_result.exit_status) == 0:
            vm.connect_uri = dest_uri_ssh
            if not utils_misc.wait_for(
                    lambda: virsh.is_alive(
                        vm_name, uri=dest_uri_ssh, debug=True), 60):
                test.fail("The migrated VM should be alive!")
            if vm_did_list:
                vm_session_after_mig = vm.wait_for_serial_login(timeout=240)
                for did in vm_did_list:
                    vm_session_after_mig.cmd("echo mytest >> /mnt/%s1/mytest" %
                                             did)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)

        orig_config_xml.sync()

        # Remove image files
        for source_file in local_image_list:
            libvirt.delete_local_disk("file", path=source_file)
        for img in remote_image_list:
            remote.run_remote_cmd("rm -rf %s" % img, params)
Example #21
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    def cleanup_vm(vm, vm_name='', uri=''):
        """
        Clean up vm in the src or destination host environment
        when doing the uni-direction migration.
        """
        # Backup vm name and uri
        uri_bak = vm.connect_uri
        vm_name_bak = vm.name

        # Destroy and undefine vm
        vm.connect_uri = uri if uri else uri_bak
        vm.name = vm_name if vm_name else vm_name_bak
        logging.info("Cleaning up VM %s on %s", vm.name, vm.connect_uri)
        if vm.is_alive():
            vm.destroy()
        if vm.is_persistent():
            vm.undefine()

        # Restore vm connect_uri
        vm.connect_uri = uri_bak
        vm.name = vm_name_bak

    # Check whether there are unset parameters
    for v in list(itervalues(params)):
        if isinstance(v, string_types) and v.count("EXAMPLE"):
            test.cancel("Please set real value for %s" % v)

    # Params for virsh migrate options:
    live_migration = params.get("live_migration") == "yes"
    offline_migration = params.get("offline_migration") == "yes"
    persistent = params.get("persistent") == "yes"
    undefinesource = params.get("undefinesource") == "yes"
    p2p = params.get("p2p") == "yes"
    tunnelled = params.get("tunnelled") == "yes"
    postcopy = params.get("postcopy") == "yes"
    dname = params.get("dname")
    xml_option = params.get("xml_option") == "yes"
    persistent_xml_option = params.get("persistent_xml_option") == "yes"
    extra_options = params.get("virsh_migrate_extra", "")

    if live_migration and not extra_options.count("--live"):
        extra_options = "%s --live" % extra_options
    if offline_migration and not extra_options.count("--offline"):
        extra_options = "%s --offline" % extra_options
    if persistent and not extra_options.count("--persistent"):
        extra_options = "%s --persistent" % extra_options
    if undefinesource and not extra_options.count("--undefinesource"):
        extra_options = "%s --undefinesource" % extra_options
    if p2p and not extra_options.count("--p2p"):
        extra_options = "%s --p2p" % extra_options
    if tunnelled and not extra_options.count("--tunnelled"):
        extra_options = "%s --tunnelled" % extra_options
    if tunnelled and not extra_options.count("--p2p"):
        extra_options = "%s --p2p" % extra_options
    if postcopy and not extra_options.count("--postcopy"):
        extra_options = "%s --postcopy" % extra_options
    if dname and not extra_options.count("--dname"):
        extra_options = "%s --dname %s" % (extra_options, dname)
    # Note: the --xml and --persistent-xml file arguments themselves are
    # appended to extra_options later, once the updated XML files exist.
    if persistent_xml_option and not extra_options.count("--persistent"):
        extra_options = "%s --persistent" % extra_options

    # Set param migrate_options in case it is used somewhere:
    params.setdefault("migrate_options", extra_options)

    # Params for postcopy migration
    postcopy_timeout = int(params.get("postcopy_migration_timeout", "180"))

    # Params for migrate hosts:
    server_cn = params.get("server_cn")
    client_cn = params.get("client_cn")
    migrate_source_host = client_cn if client_cn else params.get(
        "migrate_source_host")
    migrate_dest_host = server_cn if server_cn else params.get(
        "migrate_dest_host")

    # Params for migrate uri
    transport = params.get("transport", "tls")
    transport_port = params.get("transport_port")
    uri_port = ":%s" % transport_port if transport_port else ''
    hypervisor_driver = params.get("hypervisor_driver", "qemu")
    hypervisor_mode = params.get("hypervisor_mode", 'system')
    if "virsh_migrate_desturi" not in list(params.keys()):
        params["virsh_migrate_desturi"] = "%s+%s://%s%s/%s" % (
            hypervisor_driver, transport, migrate_dest_host, uri_port,
            hypervisor_mode)
    if "virsh_migrate_srcuri" not in list(params.keys()):
        params["virsh_migrate_srcuri"] = "%s:///%s" % (hypervisor_driver,
                                                       hypervisor_mode)
    dest_uri = params.get("virsh_migrate_desturi")
    src_uri = params.get("virsh_migrate_srcuri")

    # Params for src vm cfg:
    src_vm_cfg = params.get("src_vm_cfg")
    src_vm_status = params.get("src_vm_status")
    with_graphic_passwd = params.get("with_graphic_passwd")
    graphic_passwd = params.get("graphic_passwd")

    # For test result check
    cancel_exception = False
    fail_exception = False
    exception = False
    result_check_pass = True

    # Objects(SSH, TLS and TCP, etc) to be cleaned up in finally
    objs_list = []

    # VM objects for migration test
    vms = []

    try:
        # Get a MigrationTest() Object
        logging.debug("Get a MigrationTest()  object")
        obj_migration = migration.MigrationTest()

        # Setup libvirtd remote connection TLS connection env
        if transport == "tls":
            tls_obj = TLSConnection(params)
            # Setup CA, server(on dest host) and client(on src host)
            tls_obj.conn_setup()
            # Add tls_obj to objs_list
            objs_list.append(tls_obj)

        # Enable libvirtd remote connection transport port
        if transport == 'tls':
            transport_port = '16514'
        elif transport == 'tcp':
            transport_port = '16509'
        obj_migration.migrate_pre_setup(dest_uri, params, ports=transport_port)

        # Back up vm name for recovery in finally
        vm_name_backup = params.get("migrate_main_vm")

        # Get a vm object for migration
        logging.debug("Get a vm object for migration")
        vm = env.get_vm(vm_name_backup)

        # Back up vm xml for recovery in finally
        logging.debug("Backup vm xml before migration")
        vm_xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        if not vm_xml_backup:
            test.error("Backing up xmlfile failed.")

        # Prepare shared disk in vm xml for live migration:
        # Change the source of the first disk of vm to shared disk
        if live_migration:
            logging.debug("Prepare shared disk in vm xml for live migration")
            storage_type = params.get("storage_type")
            if storage_type == 'nfs':
                logging.debug("Prepare nfs shared disk in vm xml")
                nfs_mount_dir = params.get("nfs_mount_dir")
                libvirt.update_vm_disk_source(vm.name, nfs_mount_dir)
                libvirt.update_vm_disk_driver_cache(vm.name,
                                                    driver_cache="none")
            else:
                # TODO: other storage types
                test.cancel("Other storage type is not supported for now")

        # Prepare graphic password in vm xml
        if with_graphic_passwd in ["yes", "no"]:
            logging.debug("Set VM graphic passwd in vm xml")
            # Get graphics list in vm xml
            vmxml_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            graphics_list = vmxml_tmp.get_graphics_devices()

            if not graphics_list:
                # Add spice graphic with passwd to vm xml
                logging.debug("Add spice graphic to vm xml")
                graphics.Graphics.add_graphic(vm.name, graphic_passwd, "spice")
            elif graphic_passwd:
                # Graphics already exist in vm xml and passwd is required
                # Add passwd to the first graphic device in vm xml
                logging.debug("Add graphic passwd to vm xml")
                vm_xml.VMXML.add_security_info(vmxml_tmp, graphic_passwd)
                vmxml_tmp.sync()
            else:
                # Graphics already exist in vm xml and non-passwd is required
                # Do nothing here as passwd has been removed by new_from_inactive_dumpxml()
                pass

        # Prepare for required src vm status.
        logging.debug("Turning %s into certain state.", vm.name)
        if src_vm_status == "running" and not vm.is_alive():
            vm.start()
        elif src_vm_status == "shut off" and not vm.is_dead():
            vm.destroy()

        # Prepare for required src vm persistency.
        logging.debug("Prepare for required src vm persistency")
        if src_vm_cfg == "persistent" and not vm.is_persistent():
            logging.debug("Make src vm persistent")
            vm_xml_backup.define()
        elif src_vm_cfg == "transient" and vm.is_persistent():
            logging.debug("Make src vm transient")
            vm.undefine()

        # Prepare for postcopy migration: install and run stress in VM
        if postcopy and src_vm_status == "running":
            logging.debug(
                "Install and run stress in vm for postcopy migration")
            pkg_name = 'stress'

            # Get a vm session
            logging.debug("Get a vm session")
            vm_session = vm.wait_for_login()
            if not vm_session:
                test.error("Can't get a vm session successfully")

            # Install package stress if it is not installed in vm
            logging.debug(
                "Check if stress tool is installed for postcopy migration")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            # Run stress in vm
            logging.debug("Run stress in vm")
            stress_args = params.get("stress_args")
            vm_session.cmd('stress %s' % stress_args)

        # Prepare for --xml <updated_xml_file>.
        if xml_option:
            logging.debug("Preparing new xml file for --xml option.")

            # Get the vm xml
            vmxml_tmp = vm_xml.VMXML.new_from_dumpxml(
                vm.name, "--security-info --migratable")

            # Update something in the xml file: e.g. title
            # Note: VM ABI shall not be broken when migrating with updated_xml
            updated_title = "VM Title in updated xml"
            vmxml_tmp.title = updated_title

            # Add --xml to migrate extra_options
            extra_options = ("%s --xml=%s" % (extra_options, vmxml_tmp.xml))

        # Prepare for --persistent-xml <updated_xml_file>.
        if persistent_xml_option:
            logging.debug(
                "Preparing new xml file for --persistent-xml option.")

            # Get the vm xml
            vmxml_persist_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm.name, "--security-info")

            # Update something in the xml file: e.g. title
            # Note: VM ABI shall not be broken when migrating with updated_xml
            updated_persist_title = "VM Title in updated persist xml"
            vmxml_persist_tmp.title = updated_persist_title

            # Add --persistent-xml to migrate extra_options
            extra_options = ("%s --persistent-xml=%s" %
                             (extra_options, vmxml_persist_tmp.xml))

        # Prepare host env: clean up vm on dest host
        logging.debug("Clean up vm on dest host before migration")
        if dname:
            cleanup_vm(vm, dname, dest_uri)
        else:
            cleanup_vm(vm, vm.name, dest_uri)

        # Prepare host env: set selinux state before migration
        logging.debug("Set selinux to enforcing before migration")
        utils_selinux.set_status(params.get("selinux_state", "enforcing"))

        # Check vm network connectivity by ping before migration
        logging.debug("Check vm network before migration")
        if src_vm_status == "running":
            obj_migration.ping_vm(vm, params)

        # Get VM uptime before migration
        if src_vm_status == "running":
            vm_uptime = vm.uptime()
            logging.info("Check VM uptime before migration: %s", vm_uptime)

        # Print vm active xml before migration
        process.system_output("virsh dumpxml %s --security-info" % vm.name,
                              shell=True)

        # Print vm inactive xml before migration
        process.system_output("virsh dumpxml %s --security-info --inactive" %
                              vm.name,
                              shell=True)

        # Do uni-direction migration.
        # NOTE: vm.connect_uri will be set to dest_uri once migration is complete successfully
        logging.debug("Start to do migration test.")
        vms.append(vm)
        if postcopy:
            # Monitor the qemu monitor event of "postcopy-active" for postcopy migration
            logging.debug(
                "Monitor the qemu monitor event for postcopy migration")
            virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                               auto_close=True)
            cmd = "qemu-monitor-event --loop --domain %s --event MIGRATION" % vm.name
            virsh_session.sendline(cmd)

            # Do live migration and switch to postcopy by "virsh migrate-postcopy"
            logging.debug("Start to do postcopy migration")
            obj_migration.do_migration(vms,
                                       src_uri,
                                       dest_uri,
                                       "orderly",
                                       options="",
                                       thread_timeout=postcopy_timeout,
                                       ignore_status=True,
                                       func=virsh.migrate_postcopy,
                                       extra_opts=extra_options,
                                       shell=True)
            # Check migration result
            obj_migration.check_result(obj_migration.ret, params)

            # Check "postcopy-active" event after postcopy migration
            logging.debug(
                "Check postcopy-active event after postcopy migration")
            virsh_session.send_ctrl("^C")
            events_output = virsh_session.get_stripped_output()
            logging.debug("events_output are %s", events_output)
            pattern = "postcopy-active"
            if not re.search(pattern, events_output):
                virsh_session.close()
                test.fail("Migration didn't switch to postcopy mode")
            virsh_session.close()

        else:
            logging.debug("Start to do precopy migration")
            obj_migration.do_migration(vms,
                                       src_uri,
                                       dest_uri,
                                       "orderly",
                                       options="",
                                       ignore_status=True,
                                       extra_opts=extra_options)
            # Check migration result
            obj_migration.check_result(obj_migration.ret, params)
        """
        # Check src vm after migration
        # First, update vm name and connect_uri to src vm's
        """
        vm.name = vm_name_backup
        vm.connect_uri = src_uri
        logging.debug("Start to check %s state on src %s after migration.",
                      vm.name, src_uri)

        # Check src vm status after migration: existence, running, shutoff, etc
        logging.debug("Check vm status on source after migration")
        if offline_migration:
            if src_vm_status == "shut off" and undefinesource:
                if vm.exists():
                    result_check_pass = False
                    logging.error(
                        "Src vm should not exist after offline migration"
                        " with --undefinesource")
                    logging.debug("Src vm state is %s" % vm.state())
            elif not libvirt.check_vm_state(
                    vm.name, src_vm_status, uri=vm.connect_uri):
                result_check_pass = False
                logging.error("Src vm should be %s after offline migration" %
                              src_vm_status)
                logging.debug("Src vm state is %s" % vm.state())

        if live_migration:
            if not undefinesource and src_vm_cfg == "persistent":
                if not libvirt.check_vm_state(
                        vm.name, "shut off", uri=vm.connect_uri):
                    result_check_pass = False
                    logging.error(
                        "Src vm should be shutoff after live migration")
                    logging.debug("Src vm state is %s" % vm.state())
            elif vm.exists():
                result_check_pass = False
                logging.error("Src vm should not exist after live migration")
                logging.debug("Src vm state is %s" % vm.state())

        # Check src vm status after migration: persistency
        logging.debug("Check vm persistency on source after migration")
        if src_vm_cfg == "persistent" and not undefinesource:
            if not vm.is_persistent():
                # Src vm should be persistent after migration without --undefinesource
                result_check_pass = False
                logging.error("Src vm should be persistent after migration")
        elif vm.is_persistent():
            result_check_pass = False
            logging.error("Src vm should be not be persistent after migration")
        """
        # Check dst vm after migration
        # First, update vm name and connect_uri to dst vm's
        """
        vm.name = dname if dname else vm.name
        vm.connect_uri = dest_uri
        logging.debug("Start to check %s state on target %s after migration.",
                      vm.name, vm.connect_uri)

        # Check dst vm status after migration: running, shutoff, etc
        logging.debug("Check vm status on target after migration")
        if live_migration:
            if not libvirt.check_vm_state(
                    vm.name, src_vm_status, uri=vm.connect_uri):
                result_check_pass = False
                logging.error("Dst vm should be %s after live migration",
                              src_vm_status)
        elif vm.is_alive():
            result_check_pass = False
            logging.error("Dst vm should not be alive after offline migration")

        # Print vm active xml after migration
        process.system_output("virsh -c %s dumpxml %s --security-info" %
                              (vm.connect_uri, vm.name),
                              shell=True)

        # Print vm inactive xml after migration
        process.system_output(
            "virsh -c %s dumpxml %s --security-info --inactive" %
            (vm.connect_uri, vm.name),
            shell=True)

        # Check dst vm xml after migration
        logging.debug("Check vm xml on target after migration")
        remote_virsh = virsh.Virsh(uri=vm.connect_uri)
        vmxml_active_tmp = vm_xml.VMXML.new_from_dumpxml(
            vm.name, "--security-info", remote_virsh)
        vmxml_inactive_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm.name, "--security-info", remote_virsh)
        # Check dst vm xml after migration: --xml <updated_xml_file>
        if xml_option and not offline_migration:
            logging.debug("Check vm active xml for --xml")
            if vmxml_active_tmp.title != updated_title:
                logging.debug("vmxml active tmp title is %s",
                              vmxml_active_tmp.title)
                result_check_pass = False
                logging.error("--xml doesn't take effect in migration")

        if xml_option and offline_migration:
            logging.debug("Check vm inactive xml for --xml")
            if vmxml_inactive_tmp.title != updated_title:
                result_check_pass = False
                logging.error("--xml doesn't take effect in migration")

        # Check dst vm xml after migration: --persistent-xml <updated_xml_file>
        if persistent_xml_option:
            logging.debug("Check vm inactive xml for --persistent-xml")
            if not offline_migration and vmxml_inactive_tmp.title != updated_persist_title:
                logging.debug("vmxml inactive tmp title is %s",
                              vmxml_inactive_tmp.title)
                result_check_pass = False
                logging.error(
                    "--persistent-xml doesn't take effect in live migration")
            elif offline_migration and vmxml_inactive_tmp.title == updated_persist_title:
                result_check_pass = False
                logging.error(
                    "--persistent-xml should not take effect in offline "
                    "migration")

        # Check dst vm xml after migration: graphic passwd
        if with_graphic_passwd == "yes":
            logging.debug("Check graphic passwd in vm xml after migration")
            graphic_active = vmxml_active_tmp.devices.by_device_tag(
                'graphics')[0]
            graphic_inactive = vmxml_inactive_tmp.devices.by_device_tag(
                'graphics')[0]
            try:
                logging.debug("Check graphic passwd in active vm xml")
                if graphic_active.passwd != graphic_passwd:
                    result_check_pass = False
                    logging.error(
                        "Graphic passwd in active xml of dst vm should be %s",
                        graphic_passwd)

                logging.debug("Check graphic passwd in inactive vm xml")
                if graphic_inactive.passwd != graphic_passwd:
                    result_check_pass = False
                    logging.error(
                        "Graphic passwd in inactive xml of dst vm should be %s",
                        graphic_passwd)
            except LibvirtXMLNotFoundError:
                result_check_pass = False
                logging.error("Graphic passwd lost in dst vm xml")

        # Check dst vm uptime, network, etc after live migration
        if live_migration:
            # Check dst VM uptime after migration
            # Note: migrated_vm_uptime should be greater than the vm_uptime got
            # before migration
            migrated_vm_uptime = vm.uptime(connect_uri=dest_uri)
            logging.info(
                "Check VM uptime in destination after "
                "migration: %s", migrated_vm_uptime)
            if not migrated_vm_uptime:
                result_check_pass = False
                logging.error("Failed to check vm uptime after migration")
            elif vm_uptime > migrated_vm_uptime:
                result_check_pass = False
                logging.error(
                    "VM went for a reboot while migrating to destination")

            # Check dst VM network connectivity after migration
            logging.debug("Check VM network connectivity after migrating")
            obj_migration.ping_vm(vm, params, uri=dest_uri)

            # Restore vm.connect_uri as it is set to src_uri in ping_vm()
            logging.debug(
                "Restore vm.connect_uri as it is set to src_uri in ping_vm()")
            vm.connect_uri = dest_uri

        # Check dst vm status after migration: persistency
        logging.debug("Check vm persistency on target after migration")
        if persistent:
            if not vm.is_persistent():
                result_check_pass = False
                logging.error("Dst vm should be persistent after migration "
                              "with --persistent")
                time.sleep(10)
            # Destroy vm and check vm state should be shutoff. BZ#1076354
            vm.destroy()
            if not libvirt.check_vm_state(
                    vm.name, "shut off", uri=vm.connect_uri):
                result_check_pass = False
                logging.error(
                    "Dst vm with name %s should exist and be shutoff", vm.name)
        elif vm.is_persistent():
            result_check_pass = False
            logging.error("Dst vm should not be persistent after migration "
                          "without --persistent")

    finally:
        logging.debug("Start to clean up env")
        # Clean up vm on dest and src host
        for vm in vms:
            cleanup_vm(vm, vm_name=dname, uri=dest_uri)
            cleanup_vm(vm, vm_name=vm_name_backup, uri=src_uri)

        # Recover source vm definition (just in case).
        logging.info("Recover vm definition on source")
        if vm_xml_backup:
            vm_xml_backup.define()

        # Clean up SSH, TCP, TLS test env
        if objs_list:
            logging.debug("Clean up test env: SSH, TCP, TLS, etc")
            for obj in objs_list:
                obj.auto_recover = True
                obj.__del__()

        # Disable libvirtd remote connection transport port
        obj_migration.migrate_pre_setup(dest_uri,
                                        params,
                                        cleanup=True,
                                        ports=transport_port)

        # Check test result.
        if not result_check_pass:
            test.fail("Migration succeed, but some check points didn't pass."
                      "Please check the error log for details")
Example #22
def run(test, params, env):
    """
    Test migration with memory related configuration

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    remote_ip = params.get("remote_ip")
    remote_user = params.get("remote_user")
    remote_pwd = params.get("remote_pwd")
    local_ip = params.get("local_ip")
    local_pwd = params.get("local_pwd")
    ballooned_mem = params.get("ballooned_mem")
    check = params.get("check")
    remove_dict = {}
    src_libvirt_file = None

    remote_virsh_dargs = {
        'remote_ip': remote_ip,
        'remote_user': remote_user,
        'remote_pwd': remote_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")
    options = params.get("virsh_migrate_options", "--live --verbose")
    func_params_exists = "yes" == params.get("func_params_exists", "yes")
    log_file = params.get("log_outputs",
                          "/var/log/libvirt/libvirtd_daemons.log")
    check_str_local_log = params.get("check_str_local_log", "")
    libvirtd_conf_dict = eval(params.get("libvirtd_conf_dict", '{}'))

    func_name = None
    libvirtd_conf = None
    mig_result = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})

    # For safety reasons, back up the original XML file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        if check == "mem_balloon":
            # Update memory balloon device to correct model
            membal_dict = {
                'membal_model': 'virtio',
                'membal_stats_period': '10'
            }
            libvirt.update_memballoon_xml(new_xml, membal_dict)

        if check == "mem_device":
            libvirt_cpu.add_cpu_settings(new_xml, params)

            dimm_params = {
                k.replace('memdev_', ''): v
                for k, v in params.items() if k.startswith('memdev_')
            }
            dimm_xml = utils_hotplug.create_mem_xml(**dimm_params)

            libvirt.add_vm_device(new_xml, dimm_xml)
            logging.debug(virsh.dumpxml(vm_name))

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        # Update libvirtd configuration
        if libvirtd_conf_dict:
            if os.path.exists(log_file):
                logging.debug("Delete local libvirt log file '%s'", log_file)
                os.remove(log_file)
            logging.debug("Update libvirtd configuration file")
            conf_type = params.get("conf_type", "libvirtd")
            if conf_type == "libvirtd" and utils_split_daemons.is_modular_daemon(
            ):
                conf_type = "virtqemud"
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, conf_type)
        try:
            if not vm.is_alive():
                vm.start()
        except virt_vm.VMStartError as e:
            test.fail("Failed to start VM %s: %s" % (vm_name, e))

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm.wait_for_login(restart_network=True).close()
        migration_test.ping_vm(vm, params)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=func_name,
                                    **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

        if check == "mem_balloon":
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            remote_virsh_session.setmem(vm_name, ballooned_mem, None, None,
                                        False, "", **virsh_args)

            def check_mem_balloon():
                """Check if memory balloon worked"""

                memstat_output = remote_virsh_session.dommemstat(
                    vm_name, "", **virsh_args)
                memstat_after = memstat_output.stdout_text
                mem_after = memstat_after.splitlines()[0].split()[1]
                if mem_after != ballooned_mem:
                    logging.debug("Current memory size is: %s", mem_after)
                    return False
                return True

            check_ret = utils_misc.wait_for(check_mem_balloon, timeout=20)
            if not check_ret:
                test.fail("Memory is not ballooned to the expected size: %s" %
                          ballooned_mem)

            remote_virsh_session.close_session()

        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=remote_ip,
                                               username=remote_user,
                                               password=remote_pwd)
        if check == "mem_device":
            qemu_checks = params.get('qemu_checks', '').split('`')
            logging.debug("qemu_checks:%s" % qemu_checks[0])
            for qemu_check in qemu_checks:
                libvirt.check_qemu_cmd_line(qemu_check, False, params,
                                            runner_on_target)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=remote_ip,
                                                      server_pwd=remote_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            src_full_uri = libvirt_vm.complete_uri(
                params.get("migrate_source_host"))

            migration_test.migrate_pre_setup(src_full_uri, params)
            cmd = "virsh migrate %s %s %s" % (vm_name, options, src_full_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd,
                                      params,
                                      runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s" %
                          (cmd, cmd_result))
    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        except Exception as err:
            logging.error(err)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()

        if libvirtd_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None,
                                             is_recover=True,
                                             config_object=libvirtd_conf)
        if src_libvirt_file:
            src_libvirt_file.restore()
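The mem_balloon check above polls dommemstat through utils_misc.wait_for rather than asserting once, because ballooning completes asynchronously after setmem. A minimal sketch of the same polling idiom, assuming avocado-vt's utils_misc.wait_for(func, timeout, step) semantics and a logged-in guest session (the guest-side command is an illustrative assumption):

from virttest import utils_misc

def wait_for_balloon(session, expected_kib, timeout=20):
    """Poll guest MemTotal until it reaches the ballooned size in KiB."""
    def _reached():
        out = session.cmd_output("grep MemTotal /proc/meminfo")
        return out.split()[1] == str(expected_kib)

    # wait_for returns the truthy result, or None on timeout
    return utils_misc.wait_for(_reached, timeout=timeout, step=2)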
Example #23
def run(test, params, env):
    """
    Test command: migrate-compcache <domain> [--size <number>]

    1) Run migrate-compcache command and check return code.
    """
    vm_ref = params.get("vm_ref", "name")
    vm_name = params.get("migrate_main_vm")
    start_vm = 'yes' == params.get('start_vm', 'yes')
    pause_vm = 'yes' == params.get('pause_after_start_vm', 'no')
    expect_succeed = 'yes' == params.get('expect_succeed', 'yes')
    size_option = params.get('size_option', 'valid')
    action = params.get('compcache_action', 'get')
    vm = env.get_vm(vm_name)

    # Check if the virsh command migrate-compcache is available
    if not virsh.has_help_command('migrate-compcache'):
        test.cancel("This version of libvirt does not support "
                    "virsh command migrate-compcache")

    # Prepare the VM state if it's not correct.
    if start_vm and not vm.is_alive():
        vm.start()
    elif not start_vm and vm.is_alive():
        vm.destroy()
    if pause_vm and not vm.is_paused():
        vm.pause()

    # Setup domain reference
    if vm_ref == 'domname':
        vm_ref = vm_name

    # Setup size according to size_option:
    # minimal: Same as memory page size
    # maximal: Same as guest memory
    # empty: An empty string
    # small: One byte less than page size
    # large: Larger than guest memory
    # huge : Largest int64
    page_size = get_page_size()
    if size_option == 'minimal':
        size = str(page_size)
    elif size_option == 'maximal':
        size = str(vm.get_max_mem() * 1024)
    elif size_option == 'empty':
        size = '""'
    elif size_option == 'small':
        size = str(page_size - 1)
    elif size_option == 'large':
        # "large" must exceed guest memory, so add ~50MB
        # on top of the configured max memory.
        size = str(vm.get_max_mem() * 1024 + 50000000)
    elif size_option == 'huge':
        size = str(2**64 - 1)
    else:
        size = size_option

    # If we need to get, just omit the size option
    if action == 'get':
        size = None

    # Run testing command
    result = virsh.migrate_compcache(vm_ref, size=size)
    logging.debug(result)

    remote_uri = params.get("compcache_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    check_job_compcache = False
    compressed_size = None
    if not remote_host.count(
            "EXAMPLE") and size is not None and expect_succeed:
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_pwd, port=22)
        if vm.is_dead():
            vm.start()
        if vm.is_paused():
            vm.resume()
        vm.wait_for_login()
        # Do actual migration to verify compression cache of migrate jobs
        command = ("virsh migrate %s %s --compressed --unsafe --verbose" %
                   (vm_name, remote_uri))
        logging.debug("Start migrating: %s", command)
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # Give enough time for starting job
        t = 0
        while t < 5:
            jobinfo = virsh.domjobinfo(vm_ref, debug=True,
                                       ignore_status=True).stdout
            jobtype = "None"
            for line in jobinfo.splitlines():
                key = line.split(':')[0]
                if key.count("type"):
                    jobtype = line.split(':')[-1].strip()
                elif key.strip() == "Compression cache":
                    compressed_size = line.split(':')[-1].strip()
            if "None" == jobtype or compressed_size is None:
                t += 1
                time.sleep(1)
                continue
            else:
                check_job_compcache = True
                logging.debug("Job started: %s", jobtype)
                break

        if p.poll():
            try:
                p.kill()
            except OSError:
                pass

        # Cleanup in case of successful migration
        migration.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # Shut down the VM to make sure the compcache setting cleared
    if vm.is_alive():
        vm.destroy()

    # Check test result
    if expect_succeed:
        if result.exit_status != 0:
            test.fail('Expected succeed, but failed with result:\n%s' % result)
        if check_job_compcache:
            value = compressed_size.split()[0].strip()
            unit = compressed_size.split()[-1].strip()
            value = int(float(value))
            if unit == "KiB":
                size = int(int(size) / 1024)
            elif unit == "MiB":
                size = int(int(size) / 1048576)
            elif unit == "GiB":
                size = int(int(size) / 1073741824)
            if value != size:
                test.fail("Compression cache does not match the value set")
            return
        else:
            logging.warning("The compressed size was not verified "
                            "during migration.")
    else:
        if result.exit_status == 0:
            test.fail('Expected fail, but succeed with result:\n%s' % result)
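The final comparison converts the "Compression cache" value reported by domjobinfo back into the unit of the size that was set. A self-contained sketch of that normalization, using the same KiB/MiB/GiB factors as the branches above:

UNIT_FACTORS = {"B": 1, "KiB": 1024, "MiB": 1024 ** 2, "GiB": 1024 ** 3}

def compcache_to_bytes(compressed_size):
    """Convert a 'value unit' string such as '64.000 MiB' to bytes."""
    value, unit = compressed_size.split()
    return int(float(value) * UNIT_FACTORS[unit])

assert compcache_to_bytes("64.000 MiB") == 64 * 1024 ** 2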
Example #24
def run(test, params, env):
    """
    Test migration with specified max bandwidth
    1) Set both precopy and postcopy bandwidth by virsh migrate parameter
    2) Set bandwidth before migration starts by migrate parameter --postcopy-bandwidth
    3) Set bandwidth when migration is in post-copy phase
    4) Set bandwidth when migration is in pre-copy phase
    5) Set bandwidth when guest is running and before migration starts
    6) Set bandwidth before migration starts by migrate parameter --bandwidth
    7) Set bandwidth when guest is running and before migration starts
    8) Set bandwidth when guest is shutoff
    9) Do live migration with default max bandwidth

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_speed(exp_migrate_speed):
        """
        Get migration speed and compare with value in exp_migrate_speed

        :params exp_migrate_speed: the dict of expected migration speed
        :raise: test.fail if speed does not match
        """
        if exp_migrate_speed.get("precopy_speed"):
            output = virsh.migrate_getspeed(vm_name,
                                            **virsh_args).stdout_text.strip()
            if exp_migrate_speed['precopy_speed'] != output:
                virsh.migrate_getspeed(vm_name,
                                       extra="--postcopy",
                                       **virsh_args)
                test.fail("Migration speed is expected to be '%s MiB/s', but "
                          "'%s MiB/s' found!" %
                          (exp_migrate_speed['precopy_speed'], output))
        if exp_migrate_speed.get("postcopy_speed"):
            output = virsh.migrate_getspeed(vm_name,
                                            extra="--postcopy",
                                            **virsh_args).stdout_text.strip()
            if exp_migrate_speed['postcopy_speed'] != output:
                test.fail("Prostcopy migration speed is expected to be '%s "
                          "MiB/s', but '%s MiB/s' found!" %
                          (exp_migrate_speed['postcopy_speed'], output))

    def check_bandwidth(params):
        """
        Check migration bandwidth

        :param params: the parameters used
        :raise: test.fail if migration bandwidth does not match expected values
        """
        exp_migrate_speed = eval(params.get('exp_migrate_speed', '{}'))
        migrate_postcopy_cmd = "yes" == params.get("migrate_postcopy_cmd",
                                                   "yes")
        if extra.count("bandwidth"):
            get_speed(exp_migrate_speed)
        if params.get("set_postcopy_in_precopy_phase"):
            virsh.migrate_setspeed(vm_name,
                                   params.get("set_postcopy_in_precopy_phase"),
                                   "--postcopy", **virsh_args)
            get_speed(exp_migrate_speed)

        # 8796093022207 MiB/s (INT64_MAX bytes/s) is libvirt's "unlimited" default
        params.update({
            'compare_to_value':
            exp_migrate_speed.get("precopy_speed", "8796093022207")
        })

        if exp_migrate_speed.get("precopy_speed", "0") == "8796093022207":
            params.update({'domjob_ignore_status': True})
        libvirt_domjobinfo.check_domjobinfo(vm, params)

        if migrate_postcopy_cmd:
            if not utils_misc.wait_for(
                    lambda: not virsh.migrate_postcopy(vm_name, **virsh_args).
                    exit_status, 5):
                test.fail("Failed to set migration postcopy.")

            if params.get("set_postcopy_in_postcopy_phase"):
                virsh.migrate_setspeed(
                    vm_name, params.get("set_postcopy_in_postcopy_phase"),
                    "--postcopy", **virsh_args)
                get_speed(exp_migrate_speed)
            time.sleep(5)

            if exp_migrate_speed.get("postcopy_speed"):
                params.update(
                    {'compare_to_value': exp_migrate_speed["postcopy_speed"]})
                params.update({'domjob_ignore_status': False})
            libvirt_domjobinfo.check_domjobinfo(vm, params)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --verbose")
    jobinfo_item = params.get("jobinfo_item", 'Memory bandwidth:')
    set_postcopy_speed_before_mig = params.get("set_postcopy_speed_before_mig")
    set_precopy_speed_before_mig = params.get("set_precopy_speed_before_mig")
    set_precopy_speed_before_vm_start = params.get(
        "set_precopy_speed_before_vm_start")
    stress_package = params.get("stress_package")
    exp_migrate_speed = eval(params.get('exp_migrate_speed', '{}'))
    log_file = params.get("log_outputs",
                          "/var/log/libvirt/libvirt_daemons.log")
    check_str_local_log = params.get("check_str_local_log", "")
    libvirtd_conf_dict = eval(params.get("libvirtd_conf_dict", '{}'))
    action_during_mig = check_bandwidth
    params.update({"action_during_mig_params_exists": "yes"})
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    libvirtd_conf = None
    mig_result = None
    remove_dict = {}
    src_libvirt_file = None

    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "postcopy migration bandwidth function.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)

    # For safety reasons, back up the original XML file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        # Update libvirtd configuration
        if libvirtd_conf_dict:
            if os.path.exists(log_file):
                logging.debug("Delete local libvirt log file '%s'", log_file)
                os.remove(log_file)
            logging.debug("Update libvirtd configuration file")
            conf_type = "libvirtd"
            if utils_split_daemons.is_modular_daemon():
                conf_type = "virtqemud"
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict,
                conf_type,
            )

        if set_precopy_speed_before_vm_start:
            if vm.is_alive():
                vm.destroy()
            virsh.migrate_setspeed(vm_name, set_precopy_speed_before_vm_start,
                                   **virsh_args)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm.wait_for_login(restart_network=True).close()
        migration_test.ping_vm(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        if any([set_precopy_speed_before_mig, set_postcopy_speed_before_mig]):
            if set_precopy_speed_before_mig:
                virsh.migrate_setspeed(vm_name, set_precopy_speed_before_mig,
                                       **virsh_args)

            if set_postcopy_speed_before_mig:
                virsh.migrate_setspeed(vm_name, set_postcopy_speed_before_mig,
                                       "--postcopy", **virsh_args)
            get_speed(exp_migrate_speed)

        if stress_package:
            migration_test.run_stress_in_vm(vm, params)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    func=action_during_mig,
                                    extra_opts=extra,
                                    **extra_args)

        if int(migration_test.ret.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)

        logging.info("Recover VM XML configuration")
        orig_config_xml.sync()

        if libvirtd_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None,
                                             is_recover=True,
                                             config_object=libvirtd_conf)
        if src_libvirt_file:
            src_libvirt_file.restore()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
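get_speed() above verifies that virsh migrate-getspeed reports the values set earlier, for both the precopy and postcopy limits. A minimal round-trip sketch using the same avocado-vt virsh wrappers (vm_name and the bandwidth values are placeholders):

from virttest import virsh

def set_and_verify_speed(vm_name, precopy_mib, postcopy_mib):
    """Set precopy/postcopy bandwidth and read both values back."""
    virsh.migrate_setspeed(vm_name, precopy_mib, debug=True)
    virsh.migrate_setspeed(vm_name, postcopy_mib, "--postcopy", debug=True)
    pre = virsh.migrate_getspeed(vm_name, debug=True).stdout_text.strip()
    post = virsh.migrate_getspeed(vm_name, extra="--postcopy",
                                  debug=True).stdout_text.strip()
    return pre == str(precopy_mib) and post == str(postcopy_mib)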
Example #25
def multi_migration(vm,
                    src_uri,
                    dest_uri,
                    options,
                    migrate_type,
                    migrate_thread_timeout,
                    jobabort=False,
                    lrunner=None,
                    rrunner=None,
                    status_error=None):
    """
    Migrate multiple vms simultaneously or not.

    :param vm: list of all vm instances
    :param src_uri: source ip address for migration
    :param dest_uri: destination ipaddress for migration
    :param options: options to be passed in migration command
    :param migrate_type: orderly or simultaneous migration type
    :param migrate_thread_timeout: thread timeout for migrating vms
    :param jobabort: If True, run "virsh domjobabort vm_name"
               during migration.
    :param lrunner: local session instance
    :param rrunner: remote session instance
    :param status_error: Whether expect error status
    """

    obj_migration = migration.MigrationTest()
    if migrate_type.lower() == "simultaneous":
        logging.info("Migrate vms simultaneously.")
        try:
            obj_migration.do_migration(vms=vm,
                                       srcuri=src_uri,
                                       desturi=dest_uri,
                                       migration_type="simultaneous",
                                       options=options,
                                       thread_timeout=migrate_thread_timeout,
                                       ignore_status=False,
                                       status_error=status_error)
            if jobabort:
                # To ensure Migration has been started.
                time.sleep(5)
                logging.info("Aborting job during migration.")
                jobabort_threads = []
                func = thread_func_jobabort
                for each_vm in vm:
                    jobabort_thread = threading.Thread(target=func,
                                                       args=(each_vm, ))
                    jobabort_threads.append(jobabort_thread)
                    jobabort_thread.start()
                for jobabort_thread in jobabort_threads:
                    jobabort_thread.join(migrate_thread_timeout)
            ret_migration = True

        except Exception as info:
            raise exceptions.TestFail(info)

    elif migrate_type.lower() == "orderly":
        logging.info("Migrate vms orderly.")
        try:
            obj_migration.do_migration(vms=vm,
                                       srcuri=src_uri,
                                       desturi=dest_uri,
                                       migration_type="orderly",
                                       options=options,
                                       thread_timeout=migrate_thread_timeout,
                                       ignore_status=False,
                                       status_error=status_error)
            for each_vm in vm:
                ping_thread = threading.Thread(target=thread_func_ping,
                                               args=(lrunner, rrunner,
                                                     each_vm.get_address()))
                ping_thread.start()

        except Exception as info:
            raise exceptions.TestFail(info)

    # Return the overall migration result so callers can assert on it
    ret_migration = bool(obj_migration.RET_MIGRATION)
    return ret_migration
Example #26
def run(test, params, env):
    """
    Run the test

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    libvirt_version.is_libvirt_feature_supported(params)

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    extra = params.get("virsh_migrate_extra")
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    options = params.get("virsh_migrate_options",
                         "--live --p2p --persistent --verbose")
    virsh_options = params.get("virsh_options", "")
    migrate_again = "yes" == params.get("migrate_again", "no")
    src_state = params.get("virsh_migrate_src_state", "shut off")
    set_src_and_dst_hostname = "yes" == params.get("set_src_and_dst_hostname",
                                                   "no")
    src_hostname = params.get("src_hostname")
    dst_hostname = params.get("dst_hostname")
    server_ip = params.get("remote_ip")
    server_user = params.get("remote_user", "root")
    server_pwd = params.get("remote_pwd")
    server_params = {
        'server_ip': server_ip,
        'server_user': server_user,
        'server_pwd': server_pwd
    }

    dst_session = None
    dst_libvirtd = None
    src_libvirtd = None
    src_NM_service = None
    dest_NM_service = None
    old_dst_hostname = None
    old_source_hostname = None

    # For safety reasons, back up the original XML file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Update guest disk xml
        libvirt.set_vm_disk(vm, params)

        if set_src_and_dst_hostname:
            old_dst_hostname = get_hostname(test, remote_params=server_params)
            old_source_hostname = get_hostname(test)
            dst_session = remote_old.wait_for_login('ssh', server_ip, '22',
                                                    server_user, server_pwd,
                                                    r"[\#\$]\s*$")
            dst_libvirtd = utils_libvirtd.Libvirtd(session=dst_session)
            src_libvirtd = utils_libvirtd.Libvirtd()

            try:
                # Check local NetworkManager service
                NM = utils_path.find_command("NetworkManager")
            except utils_path.CmdNotFoundError:
                logging.debug("No NetworkManager command found on source.")
                NM = None

            if NM is not None:
                # Stop local NetworkManager service
                src_NM_service = service.Factory.create_service(
                    "NetworkManager")
                if src_NM_service is not None:
                    stop_NM(src_NM_service)

            set_hostname(src_hostname, test)
            src_libvirtd.restart()

            # Check remote NetworkManager service
            check_cmd = "rpm -q NetworkManager"
            check_result = remote_old.run_remote_cmd(check_cmd,
                                                     server_params,
                                                     ignore_status=False)
            if check_result.exit_status:
                logging.debug("No NetworkManager command found on target.")
            else:
                # Stop remote NetworkManager service
                remote_runner = remote.RemoteRunner(host=server_ip,
                                                    username=server_user,
                                                    password=server_pwd)
                runner = remote_runner.run
                dest_NM_service = service.Factory.create_service(
                    "NetworkManager", run=runner)
                if dest_NM_service is not None:
                    stop_NM(dest_NM_service)

            set_hostname(dst_hostname, test, remote_params=server_params)
            dst_libvirtd.restart()

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        vm.wait_for_login()

        # Execute migration process
        migration_base.do_migration(vm, migration_test, None, dest_uri,
                                    options, virsh_options, extra, None,
                                    extra_args)

        func_returns = dict(migration_test.func_ret)
        migration_test.func_ret.clear()
        logging.debug("Migration returns function results:%s", func_returns)

        if migrate_again:
            if not vm.is_alive():
                vm.start()
            vm.wait_for_login()
            extra_args['status_error'] = params.get(
                "migrate_again_status_error", "no")

            if params.get("virsh_migrate_extra_mig_again"):
                extra = params.get("virsh_migrate_extra_mig_again")

            migration_base.do_migration(vm, migration_test, None, dest_uri,
                                        options, virsh_options, extra, None,
                                        extra_args)
        if int(migration_test.ret.exit_status) == 0:
            migration_test.post_migration_check([vm], params, uri=dest_uri)
        if not libvirt.check_vm_state(vm_name, state=src_state, uri=bk_uri):
            test.fail("Can't get the expected vm state '%s'" % src_state)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)

        if set_src_and_dst_hostname:
            if src_NM_service is not None:
                start_NM(src_NM_service)

            if dest_NM_service is not None:
                start_NM(dest_NM_service)

            set_hostname(old_dst_hostname, test, remote_params=server_params)
            if dst_libvirtd:
                dst_libvirtd.restart()
            if dst_session:
                dst_session.close()
            set_hostname(old_source_hostname, test)
            if src_libvirtd:
                src_libvirtd.restart()
        orig_config_xml.sync()
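The hostname test restores everything in the finally block in the reverse order it was changed: hostname back, libvirtd restarted, NetworkManager started again. A minimal sketch of that save/change/restore shape for the hostname alone; get_hostname/set_hostname here are hypothetical local stand-ins for the remote-capable helpers this test defines elsewhere (requires root):

import subprocess

def get_hostname():
    # Hypothetical stand-in for the test's get_hostname() helper
    return subprocess.check_output(["hostname"], text=True).strip()

def set_hostname(name):
    # Hypothetical stand-in; mirrors "hostnamectl set-hostname <name>"
    subprocess.run(["hostnamectl", "set-hostname", name], check=True)

def with_temp_hostname(new_hostname, action):
    """Run action() under a temporary hostname, always restoring the old one."""
    old = get_hostname()
    set_hostname(new_hostname)
    try:
        return action()
    finally:
        set_hostname(old)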
Example #27
def run(test, params, env):
    """
    Test virsh migrate command.
    """

    def check_vm_network_accessed(session=None, ping_dest="www.baidu.com"):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host
        :param ping_dest: The destination to ping

        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        status, output = utils_test.ping(ping_dest,
                                         count=10,
                                         timeout=20,
                                         output_func=logging.debug,
                                         session=session)
        if status != 0:
            test.fail("Ping failed, status: %s,"
                      " output: %s" % (status, output))

    def get_vm_ifaces(session=None):
        """
        Get interfaces of vm

        :param session: The session object to the host
        :return: interfaces
        """
        p_iface, v_iface = utils_net.get_remote_host_net_ifs(session)

        return p_iface

    def check_vm_iface_num(iface_list, exp_num=3):
        """
        Check the number of interfaces

        :param iface_list: The interface list
        :param exp_num: The expected number
        :raise: test.fail when interfaces' number is not equal to exp_num
        """
        if len(iface_list) != exp_num:
            test.fail("%d interfaces should be found on the vm, "
                      "but find %s." % (exp_num, iface_list))

    def create_or_del_networks(pf_name, params, remote_virsh_session=None,
                               is_del=False):
        """
        Create or delete network on local or remote

        :param params: Dictionary with the test parameters
        :param pf_name: The name of PF
        :param remote_virsh_session: The virsh session object to the remote host
        :param is_del: Whether the networks should be deleted
        :raise: test.fail when fails to define/start network
        """
        net_hostdev_name = params.get("net_hostdev_name", "hostdev-net")
        net_hostdev_fwd = params.get("net_hostdev_fwd",
                                     '{"mode": "hostdev", "managed": "yes"}')
        net_bridge_name = params.get("net_bridge_name", "host-bridge")
        net_bridge_fwd = params.get("net_bridge_fwd", '{"mode": "bridge"}')
        bridge_name = params.get("bridge_name", "br0")

        net_dict = {"net_name": net_hostdev_name,
                    "net_forward": net_hostdev_fwd,
                    "net_forward_pf": '{"dev": "%s"}' % pf_name}
        bridge_dict = {"net_name": net_bridge_name,
                       "net_forward": net_bridge_fwd,
                       "net_bridge": '{"name": "%s"}' % bridge_name}

        if not is_del:
            for net_params in (net_dict, bridge_dict):
                net_dev = libvirt.create_net_xml(net_params.get("net_name"),
                                                 net_params)
                if not remote_virsh_session:
                    if net_dev.get_active():
                        net_dev.undefine()
                    net_dev.define()
                    net_dev.start()
                else:
                    remote.scp_to_remote(server_ip, '22', server_user, server_pwd,
                                         net_dev.xml, net_dev.xml, limit="",
                                         log_filename=None, timeout=600,
                                         interface=None)
                    remote_virsh_session.net_define(net_dev.xml, **virsh_args)
                    remote_virsh_session.net_start(net_params.get("net_name"),
                                                   **virsh_args)

        else:
            virsh_session = virsh
            if remote_virsh_session:
                virsh_session = remote_virsh_session
            for nname in (net_hostdev_name, net_bridge_name):
                if nname not in virsh_session.net_state_dict():
                    continue
                virsh_session.net_destroy(nname, debug=True, ignore_status=True)
                virsh_session.net_undefine(nname, debug=True, ignore_status=True)

    def check_vm_network_connection(net_name, expected_conn=0):
        """
        Check network connections in network xml

        :param net_name: The network to be checked
        :param expected_conn: The expected value
        :raise: test.fail when fails
        """
        output = virsh.net_dumpxml(net_name, debug=True).stdout_text
        if expected_conn == 0:
            reg_pattern = r"<network>"
        else:
            reg_pattern = r"<network connections='(\d)'>"
        res = re.findall(reg_pattern, output, re.I)
        if not res:
            test.fail("Unable to find expected connection in %s." % net_name)
        if expected_conn != 0:
            if expected_conn != int(res[0]):
                test.fail("Unable to get expected connection number."
                          "Expected: %s, Actual %s" % (expected_conn, int(res[0])))

    def get_hostdev_addr_from_xml():
        """
        Get VM hostdev address

        :return: pci driver id
        """
        address_dict = {}
        for ifac in vm_xml.VMXML.new_from_dumpxml(vm_name).devices.by_device_tag("interface"):
            if ifac.type_name == "hostdev":
                address_dict = ifac.hostdev_address.attrs

        return libvirt.pci_info_from_address(address_dict, 16, "id")

    def check_vfio_pci(pci_path, status_error=False):
        """
        Check if vf driver is vfio-pci

        :param pci_path: The absolute path of pci device
        :param status_error: Whether the driver should be vfio-pci
        """
        cmd = "readlink %s/driver | awk -F '/' '{print $NF}'" % pci_path
        output = process.run(cmd, shell=True, verbose=True).stdout_text.strip()
        if (output == "vfio-pci") == status_error:
            test.fail("Get incorrect dirver %s, it should%s be vfio-pci."
                      % (output, ' not' if status_error else ''))

    def update_iface_xml(vmxml):
        """
        Update interfaces for guest

        :param vmxml: vm_xml.VMXML object
        """
        vmxml.remove_all_device_by_type('interface')
        vmxml.sync()

        iface_dict = {"type": "network", "source": "{'network': 'host-bridge'}",
                      "mac": mac_addr, "model": "virtio",
                      "teaming": '{"type":"persistent"}',
                      "alias": '{"name": "ua-backup0"}',
                      "inbound": '{"average":"5"}',
                      "outbound": '{"average":"5"}'}

        iface_dict2 = {"type": "network", "source": "{'network': 'hostdev-net'}",
                       "mac": mac_addr, "model": "virtio",
                       "teaming": '{"type":"transient", "persistent": "ua-backup0"}'}

        iface = interface.Interface('network')
        for ifc in (iface_dict, iface_dict2):
            iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", ifc)
            vmxml.add_device(iface)
        vmxml.sync()

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    bridge_name = params.get("bridge_name", "br0")
    net_hostdev_name = params.get("net_hostdev_name", "hostdev-net")
    net_bridge_name = params.get("net_bridge_name", "host-bridge")
    driver = params.get("driver", "ixgbe")
    vm_tmp_file = params.get("vm_tmp_file", "/tmp/test.txt")
    cmd_during_mig = params.get("cmd_during_mig")
    net_failover_test = "yes" == params.get("net_failover_test", "no")
    cancel_migration = "yes" == params.get("cancel_migration", "no")
    try:
        vf_no = int(params.get("vf_no", "4"))
    except ValueError as e:
        test.error(e)

    migr_vm_back = "yes" == params.get("migrate_vm_back", "no")
    err_msg = params.get("err_msg")
    status_error = "yes" == params.get("status_error", "no")
    cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                 'server_pwd': server_pwd}
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}
    destparams_dict = copy.deepcopy(params)

    remote_virsh_session = None
    vm_session = None
    vm = None
    mig_result = None
    func_name = None
    extra_args = {}
    default_src_vf = 0
    default_dest_vf = 0
    default_src_rp_filter = 1
    default_dest_rp_filter = 1

    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("This libvirt version doesn't support migration with "
                    "net failover devices.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
                                       params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
                                       params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, back up the original XML file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        if net_failover_test:
            src_pf, src_pf_pci = utils_sriov.find_pf(driver)
            logging.debug("src_pf is %s. src_pf_pci: %s", src_pf, src_pf_pci)
            params['pf_name'] = src_pf
            dest_pf, dest_pf_pci = utils_sriov.find_pf(driver, server_session)
            logging.debug("dest_pf is %s. dest_pf_pci: %s", dest_pf, dest_pf_pci)
            destparams_dict['pf_name'] = dest_pf

            src_pf_pci_path = utils_misc.get_pci_path(src_pf_pci)
            dest_pf_pci_path = utils_misc.get_pci_path(dest_pf_pci, server_session)

            cmd = "cat %s/sriov_numvfs" % (src_pf_pci_path)
            default_src_vf = process.run(cmd, shell=True,
                                         verbose=True).stdout_text

            cmd = "cat %s/sriov_numvfs" % (dest_pf_pci_path)
            status, default_dest_vf = utils_misc.cmd_status_output(cmd,
                                                                   shell=True,
                                                                   session=server_session)
            if status:
                test.error("Unable to get default sriov_numvfs on target!"
                           "status: %s, output: %s" % (status, default_dest_vf))

            if not utils_sriov.set_vf(src_pf_pci_path, vf_no):
                test.error("Failed to set vf on source.")

            if not utils_sriov.set_vf(dest_pf_pci_path, vf_no, session=server_session):
                test.error("Failed to set vf on target.")

            # Create PF and bridge connection on source and target host
            cmd = 'cat /proc/sys/net/ipv4/conf/all/rp_filter'
            default_src_rp_filter = process.run(cmd, shell=True,
                                                verbose=True).stdout_text
            status, default_dest_rp_filter = utils_misc.cmd_status_output(cmd,
                                                                          shell=True,
                                                                          session=server_session)
            if status:
                test.error("Unable to get default rp_filter on target!"
                           "status: %s, output: %s" % (status, default_dest_rp_filter))
            cmd = 'echo 0 >/proc/sys/net/ipv4/conf/all/rp_filter'
            process.run(cmd, shell=True, verbose=True)
            utils_misc.cmd_status_output(cmd, shell=True, session=server_session)
            utils_sriov.add_or_del_connection(params, is_del=False)
            utils_sriov.add_or_del_connection(destparams_dict, is_del=False,
                                              session=server_session)

            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            create_or_del_networks(dest_pf, params,
                                   remote_virsh_session=remote_virsh_session)
            remote_virsh_session.close_session()
            create_or_del_networks(src_pf, params)
            # Change network interface xml
            mac_addr = utils_net.generate_mac_address_simple()
            update_iface_xml(new_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        # Check local guest network connection before migration
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)

        if net_failover_test:
            utils_net.restart_guest_network(vm_session)
        iface_list = get_vm_ifaces(vm_session)

        vm_ipv4, vm_ipv6 = utils_net.get_linux_ipaddr(vm_session, iface_list[0])
        check_vm_network_accessed(ping_dest=vm_ipv4)

        if net_failover_test:
            check_vm_iface_num(iface_list)
            check_vm_network_connection(net_hostdev_name, 1)
            check_vm_network_connection(net_bridge_name, 1)

            hostdev_pci_id = get_hostdev_addr_from_xml()
            vf_path = utils_misc.get_pci_path(hostdev_pci_id)
            check_vfio_pci(vf_path)
            if cmd_during_mig:
                s, o = utils_misc.cmd_status_output(cmd_during_mig, shell=True,
                                                    session=vm_session)
                if s:
                    test.fail("Failed to run %s in vm." % cmd_during_mig)

        if extra.count("--postcopy"):
            func_name = virsh.migrate_postcopy
            extra_args.update({'func_params': params})
        if cancel_migration:
            func_name = migration_test.do_cancel

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True, virsh_opt=virsh_options,
                                    func=func_name, extra_opts=extra,
                                    **extra_args)
        mig_result = migration_test.ret

        migration_test.check_result(mig_result, params)

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session, vm_ipv4)
            server_session.close()
            if net_failover_test:
                # Check network connection
                check_vm_network_connection(net_hostdev_name)
                check_vm_network_connection(net_bridge_name)
                # VF driver should not be vfio-pci
                check_vfio_pci(vf_path, True)

                cmd_parms.update({'vm_ip': vm_ipv4,
                                  'vm_pwd': params.get("password")})
                vm_after_mig = remote.VMManager(cmd_parms)
                vm_after_mig.setup_ssh_auth()
                cmd = "ip link"
                cmd_result = vm_after_mig.run_command(cmd)
                libvirt.check_result(cmd_result)
                p_iface = re.findall(r"\d+:\s+(\w+):\s+.*", cmd_result.stdout_text)
                p_iface = [x for x in p_iface if x != 'lo']
                check_vm_iface_num(p_iface)

                # Check the output of ping command
                cmd = 'cat %s' % vm_tmp_file
                cmd_result = vm_after_mig.run_command(cmd)
                libvirt.check_result(cmd_result)

                if re.findall('Destination Host Unreachable', cmd_result.stdout_text, re.M):
                    test.fail("The network does not work well during "
                              "the migration peirod. ping output: %s"
                              % cmd_result.stdout_text)

            # Execute migration from remote
            if migr_vm_back:
                ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                          server_pwd=client_pwd,
                                                          client_ip=server_ip,
                                                          client_pwd=server_pwd)
                try:
                    ssh_connection.conn_check()
                except utils_conn.ConnectionError:
                    ssh_connection.conn_setup()
                    ssh_connection.conn_check()

                # Pre migration setup for local machine
                migration_test.migrate_pre_setup(src_uri, params)

                cmd = "virsh migrate %s %s %s" % (vm_name,
                                                  virsh_options, src_uri)
                logging.debug("Start migration: %s", cmd)
                cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
                logging.info(cmd_result)
                if cmd_result.exit_status:
                    test.fail("Failed to run '%s' on remote: %s"
                              % (cmd, cmd_result))
                logging.debug("migration back done")
                check_vm_network_accessed(ping_dest=vm_ipv4)
                if net_failover_test:
                    if vm_session:
                        vm_session.close()
                    vm_session = vm.wait_for_login()
                    iface_list = get_vm_ifaces(vm_session)
                    check_vm_iface_num(iface_list)

        else:
            check_vm_network_accessed(ping_dest=vm_ipv4)
            if net_failover_test:
                iface_list = get_vm_ifaces(vm_session)
                check_vm_iface_num(iface_list)

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        if 'src_pf' in locals():
            cmd = 'echo %s  >/proc/sys/net/ipv4/conf/all/rp_filter' % default_src_rp_filter
            process.run(cmd, shell=True, verbose=True)
            utils_sriov.add_or_del_connection(params, is_del=True)
            create_or_del_networks(src_pf, params, is_del=True)

        if 'dest_pf' in locals():
            cmd = 'echo %s  >/proc/sys/net/ipv4/conf/all/rp_filter' % default_dest_rp_filter
            utils_misc.cmd_status_output(cmd, shell=True, session=server_session)
            utils_sriov.add_or_del_connection(destparams_dict, session=server_session,
                                              is_del=True)
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            create_or_del_networks(dest_pf, params,
                                   remote_virsh_session,
                                   is_del=True)
            remote_virsh_session.close_session()

        if 'dest_pf_pci_path' in locals() and default_dest_vf != vf_no:
            utils_sriov.set_vf(dest_pf_pci_path, default_dest_vf, server_session)
        if 'src_pf_pci_path' in locals() and default_src_vf != vf_no:
            utils_sriov.set_vf(src_pf_pci_path, default_src_vf)

        # Clean up of pre migration setup for local machine
        if migr_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params,
                                             cleanup=True)

        server_session.close()
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        if source_file:
            libvirt.delete_local_disk("file", path=source_file)
Example #28
def run(test, params, env):
    """
    Test command: virsh domjobabort.

    The command can abort the currently running domain job.
    1. Prepare the test environment; destroy or suspend a VM.
    2. Perform an action that creates a subprocess (dump, save, managedsave).
    3. Run virsh domjobabort to abort the VM's job.
    4. Recover the VM's status and wait for the subprocess to finish.
    5. Confirm the test result.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    readonly = ("yes" == params.get("readonly", "no"))
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    original_speed = virsh.migrate_getspeed(vm_name).stdout.strip()

    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute a background virsh command and return the subprocess
        without waiting for it to exit.

        :param action: virsh command to run (dump, save, managedsave, migrate)
        :param vm_name: VM's name
        :param file: file option of the virsh command
        :param remote_uri: destination uri, used when action is "migrate"
        """
        args = ""
        if action == "managedsave":
            file = ""
        elif action == "migrate":
            # Slow down migration for domjobabort
            virsh.migrate_setspeed(vm_name, "1")
            file = remote_uri
            args = "--unsafe"
        command = "virsh %s %s %s %s" % (action, vm_name, file, args)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    action = params.get("jobabort_action", "dump")
    dump_opt = params.get("dump_opt", None)
    status_error = params.get("status_error", "no")
    job = params.get("jobabort_job", "yes")
    tmp_file = os.path.join(data_dir.get_tmp_dir(), "domjobabort.tmp")
    tmp_pipe = os.path.join(data_dir.get_tmp_dir(), "domjobabort.fifo")
    vm_ref = params.get("jobabort_vm_ref")
    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    saved_data = None

    # Build job action
    if dump_opt:
        action = "dump --crash"

    if action == "managedsave":
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name
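        # managedsave always writes to this libvirt-managed path; creating a
        # FIFO there (below) lets the test stall the job so it can be aborted.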

    if action == "restore":
        virsh.save(vm_name, tmp_file, ignore_status=True)

    if action == "migrate":
        if remote_host.count("EXAMPLE"):
            test.cancel("Remote host should be configured for migration.")
        else:
            # Config ssh autologin for remote host
            ssh_key.setup_ssh_key(remote_host,
                                  remote_user,
                                  remote_pwd,
                                  port=22)

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # Create the background job subprocess for the VM.
    # domjobabort aborts the currently running domain job, so a job must
    # already exist on the domain before the abort is attempted.
    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        os.mkfifo(tmp_pipe)
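        # Using a FIFO as the job's file makes virsh block on pipe I/O,
        # keeping the domain job alive long enough for domjobabort to catch it.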

        process = get_subprocess(action, vm_name, tmp_pipe, remote_uri)

        saved_data = None
        if action == "restore":
            with open(tmp_file, 'r') as tmp_f:
                saved_data = tmp_f.read(10 * 1024 * 1024)
            f = open(tmp_pipe, 'w')
            f.write(saved_data[:1024 * 1024])
        elif action == "migrate":
            f = None
        else:
            f = open(tmp_pipe, 'rb')
            dummy = f.read(1024 * 1024).decode(locale.getpreferredencoding(),
                                               'ignore')

    # Give the job enough time to start
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break
    virsh_dargs = {'ignore_status': True, 'debug': True}
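    # With a read-only connection the abort should be denied; the
    # status_error check below verifies that expectation.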
    if readonly:
        virsh_dargs.update({'readonly': True})
    ret = virsh.domjobabort(vm_ref, **virsh_dargs)
    status = ret.exit_status

    if process and f:
        if saved_data:
            f.write(saved_data[1024 * 1024:])
        else:
            dummy = f.read()
        f.close()

        try:
            os.unlink(tmp_pipe)
        except OSError as detail:
            logging.info("Can't remove %s: %s", tmp_pipe, detail)
        try:
            os.unlink(tmp_file)
        except OSError as detail:
            logging.info("Can't remove %s: %s", tmp_file, detail)

    # Recover the environment.
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        # Kill the background job only if it is still running
        # (poll() returns None while the process is alive).
        if process.poll() is None:
            try:
                process.kill()
            except OSError:
                pass

    if action == "migrate":
        # Recover migration speed
        virsh.migrate_setspeed(vm_name, original_speed)
        migration.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run succeeded with a wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with a right command")
Example #29
def run(test, params, env):
    """
    Test migration with glusterfs.
    """
    def create_or_clean_backend_dir(g_uri,
                                    params,
                                    session=None,
                                    is_clean=False):
        """
        Create or clean up the backend directory

        :param g_uri: glusterfs uri
        :param params: the parameters to be checked
        :param session: VM/remote session object
        :param is_clean: True to clean up the backend directory;
                         False to create one
        :return: the gluster_img path when is_clean is False, otherwise None
        """
        mount_point = params.get("gluster_mount_dir")
        is_symlink = params.get("gluster_create_symlink") == "yes"
        symlink_name = params.get("gluster_symlink")
        gluster_img = None
        if not is_clean:
            if not utils_misc.check_exists(mount_point, session):
                utils_misc.make_dirs(mount_point, session)

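            # Unmount any stale mount first so the test always starts from a
            # fresh glusterfs mount.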
            if gluster.glusterfs_is_mounted(mount_point, session):
                gluster.glusterfs_umount(g_uri, mount_point, session)
            gluster.glusterfs_mount(g_uri, mount_point, session)

            gluster_img = os.path.join(mount_point, disk_img)
            if is_symlink:
                utils_misc.make_symlink(mount_point, symlink_name)
                utils_misc.make_symlink(mount_point, symlink_name,
                                        remote_session)
                gluster_img = os.path.join(symlink_name, disk_img)
            return gluster_img
        else:
            if is_symlink:
                utils_misc.rm_link(symlink_name, session)

            gluster.glusterfs_umount(g_uri, mount_point, session)
            if utils_misc.check_exists(mount_point, session):
                utils_misc.safe_rmdir(mount_point, session=session)

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    virsh_options = params.get("virsh_options", "")

    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    gluster_mount_dir = params.get("gluster_mount_dir")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")
    host_ip = params.get("gluster_server_ip", "")
    migrate_vm_back = params.get("migrate_vm_back", "no") == "yes"

    selinux_local = params.get('set_sebool_local', 'yes') == "yes"
    selinux_remote = params.get('set_sebool_remote', 'no') == "yes"
    sebool_fusefs_local = params.get('set_sebool_fusefs_local', 'yes')
    sebool_fusefs_remote = params.get('set_sebool_fusefs_remote', 'yes')
    test_dict = dict(params)
    test_dict["local_boolean_varible"] = "virt_use_fusefs"
    test_dict["remote_boolean_varible"] = "virt_use_fusefs"
    remote_dargs = {
        'server_ip': server_ip,
        'server_user': server_user,
        'server_pwd': server_pwd,
        'file_path': "/etc/libvirt/libvirt.conf"
    }

    remove_pkg = False
    seLinuxBool = None
    seLinuxfusefs = None
    gluster_uri = None
    mig_result = None
    remove_dict = {}
    remote_libvirt_file = None
    src_libvirt_file = None

    # Make sure all of the parameters are assigned valid values
    migrate_test = migration.MigrationTest()
    migrate_test.check_parameters(params)
    extra_args = migrate_test.update_virsh_migrate_extra_args(params)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    # For enabling --postcopy
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (virsh_options, postcopy_options)
        func_name = virsh.migrate_postcopy

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # Back up xml file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Configure selinux
        if selinux_local or selinux_remote:
            seLinuxBool = utils_misc.SELinuxBoolean(params)
            seLinuxBool.setup()
            if sebool_fusefs_local == "yes" or sebool_fusefs_remote == "yes":
                seLinuxfusefs = utils_misc.SELinuxBoolean(test_dict)
                seLinuxfusefs.setup()

        # Setup glusterfs
        disk_img = "gluster.%s" % disk_format
        params['disk_img'] = disk_img
        host_ip = gluster.setup_or_cleanup_gluster(is_setup=True, **params)
        logging.debug("host ip: %s ", host_ip)

        # Check if gluster server is deployed locally
        if not host_ip:
            logging.debug("Enable port 24007 and 49152:49216")
            migrate_test.migrate_pre_setup(src_uri, params, ports="24007")
            migrate_test.migrate_pre_setup(src_uri, params)
            gluster_uri = "{}:{}".format(client_ip, vol_name)
        else:
            gluster_uri = "{}:{}".format(host_ip, vol_name)

        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")

        if gluster_mount_dir:
            # If the package 'glusterfs-fuse' is not installed on the target,
            # 'mount -t glusterfs' will fail there
            pkg_name = 'glusterfs-fuse'
            logging.debug("Check if glusterfs-fuse is installed")
            pkg_mgr = utils_package.package_manager(remote_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("glusterfs-fuse will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)
                else:
                    remove_pkg = True

            gluster_img = create_or_clean_backend_dir(gluster_uri, params)
            create_or_clean_backend_dir(gluster_uri, params, remote_session)

            # Get the image path
            image_source = vm.get_first_disk_devices()['source']
            image_info = utils_misc.get_image_info(image_source)
            if image_info["format"] == disk_format:
                disk_cmd = "cp -f %s %s" % (image_source, gluster_img)
            else:
                # Convert the disk format
                disk_cmd = ("qemu-img convert -f %s -O %s %s %s" %
                            (image_info["format"], disk_format, image_source,
                             gluster_img))
            process.run("%s; chmod a+rw %s" % (disk_cmd, gluster_mount_dir),
                        shell=True)

            logging.debug("Gluster Image is %s", gluster_img)
            gluster_backend_disk = {'disk_source_name': gluster_img}
            # Update disk xml with gluster image in backend dir
            libvirt.set_vm_disk(vm, gluster_backend_disk)
        remote_session.close()

        vm_xml_cxt = virsh.dumpxml(vm_name).stdout_text.strip()
        logging.debug("The VM XML with gluster disk source: \n%s", vm_xml_cxt)

        vm.wait_for_login().close()
        migrate_test.ping_vm(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        vms = [vm]
        migrate_test.do_migration(vms,
                                  None,
                                  dest_uri,
                                  'orderly',
                                  options,
                                  thread_timeout=900,
                                  ignore_status=True,
                                  virsh_opt=virsh_options,
                                  extra_opts=extra,
                                  **extra_args)
        migrate_test.ping_vm(vm, params, dest_uri)

        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
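            # As in the earlier example, server_* and client_* are swapped so
            # the target host can SSH back to the source for the reverse
            # migration.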
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migrate_test.migrate_pre_setup(src_uri, params)
            remove_dict = {"do_search": ('{"%s": "ssh:/"}' % src_uri)}
            remote_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
                remove_dict, remote_dargs)

            cmd = "virsh migrate %s %s %s %s" % (vm_name, options,
                                                 virsh_options, src_uri)
            logging.debug("Start migrating: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)

            if cmd_result.exit_status:
                destroy_cmd = "virsh destroy %s" % vm_name
                remote.run_remote_cmd(destroy_cmd,
                                      params,
                                      runner_on_target,
                                      ignore_status=False)
                test.fail("Failed to run '%s' on remote: %s" %
                          (cmd, cmd_result))

    finally:
        logging.info("Recover test environment")
        migrate_test.cleanup_vm(vm, dest_uri)

        orig_config_xml.sync()

        if src_libvirt_file:
            src_libvirt_file.restore()
        if remote_libvirt_file:
            del remote_libvirt_file

        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True)

        # Clean up SELinux configuration
        if seLinuxBool:
            seLinuxBool.cleanup()
            if seLinuxfusefs:
                seLinuxfusefs.cleanup()

        # Disable ports 24007 and 49152:49216
        if not host_ip:
            logging.debug("Disable 24007 and 49152:49216 in Firewall")
            migrate_test.migrate_pre_setup(src_uri,
                                           params,
                                           cleanup=True,
                                           ports="24007")
            migrate_test.migrate_pre_setup(src_uri, params, cleanup=True)

        gluster.setup_or_cleanup_gluster(False, **params)

        # Cleanup backend directory/symlink
        if gluster_mount_dir and gluster_uri:
            remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            create_or_clean_backend_dir(gluster_uri, params, is_clean=True)
            create_or_clean_backend_dir(gluster_uri, params, remote_session,
                                        True)
            if remove_pkg:
                pkg_mgr = utils_package.package_manager(
                    remote_session, pkg_name)
                if pkg_mgr.is_installed(pkg_name):
                    logging.debug("glusterfs-fuse will be uninstalled")
                    if not pkg_mgr.remove():
                        logging.error("Package '%s' un-installation fails",
                                      pkg_name)
            remote_session.close()
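
A hedged sketch of the backend preparation step above; the gluster host,
volume, mount directory and image paths are placeholder assumptions:

import subprocess

gluster_uri = "gluster.example.com:vol0"       # hypothetical <server>:<volume>
mount_point = "/mnt/gluster"                   # hypothetical mount directory
src_img = "/var/lib/libvirt/images/vm1.img"    # hypothetical source image
dst_img = "%s/gluster.qcow2" % mount_point

# Mount the gluster volume over FUSE (requires the glusterfs-fuse package).
subprocess.run(["mount", "-t", "glusterfs", gluster_uri, mount_point],
               check=True)
# Convert the source image into the gluster-backed directory.
subprocess.run(["qemu-img", "convert", "-O", "qcow2", src_img, dst_img],
               check=True)
# Let qemu access the new image regardless of the user it runs as.
subprocess.run(["chmod", "a+rw", dst_img], check=True)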
Example #30
def run(test, params, env):
    """
    Run the test to check events during migration

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    extra = params.get("virsh_migrate_extra")
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    options = params.get("virsh_migrate_options",
                         "--live --p2p --persistent --verbose")
    virsh_options = params.get("virsh_options", "")
    action_during_mig = params.get("action_during_mig")
    migrate_speed = params.get("migrate_speed")

    virsh_session = None
    remote_virsh_session = None

    if action_during_mig:
        action_during_mig = migration_base.parse_funcs(action_during_mig, test,
                                                       params)

    # For safety reasons, back up the XML file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        vm.wait_for_login().close()

        if migrate_speed:
            mode = ('both' if postcopy_options and '--postcopy' in postcopy_options
                    else 'precopy')
            migration_test.control_migrate_speed(vm_name, int(migrate_speed),
                                                 mode)
        # Monitor event on source/target host
        virsh_session, remote_virsh_session = migration_base.monitor_event(
            params)

        # Execute migration process
        migration_base.do_migration(vm, migration_test, None, dest_uri,
                                    options, virsh_options, extra,
                                    action_during_mig, extra_args)

        func_returns = dict(migration_test.func_ret)
        migration_test.func_ret.clear()
        logging.debug("Migration returns function results:%s", func_returns)

        # Check event output
        migration_base.check_event_output(params, test, virsh_session,
                                          remote_virsh_session)

        if int(migration_test.ret.exit_status) == 0:
            migration_test.post_migration_check([vm], params, uri=dest_uri)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)

        orig_config_xml.sync()
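
A rough stdlib-only sketch of the event-monitoring idea above; the domain
name and destination URI are placeholder assumptions:

import subprocess

vm_name = "vm1"                                  # hypothetical domain name
dest_uri = "qemu+ssh://dest.example.com/system"  # hypothetical destination

# Watch all events for the domain in the background on the source host.
watcher = subprocess.Popen(["virsh", "event", vm_name, "--all", "--loop"],
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)

# Run the live migration; --persistent defines the VM on the target.
subprocess.run(["virsh", "migrate", "--live", "--p2p", "--persistent",
                vm_name, dest_uri], check=True)

# Stop the watcher and print whatever events were emitted.
watcher.terminate()
events, _ = watcher.communicate()
print(events.decode())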