Example #1
0
def run(test, params, env):
    """
    Test migration of multiple VMs between two hosts.

    1) Validate source/destination URIs, configure SSH auto-login and
       (optionally) an NFS shared disk on the remote host.
    2) Unless '--unsafe' is among the migration options, reconfigure each
       guest disk with cache=none (required for safe live migration).
    3) Migrate all VMs (per ``migration_type``) with an optional job abort.
    4) Evaluate the module-level result flags updated by multi_migration().

    :param test: test object (unused directly, required by the framework).
    :param params: dict-like test parameters.
    :param env: test environment holding the VM objects.
    :raises exceptions.TestSkipError: when fewer than two VMs are supplied
        or the URIs still contain placeholder values.
    :raises exceptions.TestError: when disk detach/attach fails.
    :raises exceptions.TestFail: when migration, job abort or downtime
        checks fail unexpectedly.
    """
    vm_names = params.get("migrate_vms").split()
    if len(vm_names) < 2:
        raise exceptions.TestSkipError("No multi vms provided.")

    # Prepare parameters
    method = params.get("virsh_migrate_method")
    jobabort = "yes" == params.get("virsh_migrate_jobabort", "no")
    options = params.get("virsh_migrate_options", "")
    status_error = "yes" == params.get("status_error", "no")
    remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM")
    local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM")
    host_user = params.get("host_user", "root")
    host_passwd = params.get("host_password", "PASSWORD")
    # NOTE(review): when set in the cfg this is a *string*, and any
    # non-empty string (even "no") is truthy -- confirm the intent or
    # compare against "yes" explicitly.
    nfs_shared_disk = params.get("nfs_shared_disk", True)
    migration_type = params.get("virsh_migration_type", "simultaneous")
    migrate_timeout = int(params.get("virsh_migrate_thread_timeout", 900))
    migration_time = int(params.get("virsh_migrate_timeout", 60))

    # Params for NFS and SSH setup
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_user"] = "******"
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_user"] = "******"
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")
    desturi = libvirt_vm.get_uri_with_transport(transport="ssh",
                                                dest_ip=remote_host)
    srcuri = libvirt_vm.get_uri_with_transport(transport="ssh",
                                               dest_ip=local_host)

    # Don't allow the defaults: placeholder hostnames mean the cfg was
    # never customized for this environment.
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        raise exceptions.TestSkipError("The srcuri '%s' is invalid" % srcuri)
    if desturi.count('///') or desturi.count('EXAMPLE'):
        raise exceptions.TestSkipError("The desturi '%s' is invalid" % desturi)

    # Config ssh autologin for remote host
    ssh_key.setup_remote_ssh_key(remote_host,
                                 host_user,
                                 host_passwd,
                                 port=22,
                                 public_key="rsa")

    # Prepare local session and remote session
    # NOTE(review): both runners target remote_host; if a truly local
    # runner was intended, the first should use local_host -- confirm.
    localrunner = remote.RemoteRunner(host=remote_host,
                                      username=host_user,
                                      password=host_passwd)
    remoterunner = remote.RemoteRunner(host=remote_host,
                                       username=host_user,
                                       password=host_passwd)
    # Configure NFS in remote host
    if nfs_shared_disk:
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

    # Collect the VM objects for every name under test.
    vms = []
    for vm_name in vm_names:
        vm = env.get_vm(vm_name)
        vms.append(vm)

    # Pre-set the success flag: it must exist even when no exception is
    # raised, otherwise the result check below raises NameError.
    flag_migration = True
    try:
        option = make_migration_options(method, options, migration_time)

        # make sure cache=none
        if "unsafe" not in options:
            device_target = params.get("virsh_device_target", "sda")
            for vm in vms:
                if vm.is_alive():
                    vm.destroy()
            for each_vm in vm_names:
                logging.info("configure cache=none")
                vmxml = vm_xml.VMXML.new_from_dumpxml(each_vm)
                device_source = str(
                    vmxml.get_disk_attr(each_vm, device_target, 'source',
                                        'file'))
                ret_detach = virsh.detach_disk(each_vm, device_target,
                                               "--config")
                status = ret_detach.exit_status
                output = ret_detach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                # A CmdResult object is always truthy, so the failure must
                # be detected from its exit status, not its truthiness.
                if status:
                    raise exceptions.TestError("Detach disks fails")

                subdriver = utils_test.get_image_info(device_source)['format']
                ret_attach = virsh.attach_disk(
                    each_vm, device_source, device_target, "--driver qemu "
                    "--config --cache none "
                    "--subdriver %s" % subdriver)
                status = ret_attach.exit_status
                output = ret_attach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                if status:
                    raise exceptions.TestError("Attach disks fails")

        for vm in vms:
            if vm.is_dead():
                vm.start()
                vm.wait_for_login()
        multi_migration(vms,
                        srcuri,
                        desturi,
                        option,
                        migration_type,
                        migrate_timeout,
                        jobabort,
                        lrunner=localrunner,
                        rrunner=remoterunner,
                        status_error=status_error)
    except Exception as info:
        # Lazy %-style args: let logging do the formatting.
        logging.error("Test failed: %s", info)
        flag_migration = False

    # NFS cleanup
    if nfs_shared_disk:
        logging.info("NFS cleanup")
        nfs_client.cleanup(ssh_auto_recover=False)

    localrunner.session.close()
    remoterunner.session.close()

    # ret_migration / ret_jobabort / ret_downtime_tolerable are
    # module-level flags updated by multi_migration().
    if not (ret_migration or flag_migration):
        if not status_error:
            raise exceptions.TestFail("Migration test failed")
    if not ret_jobabort:
        if not status_error:
            raise exceptions.TestFail("Abort migration failed")
    if not ret_downtime_tolerable:
        raise exceptions.TestFail("Downtime during migration is intolerable")
def run(test, params, env):
    """
    Test virsh migrate-setmaxdowntime command.

    1) Prepare migration environment
    2) Start migration and set migrate-maxdowntime
    3) Cleanup environment(migrated vm on destination)
    4) Check result

    :param test: test object (unused directly, required by the framework).
    :param params: dict-like test parameters.
    :param env: test environment holding the VM objects.
    :raises error.TestNAError: when the URIs are unconfigured placeholders.
    :raises error.TestError: when backing up the guest XML fails.
    :raises error.TestFail: when migration or setmaxdowntime results do not
        match the expectation.
    """
    dest_uri = params.get("virsh_migrate_dest_uri",
                          "qemu+ssh://MIGRATE_EXAMPLE/system")
    src_uri = params.get("virsh_migrate_src_uri",
                         "qemu+ssh://MIGRATE_EXAMPLE/system")
    if dest_uri.count('///') or dest_uri.count('MIGRATE_EXAMPLE'):
        raise error.TestNAError("Set your destination uri first.")
    if src_uri.count('MIGRATE_EXAMPLE'):
        raise error.TestNAError("Set your source uri first.")
    if src_uri == dest_uri:
        raise error.TestNAError("You should not set dest uri same as local.")
    vm_ref = params.get("setmmdt_vm_ref", "domname")
    pre_vm_state = params.get("pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", "no")
    do_migrate = "yes" == params.get("do_migrate", "yes")
    # Configured value is in seconds; virsh expects milliseconds.
    migrate_maxdowntime = params.get("migrate_maxdowntime", 1.000)
    if (migrate_maxdowntime == ""):
        downtime = ""
    else:
        # Multiply BEFORE truncating so fractional seconds survive
        # (0.5 s -> 500 ms, not int(0.5)*1000 == 0 ms).
        downtime = int(float(migrate_maxdowntime) * 1000)
    extra = params.get("setmmdt_extra")

    # A delay between threads
    delay_time = int(params.get("delay_time", 1))
    # timeout of threads
    thread_timeout = 180

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()

    grep_str_local = params.get("grep_str_from_local_libvirt_log", "")

    # For safety reasons, we'd better back up original guest xml
    orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if not orig_config_xml:
        raise error.TestError("Backing up xmlfile failed.")

    # Params to configure libvirtd.conf
    log_file = "/var/log/libvirt/libvirtd.log"
    log_level = "1"
    log_filters = '"1:json 1:libvirt 1:qemu 1:monitor 3:remote 4:event"'
    libvirtd_conf_dict = {
        "log_level": log_level,
        "log_filters": log_filters,
        "log_outputs": '"%s:file:%s"' % (log_level, log_file)
    }

    # Update libvirtd config with new parameters, then restart the daemon
    # so the verbose logging takes effect before migration starts.
    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd_conf = config_libvirt(libvirtd_conf_dict)
    libvirtd.restart()

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Params to setup SSH connection
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    # Params to enable SELinux boolean on remote host
    params["remote_boolean_varible"] = "virt_use_nfs"
    params["remote_boolean_value"] = "on"
    params["set_sebool_remote"] = "yes"

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri}
    migrate_dargs = {'debug': True, 'ignore_status': True}

    seLinuxBool = None
    nfs_client = None
    local_selinux_bak = ""

    try:
        # Update the disk using shared storage
        libvirt.set_vm_disk(vm, params)

        # Backup the SELinux status on local host for recovering
        local_selinux_bak = params.get("selinux_status_bak", "")

        # Configure NFS client on remote host
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

        logging.info("Enable virt NFS SELinux boolean on target host")
        seLinuxBool = utils_misc.SELinuxBoolean(params)
        seLinuxBool.setup()

        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        domid = vm.get_id()

        # Confirm how to reference a VM.
        if vm_ref == "domname":
            vm_ref = vm_name
        elif vm_ref == "domid":
            vm_ref = domid
        elif vm_ref == "domuuid":
            vm_ref = domuuid

        # Prepare vm state
        if pre_vm_state == "paused":
            vm.pause()
        elif pre_vm_state == "shutoff":
            vm.destroy(gracefully=False)
            # Ensure VM in 'shut off' status
            utils_misc.wait_for(lambda: vm.state() == "shut off", 30)

        # Set max migration downtime must be during migration
        # Using threads for synchronization
        threads = []
        if do_migrate:
            threads.append(
                threading.Thread(target=thread_func_live_migration,
                                 args=(vm, dest_uri, migrate_dargs)))

        threads.append(
            threading.Thread(target=thread_func_setmmdt,
                             args=(vm_ref, downtime, extra, setmmdt_dargs)))
        for thread in threads:
            thread.start()
            # Migration must be executing before setting maxdowntime
            time.sleep(delay_time)
        # Wait until thread is over
        for thread in threads:
            thread.join(thread_timeout)

        if (status_error is False or do_migrate is False):
            logging.debug("To match the expected pattern '%s' ...",
                          grep_str_local)
            cmd = "grep -E '%s' %s" % (grep_str_local, log_file)
            cmdResult = process.run(cmd, shell=True, verbose=False)
            logging.debug(cmdResult)

    finally:
        # Clean up.
        if do_migrate:
            logging.debug("Cleanup VM on remote host...")
            cleanup_dest(vm, src_uri, dest_uri)

        if orig_config_xml:
            logging.debug("Recover VM XML...")
            orig_config_xml.sync()

        if seLinuxBool:
            logging.info("Recover NFS SELinux boolean on remote host...")
            seLinuxBool.cleanup(True)

        if nfs_client:
            logging.info("Cleanup NFS client environment...")
            nfs_client.cleanup()

        logging.info("Remove the NFS image...")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)

        logging.info("Cleanup NFS server environment...")
        exp_dir = params.get("export_dir")
        mount_dir = params.get("mnt_path_name")
        libvirt.setup_or_cleanup_nfs(False,
                                     export_dir=exp_dir,
                                     mount_dir=mount_dir,
                                     restore_selinux=local_selinux_bak)

        # Recover libvirtd service configuration on local
        if libvirtd_conf:
            logging.debug("Recover local libvirtd configuration...")
            libvirtd_conf.restore()
            libvirtd.restart()
            os.remove(log_file)

    # Check results.  ret_setmmdt / ret_migration are module-level flags
    # updated by the thread functions above.
    if status_error:
        if ret_setmmdt:
            if not do_migrate and libvirt_version.version_compare(1, 2, 9):
                # https://bugzilla.redhat.com/show_bug.cgi?id=1146618
                # Commit fe808d9 fix it and allow setting migration
                # max downtime any time since libvirt-1.2.9
                logging.info("Libvirt version is newer than 1.2.9,"
                             "Allow set maxdowntime while VM isn't migrating")
            else:
                raise error.TestFail("virsh migrate-setmaxdowntime succeed "
                                     "but not expected.")
    else:
        if do_migrate and not ret_migration:
            raise error.TestFail("Migration failed.")

        if not ret_setmmdt:
            raise error.TestFail("virsh migrate-setmaxdowntime failed.")
Example #3
0
    nfs_client = None
    seLinuxBool = None
    exception = False
    remote_viewer_pid = None
    asynch_migration = False
    ret_migrate = True

    try:
        # Change the disk of the vm to shared disk
        libvirt.set_vm_disk(vm, params)
        # Backup the SELinux status on local host for recovering
        local_selinux_bak = params.get("selinux_status_bak")

        # Configure NFS client on remote host
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

        logging.info("Enable virt NFS SELinux boolean on target host.")
        seLinuxBool = SELinuxBoolean(params)
        seLinuxBool.setup()

        subdriver = utils_test.get_image_info(shared_storage)['format']
        extra_attach = ("--config --driver qemu --subdriver %s --cache %s" %
                        (subdriver, disk_cache))

        # Attach a scsi device for special testcases
        if attach_scsi_disk:
            shared_dir = os.path.dirname(shared_storage)
            # This is a workaround. It does not take effect to specify
            # this parameter in config file
Example #4
0
def run(test, params, env):
    """
    Test migration under stress.

    1) Validate the source/destination URIs and set up NFS sharing.
    2) Resize CPU/memory of the migrated VMs, then start them plus an
       optional set of load VMs.
    3) Run the migration while a stress workload executes, and verify
       guest networking on the destination afterwards.
    4) Clean up destination VMs and the NFS client environment.

    :param test: test object supplying ``bindir``.
    :param params: dict-like test parameters.
    :param env: test environment (address cache, object registry).
    :raises exceptions.TestSkipError: when fewer than two VMs are supplied
        or the URIs still contain placeholder values.
    """
    vm_names = params.get("migration_vms").split()
    if len(vm_names) < 2:
        raise exceptions.TestSkipError("Provide enough vms for migration")

    src_uri = libvirt_vm.complete_uri(
        params.get("migrate_source_host", "EXAMPLE"))
    if src_uri.count('///') or src_uri.count('EXAMPLE'):
        raise exceptions.TestSkipError("The src_uri '%s' is invalid" % src_uri)

    dest_uri = libvirt_vm.complete_uri(
        params.get("migrate_dest_host", "EXAMPLE"))
    if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
        raise exceptions.TestSkipError("The dest_uri '%s' is invalid" %
                                       dest_uri)

    # Params for NFS and SSH setup
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_user"] = "******"
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_user"] = "******"
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")

    # Configure NFS client on remote host
    nfs_client = nfs.NFSClient(params)
    nfs_client.setup()

    # Migrated vms' instance
    vms = []
    for vm_name in vm_names:
        vms.append(
            libvirt_vm.VM(vm_name, params, test.bindir,
                          env.get("address_cache")))

    load_vm_names = params.get("load_vms").split()
    # vms for load
    load_vms = []
    for vm_name in load_vm_names:
        load_vms.append(
            libvirt_vm.VM(vm_name, params, test.bindir,
                          env.get("address_cache")))
    params['load_vms'] = load_vms

    cpu = int(params.get("smp", 1))
    # "mem" is in MiB; memory is tracked in KiB from here on
    # (utils_memory.memtotal() reports KiB as well).
    memory = int(params.get("mem")) * 1024
    stress_type = params.get("migration_stress_type")
    vm_bytes = params.get("stress_vm_bytes")
    stress_args = params.get("stress_args")
    migration_type = params.get("migration_type")
    start_migration_vms = "yes" == params.get("start_migration_vms", "yes")
    thread_timeout = int(params.get("thread_timeout", 120))
    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    prompt = params.get("shell_prompt", r"[\#\$]")

    # Set vm_bytes for start_cmd
    mem_total = utils_memory.memtotal()
    vm_reserved = len(vms) * memory
    if vm_bytes == "half":
        # Integer floor division: under Python 3 '/' would yield a float,
        # which the stress command line cannot accept.
        vm_bytes = (mem_total - vm_reserved) // 2
    elif vm_bytes == "shortage":
        vm_bytes = mem_total - vm_reserved + 524288
    if vm_bytes is not None:
        params["stress_args"] = stress_args % vm_bytes

    for vm in vms:
        # Keep vm dead for edit
        if vm.is_alive():
            vm.destroy()
        set_cpu_memory(vm.name, cpu, memory)

    try:
        vm_ipaddr = {}
        if start_migration_vms:
            for vm in vms:
                vm.start()
                vm.wait_for_login()
                vm_ipaddr[vm.name] = vm.get_address()
                # TODO: recover vm if start failed?
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        do_stress_migration(vms, src_uri, dest_uri, stress_type,
                            migration_type, params, thread_timeout)
        # Check network of vms on destination
        if start_migration_vms and migration_type != "cross":
            for vm in vms:
                utils_test.check_dest_vm_network(vm, vm_ipaddr[vm.name],
                                                 remote_host, username,
                                                 password, prompt)
    finally:
        logging.debug("Cleanup vms...")
        for vm_name in vm_names:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get("address_cache"))
            utlv.MigrationTest().cleanup_dest_vm(vm, None, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)

        if nfs_client:
            logging.info("Cleanup NFS client environment...")
            nfs_client.cleanup()
        env.clean_objects()