def run(test, params, env):
    """
    Test migration of multiple vms.

    :param test: test object
    :param params: test parameters dictionary
    :param env: test environment object
    :raises error.TestNAError: when fewer than two vms are configured or the
                               URIs are the unconfigured defaults
    :raises error.TestFail: when migration (or job abort) fails unexpectedly
    """
    vm_names = params.get("vms").split()
    if len(vm_names) < 2:
        raise error.TestNAError("No multi vms provided.")

    # Prepare parameters
    method = params.get("virsh_migrate_method")
    simultaneous = "yes" == params.get("simultaneous_migration", "no")
    jobabort = "yes" == params.get("virsh_migrate_jobabort", "no")
    options = params.get("virsh_migrate_options", "")
    status_error = "yes" == params.get("status_error", "no")
    remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM")
    local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM")
    host_user = params.get("host_user", "root")
    host_passwd = params.get("host_password", "PASSWORD")
    desturi = libvirt_vm.get_uri_with_transport(transport="ssh",
                                                dest_ip=remote_host)
    srcuri = libvirt_vm.get_uri_with_transport(transport="ssh",
                                               dest_ip=local_host)

    # Don't allow the defaults: a local ('///') or placeholder ('EXAMPLE')
    # URI means the test was never configured for a real remote migration.
    # Fixed: format args were previously passed as extra exception arguments
    # instead of being interpolated into the message.
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        raise error.TestNAError("The srcuri '%s' is invalid" % srcuri)
    if desturi.count('///') or desturi.count('EXAMPLE'):
        raise error.TestNAError("The desturi '%s' is invalid" % desturi)

    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, host_user, host_passwd, port=22)

    # Prepare one MigrationHelper instance per vm
    helpers = []
    for vm_name in vm_names:
        helper = MigrationHelper(vm_name, test, params, env)
        helper.set_virsh_instance()
        helper.set_migration_cmd(options, method, desturi)
        helpers.append(helper)

    # Make sure every vm is running and reachable before migrating
    for helper in helpers:
        vm = helper.vm
        if vm.is_dead():
            vm.start()
        vm.wait_for_login()

    try:
        # Fixed: honor the configured 'simultaneous' and 'jobabort' flags
        # instead of hard-coding both to False.
        multi_migration(helpers, simultaneous=simultaneous,
                        jobabort=jobabort)
    finally:
        for helper in helpers:
            helper.virsh_instance.close_session()
            helper.cleanup_vm(srcuri, desturi)

        # NOTE(review): ret_migration/ret_jobabort are assumed to be module
        # globals updated by multi_migration() — confirm in the full file.
        if not ret_migration:
            if not status_error:
                raise error.TestFail("Migration test failed.")
        if not ret_jobabort:
            if not status_error:
                raise error.TestFail("Abort migration failed.")
Example #2
0
def run(test, params, env):
    """
    Test migration of multiple vms.

    :param test: test object
    :param params: test parameters dictionary
    :param env: test environment object
    :raises error.TestNAError: when fewer than two vms are configured or the
                               URIs are the unconfigured defaults
    :raises error.TestFail: when migration (or job abort) fails unexpectedly
    """
    vm_names = params.get("vms").split()
    if len(vm_names) < 2:
        raise error.TestNAError("No multi vms provided.")

    # Prepare parameters
    method = params.get("virsh_migrate_method")
    simultaneous = "yes" == params.get("simultaneous_migration", "no")
    jobabort = "yes" == params.get("virsh_migrate_jobabort", "no")
    options = params.get("virsh_migrate_options", "")
    status_error = "yes" == params.get("status_error", "no")
    remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM")
    local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM")
    host_user = params.get("host_user", "root")
    host_passwd = params.get("host_password", "PASSWORD")
    desturi = libvirt_vm.get_uri_with_transport(transport="ssh",
                                                dest_ip=remote_host)
    srcuri = libvirt_vm.get_uri_with_transport(transport="ssh",
                                               dest_ip=local_host)

    # Don't allow the defaults: a local ('///') or placeholder ('EXAMPLE')
    # URI means the test was never configured for a real remote migration.
    # Fixed: format args were previously passed as extra exception arguments
    # instead of being interpolated into the message.
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        raise error.TestNAError("The srcuri '%s' is invalid" % srcuri)
    if desturi.count('///') or desturi.count('EXAMPLE'):
        raise error.TestNAError("The desturi '%s' is invalid" % desturi)

    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, host_user, host_passwd, port=22)

    # Prepare one MigrationHelper instance per vm
    helpers = []
    for vm_name in vm_names:
        helper = MigrationHelper(vm_name, test, params, env)
        helper.set_virsh_instance()
        helper.set_migration_cmd(options, method, desturi)
        helpers.append(helper)

    # Make sure every vm is running and reachable before migrating
    for helper in helpers:
        vm = helper.vm
        if vm.is_dead():
            vm.start()
        vm.wait_for_login()

    try:
        # Fixed: honor the configured 'simultaneous' and 'jobabort' flags
        # instead of hard-coding both to False.
        multi_migration(helpers, simultaneous=simultaneous,
                        jobabort=jobabort)
    finally:
        for helper in helpers:
            helper.virsh_instance.close_session()
            helper.cleanup_vm(srcuri, desturi)

        # NOTE(review): ret_migration/ret_jobabort are assumed to be module
        # globals updated by multi_migration() — confirm in the full file.
        if not ret_migration:
            if not status_error:
                raise error.TestFail("Migration test failed.")
        if not ret_jobabort:
            if not status_error:
                raise error.TestFail("Abort migration failed.")
Example #3
0
 def setup_ssh(self):
     """Install our ssh key on the host if password auth is configured
     but key-based ssh ping does not work yet."""
     if not self.password:
         return
     try:
         self.ssh_ping()
     except error.AutoservSshPingHostError:
         # Key login failed; use the password to set up the key.
         ssh_key.setup_ssh_key(self.hostname, self.user, self.password,
                               self.port)
Example #4
0
 def setup_ssh(self):
     """Install our ssh key on the host if password auth is configured
     but key-based ssh ping does not work yet."""
     if not self.password:
         return
     try:
         self.ssh_ping()
     except error.AutoservSshPingHostError:
         # Key login failed; use the password to set up the key.
         ssh_key.setup_ssh_key(self.hostname, self.user, self.password,
                               self.port)
Example #5
0
    def env_setup(ip):
        """Prepare one remote host: ssh key, firewall, arp and netperf files."""
        logging.debug("Setup env for %s" % ip)
        # Passwordless login so the ssh_cmd/scp calls below don't prompt.
        ssh_key.setup_ssh_key(hostname=ip, user=username,
                              password=password, port=shell_port)
        for setup in ("service iptables stop",
                      "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore"):
            ssh_cmd(ip, setup)

        netperf_dir = os.path.join(os.environ["AUTODIR"], "tests/netperf2")
        for netperf_file in params.get("netperf_files").split():
            src = "%s/%s" % (netperf_dir, netperf_file)
            remote.scp_to_remote(ip, shell_port, username, password,
                                 src, "/tmp/")
        ssh_cmd(ip, params.get("setup_cmd"))
Example #6
0
    def env_setup(ip):
        """Prepare one remote host: ssh key, firewall, arp and netperf files."""
        logging.debug("Setup env for %s" % ip)
        # Passwordless login so the ssh_cmd/scp calls below don't prompt.
        ssh_key.setup_ssh_key(hostname=ip, user=username,
                              password=password, port=shell_port)
        for setup in ("service iptables stop",
                      "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore"):
            ssh_cmd(ip, setup)

        netperf_dir = os.path.join(os.environ['AUTODIR'], "tests/netperf2")
        for netperf_file in params.get("netperf_files").split():
            src = "%s/%s" % (netperf_dir, netperf_file)
            remote.scp_to_remote(ip, shell_port, username, password,
                                 src, "/tmp/")
        ssh_cmd(ip, params.get("setup_cmd"))
Example #7
0
def run(test, params, env):
    """
    Test convert vm to ovirt.

    :param test: test object
    :param params: test parameters dictionary
    :param env: test environment object
    :raises error.TestError: if the ssh-agent setup fails
    :raises error.TestFail: if the conversion or the oVirt import fails
    """
    args_dict = get_args_dict(params)
    hypervisor = args_dict.get('hypervisor')
    xen_ip = args_dict.get('xen_ip')
    xen_pwd = args_dict.get('xen_pwd')
    remote_node_user = args_dict.get('remote_node_user', 'root')
    vpx_pwd = args_dict.get('vpx_pwd')
    vpx_pwd_file = args_dict.get('vpx_pwd_file')
    address_cache = env.get('address_cache')
    if hypervisor == 'xen':
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(xen_ip,
                              user=remote_node_user,
                              port=22,
                              password=xen_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            # Fixed: narrowed the bare 'except:' so KeyboardInterrupt and
            # SystemExit are not swallowed; kill the agent before failing.
            utils.run("ssh-agent -k")
            raise error.TestError("Failed to start 'ssh-agent'")

    if hypervisor == 'esx':
        # Fixed: use a context manager so the password file is closed even
        # when the write raises.
        with open(vpx_pwd_file, 'w') as fp:
            fp.write(vpx_pwd)

    try:
        # Set libguestfs environment variable
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Run virt-v2v command
        ret = utils_v2v.v2v_cmd(args_dict)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise error.TestFail("Convert VM failed")

        # Import the VM to oVirt Data Center from export domain
        if not utils_v2v.import_to_ovirt(params, address_cache):
            raise error.TestFail("Import VM failed")
    finally:
        if hypervisor == "xen":
            utils.run("ssh-agent -k")
        if hypervisor == "esx":
            utils.run("rm -rf %s" % vpx_pwd_file)
Example #8
0
def run(test, params, env):
    """
    Test convert vm to ovirt.

    :param test: test object
    :param params: test parameters dictionary
    :param env: test environment object
    :raises error.TestError: if the ssh-agent setup fails
    :raises error.TestFail: if the conversion or the oVirt import fails
    """
    args_dict = get_args_dict(params)
    hypervisor = args_dict.get('hypervisor')
    xen_ip = args_dict.get('xen_ip')
    xen_pwd = args_dict.get('xen_pwd')
    remote_node_user = args_dict.get('remote_node_user', 'root')
    vpx_pwd = args_dict.get('vpx_pwd')
    vpx_pwd_file = args_dict.get('vpx_pwd_file')
    address_cache = env.get('address_cache')
    if hypervisor == 'xen':
        # Set up ssh access using ssh-agent and authorized_keys
        ssh_key.setup_ssh_key(xen_ip, user=remote_node_user,
                              port=22, password=xen_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            # Fixed: narrowed the bare 'except:' so KeyboardInterrupt and
            # SystemExit are not swallowed; kill the agent before failing.
            utils.run("ssh-agent -k")
            raise error.TestError("Failed to start 'ssh-agent'")

    if hypervisor == 'esx':
        # Fixed: use a context manager so the password file is closed even
        # when the write raises.
        with open(vpx_pwd_file, 'w') as fp:
            fp.write(vpx_pwd)

    try:
        # Set libguestfs environment variable
        os.environ['LIBGUESTFS_BACKEND'] = 'direct'

        # Run virt-v2v command
        ret = utils_v2v.v2v_cmd(args_dict)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            raise error.TestFail("Convert VM failed")

        # Import the VM to oVirt Data Center from export domain
        if not utils_v2v.import_to_ovirt(params, address_cache):
            raise error.TestFail("Import VM failed")
    finally:
        if hypervisor == "xen":
            utils.run("ssh-agent -k")
        if hypervisor == "esx":
            utils.run("rm -rf %s" % vpx_pwd_file)
Example #9
0
def v2v_cmd(params):
    """
    Append cmd to 'virt-v2v' and execute, optionally return full results.

    :param params: A dictionary includes all of required parameters such as
                    'target', 'hypervisor' and 'hostname', etc.
    :return: CmdResult object of the executed virt-v2v command
    :raise ValueError: if the virt-v2v executable is missing
    """
    if V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')

    target = params.get('target')
    hypervisor = params.get('hypervisor')
    hostname = params.get('hostname')
    username = params.get('username')
    password = params.get('password')

    uri_obj = Uri(hypervisor)
    # Return actual 'uri' according to 'hostname' and 'hypervisor'
    uri = uri_obj.get_uri(hostname)

    tgt_obj = Target(target, uri)
    # Return virt-v2v command line options based on 'target' and 'hypervisor'
    options = tgt_obj.get_cmd_options(params)

    # Convert an existing VM with or without connection authorization.
    if hypervisor == 'esx':
        build_esx_no_verify(params)
    elif hypervisor in ('xen', 'kvm'):
        # Setup ssh key to build the connection without a password.
        ssh_key.setup_ssh_key(hostname,
                              user=username,
                              port=22,
                              password=password)
    # Other hypervisors need no extra authorization setup.

    # Construct a final virt-v2v command
    cmd = '%s %s' % (V2V_EXEC, options)
    # Fixed: log the command directly; "'%s' % cmd" was a pointless eager
    # format that would also break if cmd ever contained '%' sequences.
    logging.debug(cmd)
    cmd_result = utils.run(cmd, verbose=DEBUG)
    return cmd_result
Example #10
0
def v2v_cmd(params):
    """
    Append cmd to 'virt-v2v' and execute, optionally return full results.

    :param params: A dictionary includes all of required parameters such as
                    'target', 'hypervisor' and 'hostname', etc.
    :return: CmdResult object of the executed virt-v2v command
    :raise ValueError: if the virt-v2v executable is missing
    """
    if V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')

    target = params.get('target')
    hypervisor = params.get('hypervisor')
    hostname = params.get('hostname')
    username = params.get('username')
    password = params.get('password')

    uri_obj = v2v_utils.Uri(hypervisor)
    # Return actual 'uri' according to 'hostname' and 'hypervisor'
    uri = uri_obj.get_uri(hostname)

    tgt_obj = v2v_utils.Target(target, uri)
    # Return virt-v2v command line options based on 'target' and 'hypervisor'
    options = tgt_obj.get_cmd_options(params)

    # Convert an existing VM with or without connection authorization.
    if hypervisor == 'esx':
        v2v_utils.build_esx_no_verify(params)
    elif hypervisor in ('xen', 'kvm'):
        # Setup ssh key to build the connection without a password.
        ssh_key.setup_ssh_key(hostname, user=username, port=22,
                              password=password)
    # Other hypervisors need no extra authorization setup.

    # Construct a final virt-v2v command
    cmd = '%s %s' % (V2V_EXEC, options)
    # Fixed: log the command directly; "'%s' % cmd" was a pointless eager
    # format that would also break if cmd ever contained '%' sequences.
    logging.debug(cmd)
    cmd_result = utils.run(cmd, verbose=DEBUG)
    return cmd_result
Example #11
0
def mount_guestfs_with_sshfs(vms):
    """
    Mount the guest filesystem with sshfs.
    """
    guestmount_path = os.path.join(data_dir.get_tmp_dir(), "guestmount")
    if not os.path.isdir(guestmount_path):
        os.makedirs(guestmount_path)
    sshfs_cmd = "sshfs -o allow_other,direct_io "
    for vm in vms:
        # One mount point per guest, keyed by its qemu pid.
        mount_point = os.path.join(guestmount_path, str(vm.get_pid()))
        if not os.path.isdir(mount_point):
            os.makedirs(mount_point)
        address = vm.get_address()
        # Passwordless ssh so sshfs can mount without prompting.
        ssh_key.setup_ssh_key(hostname=address,
                              user=vm.params.get("username", ""),
                              password=vm.params.get("password", ""),
                              port=22)
        cmd = "%s %s:/ %s" % (sshfs_cmd, address, mount_point)
        result = utils.run(cmd, ignore_status=True)
        if result.exit_status:
            raise error.TestFail("Failed to use sshfs for guestmount.\n"
                                 "Detail:%s." % result)
    return guestmount_path
Example #12
0
def mount_guestfs_with_sshfs(vms):
    """
    Mount the guest filesystem with sshfs.
    """
    guestmount_path = os.path.join(data_dir.get_tmp_dir(), "guestmount")
    if not os.path.isdir(guestmount_path):
        os.makedirs(guestmount_path)
    sshfs_cmd = "sshfs -o allow_other,direct_io "
    for vm in vms:
        # One mount point per guest, keyed by its qemu pid.
        mount_point = os.path.join(guestmount_path, str(vm.get_pid()))
        if not os.path.isdir(mount_point):
            os.makedirs(mount_point)
        address = vm.get_address()
        # Passwordless ssh so sshfs can mount without prompting.
        ssh_key.setup_ssh_key(hostname=address,
                              user=vm.params.get("username", ""),
                              password=vm.params.get("password", ""),
                              port=22)
        cmd = "%s %s:/ %s" % (sshfs_cmd, address, mount_point)
        result = utils.run(cmd, ignore_status=True)
        if result.exit_status:
            raise error.TestFail("Failed to use sshfs for guestmount.\n"
                                 "Detail:%s." % result)
    return guestmount_path
Example #13
0
def run_convert_remote_vm(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).

    :param test: test object
    :param params: test parameters dictionary
    :param env: test environment object
    :raises error.TestFail: on any conversion or verification failure
    """
    # VM info
    vm_name = params.get("v2v_vm")

    # Remote host parameters
    remote_hostname = params.get("remote_hostname")
    username = params.get("username", "root")
    password = params.get("password")
    remote_hypervisor = params.get("remote_hypervisor")

    # Local pool parameters
    pool_type = params.get("pool_type", "dir")
    pool_name = params.get("pool_name", "v2v_test")
    target_path = params.get("target_path", "pool_path")
    block_device = params.get("block_device")
    vg_name = params.get("volume_group_name", "vg_v2v")
    # If target_path is not an abs path, join it to test.tmpdir.
    # Fixed: compare with '==' instead of 'is' — identity comparison with a
    # string literal is implementation-dependent (SyntaxWarning on modern
    # Python) and may silently skip this branch.
    if os.path.dirname(target_path) == "":
        target_path = os.path.join(test.tmpdir, target_path)

    # dir pool needs an existing path
    if pool_type == "dir":
        if not os.path.exists(target_path):
            os.mkdir(target_path)

    # V2V parameters
    # Fixed: renamed local from 'input' to avoid shadowing the builtin.
    input_method = params.get("input_method")
    files = params.get("config_files")
    network = params.get("network", "default")

    # Result check about
    ignore_virtio = "yes" == params.get("ignore_virtio", "no")

    # Create remote uri for remote host
    # Remote virt-v2v uri's instance
    ruri = utils_v2v.Uri(remote_hypervisor)
    remote_uri = ruri.get_uri(remote_hostname)

    # Config ssh autologin so virt-v2v can reach the remote host.
    ssh_key.setup_ssh_key(remote_hostname, user=username, port=22,
                          password=password)

    # Check remote vms
    remote_vm = libvirt_vm.VM(vm_name, params, test.bindir,
                              env.get("address_cache"))
    remote_vm.connect_uri = remote_uri
    if not remote_vm.exists():
        raise error.TestFail("Couldn't find vm '%s' to be converted "
                             "on remote uri '%s'." % (vm_name, remote_uri))

    # Remote storage pool's instance
    rsp = libvirt_storage.StoragePool(remote_uri)
    # Put remote vm's disk into a directory storage pool
    prepare_remote_sp(rsp, remote_vm, pool_name)

    # Local storage pool's instance
    lsp = libvirt_storage.StoragePool()
    try:
        # Create storage pool for test
        if pool_type == "dir":
            if not create_dir_pool(lsp, pool_name, target_path):
                raise error.TestFail("Prepare directory storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "partition":
            if not create_partition_pool(lsp, pool_name, block_device,
                                         target_path):
                raise error.TestFail("Prepare partition storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "lvm":
            if not create_lvm_pool(lsp, pool_name, block_device, vg_name,
                                   target_path):
                # Fixed typo in the error message ("Preapre").
                raise error.TestFail("Prepare lvm storage pool for "
                                     "virt-v2v failed.")

        # Maintain a single params for v2v to avoid duplicate parameters
        v2v_params = {"hostname": remote_hostname, "username": username,
                      "password": password, "hypervisor": remote_hypervisor,
                      "storage": pool_name, "network": network,
                      "target": "libvirt", "vms": vm_name,
                      "input": input_method, "files": files}
        try:
            result = utils_v2v.v2v_cmd(v2v_params)
        except error.CmdError:
            raise error.TestFail("Virt v2v failed.")

        # v2v may be successful, but devices' driver may be not virtio
        error_info = []
        # Check v2v vm on local host
        # Update parameters for local hypervisor and vm
        params['vms'] = vm_name
        params['target'] = "libvirt"
        vm_check = utils_v2v.LinuxVMCheck(test, params, env)
        if not vm_check.is_disk_virtio():
            error_info.append("Error:disk type was not converted to virtio.")
        if not vm_check.is_net_virtio():
            error_info.append("Error:nic type was not converted to virtio.")

        # Close vm for cleanup
        if vm_check.vm is not None and vm_check.vm.is_alive():
            vm_check.vm.destroy()

        # Any collected virtio problem fails the test unless explicitly
        # ignored by configuration.
        if not ignore_virtio and error_info:
            raise error.TestFail(error_info)
    finally:
        cleanup_vm(vm_name)
        lsp.delete_pool(pool_name)
        rsp.delete_pool(pool_name)
Example #14
0
def run(test, params, env):
    """
    Test command: migrate-compcache <domain> [--size <number>]

    1) Run migrate-compcache command and check return code.

    :param test: test object
    :param params: test parameters dictionary
    :param env: test environment object
    :raises error.TestNAError: if virsh lacks the migrate-compcache command
    :raises error.TestFail: if the result does not match the expectation
    """
    vm_ref = params.get("vm_ref", "name")
    vm_name = params.get('main_vm')
    start_vm = 'yes' == params.get('start_vm', 'yes')
    pause_vm = 'yes' == params.get('pause_after_start_vm', 'no')
    expect_succeed = 'yes' == params.get('expect_succeed', 'yes')
    size_option = params.get('size_option', 'valid')
    action = params.get('compcache_action', 'get')
    vm = env.get_vm(vm_name)

    # Check if the virsh command migrate-compcache is available
    if not virsh.has_help_command('migrate-compcache'):
        raise error.TestNAError("This version of libvirt does not support "
                                "virsh command migrate-compcache")

    # Prepare the VM state if it's not correct.
    if start_vm and not vm.is_alive():
        vm.start()
    elif not start_vm and vm.is_alive():
        vm.destroy()
    if pause_vm and not vm.is_paused():
        vm.pause()

    # Setup domain reference
    if vm_ref == 'domname':
        vm_ref = vm_name

    # Setup size according to size_option:
    # minimal: Same as memory page size
    # maximal: Same as guest memory
    # empty: An empty string
    # small: One byte less than page size
    # large: Larger than guest memory
    # huge : Largest int64
    page_size = get_page_size()
    if size_option == 'minimal':
        size = str(page_size)
    elif size_option == 'maximal':
        size = str(vm.get_max_mem() * 1024)
    elif size_option == 'empty':
        size = '""'
    elif size_option == 'small':
        size = str(page_size - 1)
    elif size_option == 'large':
        # Guest memory is larger than the max mem set,
        # add 50MB to ensure size exceeds guest memory.
        size = str(vm.get_max_mem() * 1024 + 50000000)
    elif size_option == 'huge':
        size = str(2**64 - 1)
    else:
        # Any other value is passed through verbatim (e.g. 'valid' or an
        # explicit number supplied by the test configuration).
        size = size_option

    # If we need to get, just omit the size option
    if action == 'get':
        size = None

    # Run testing command
    result = virsh.migrate_compcache(vm_ref, size=size)
    logging.debug(result)

    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    # Only verified below when a real migration was started.
    check_job_compcache = False
    # Skip the real-migration verification when the destination host is the
    # unconfigured placeholder, no size was set, or failure is expected.
    if not remote_host.count(
            "EXAMPLE") and size is not None and expect_succeed:
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_pwd, port=22)
        if vm.is_dead():
            vm.start()
        if vm.is_paused():
            vm.resume()
        vm.wait_for_login()
        # Do actual migration to verify compression cache of migrate jobs
        command = "virsh migrate %s %s --compressed" % (vm_name, remote_uri)
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # Give enough time for starting job
        # Poll for up to ~5 seconds until the migration job appears.
        t = 0
        while t < 5:
            jobtype = vm.get_job_type()
            if "None" == jobtype:
                t += 1
                time.sleep(1)
                continue
            elif jobtype is False:
                logging.error("Get job type failed.")
                break
            else:
                logging.debug("Job started: %s", jobtype)
                break

        # Capture the job info while the migration is (presumably) running;
        # parsed later to verify the compression cache value.
        jobinfo = virsh.domjobinfo(vm_ref, debug=True,
                                   ignore_status=True).stdout
        check_job_compcache = True
        # If the migration process already exited, make sure it is reaped.
        if p.poll():
            try:
                p.kill()
            except OSError:
                pass

        # Cleanup in case of successful migration
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # Shut down the VM to make sure the compcache setting cleared
    if vm.is_alive():
        vm.destroy()

    # Check test result
    if expect_succeed:
        if result.exit_status != 0:
            raise error.TestFail(
                'Expected succeed, but failed with result:\n%s' % result)
        if check_job_compcache:
            # Find the "Compression cache" line in the job info and compare
            # its value (converted to bytes according to the printed unit)
            # with the size we configured.
            for line in jobinfo.splitlines():
                detail = line.split(":")
                if detail[0].count("Compression cache"):
                    value = detail[-1].split()[0].strip()
                    value = int(float(value))
                    unit = detail[-1].split()[-1].strip()
                    if unit == "KiB":
                        size = int(int(size) / 1024)
                    elif unit == "MiB":
                        size = int(int(size) / 1048576)
                    elif unit == "GiB":
                        size = int(int(size) / 1073741824)
                    if value != size:
                        raise error.TestFail("Compression cache is not match"
                                             " with setted")
                    else:
                        return
            raise error.TestFail("Get compression cahce in job failed.")
    elif not expect_succeed:
        if result.exit_status == 0:
            raise error.TestFail(
                'Expected fail, but succeed with result:\n%s' % result)
Example #15
0
def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).
    """
    # VM info
    xen_vm_name = params.get("v2v_xen_vm")
    vmware_vm_name = params.get("v2v_vmware_vm")

    # Remote host parameters
    xen_ip = params.get("remote_xen_ip", "XEN.EXAMPLE")
    vmware_ip = params.get("remote_vmware_ip", "VMWARE.EXAMPLE")
    username = params.get("username", "root")
    xen_pwd = params.get("remote_xen_pwd", "PWD.EXAMPLE")
    vmware_pwd = params.get("remote_vmware_pwd", "PWD.EXAMPLE")
    # To decide which type test it is
    remote_hypervisor = params.get("remote_hypervisor")

    # Local pool parameters
    pool_type = params.get("pool_type", "dir")
    pool_name = params.get("pool_name", "v2v_test")
    target_path = params.get("target_path", "pool_path")
    emulated_img = params.get("emulated_image_path", "v2v_emulated.img")
    emulated_size = params.get("emulated_image_size", "10G")

    # If target_path is not an abs path, join it to data_dir.tmpdir
    if os.path.dirname(target_path) is "":
        target_path = os.path.join(data_dir.get_tmp_dir(), target_path)

    # V2V parameters
    input = params.get("input_method")
    files = params.get("config_files")
    network = params.get("network")
    bridge = params.get("bridge")

    # Result check about
    ignore_virtio = "yes" == params.get("ignore_virtio", "no")

    # Create autologin to remote host
    esx_netrc = params.get("esx_netrc") % (vmware_ip, username, vmware_pwd)
    params['netrc'] = esx_netrc
    if remote_hypervisor == "esx":
        remote_ip = vmware_ip
        remote_pwd = vmware_pwd
        vm_name = vmware_vm_name
        if remote_ip.count("EXAMPLE") or remote_pwd.count("EXAMPLE"):
            raise error.TestNAError("Please provide host or password for "
                                    "vmware test.")
        utils_v2v.build_esx_no_verify(params)
    else:
        remote_ip = xen_ip
        remote_pwd = xen_pwd
        vm_name = xen_vm_name
        if remote_ip.count("EXAMPLE") or remote_pwd.count("EXAMPLE"):
            raise error.TestNAError("Please provide host or password for "
                                    "xen test.")
        ssh_key.setup_ssh_key(xen_ip, user=username, port=22, password=xen_pwd)

    # Create remote uri for remote host
    # Remote virt-v2v uri's instance
    ruri = utils_v2v.Uri(remote_hypervisor)
    remote_uri = ruri.get_uri(remote_ip)

    # Check remote vms
    rvirsh_dargs = {
        'uri': remote_uri,
        'remote_ip': remote_ip,
        'remote_user': username,
        'remote_pwd': remote_pwd
    }
    rvirsh = virsh.VirshPersistent(**rvirsh_dargs)
    if not rvirsh.domain_exists(vm_name):
        rvirsh.close_session()
        raise error.TestFail("Couldn't find vm '%s' to be converted "
                             "on remote uri '%s'." % (vm_name, remote_uri))

    if remote_hypervisor != "esx":
        remote_vm = libvirt_vm.VM(vm_name, params, test.bindir,
                                  env.get("address_cache"))
        remote_vm.connect_uri = remote_uri
        # Remote storage pool's instance
        rsp = libvirt_storage.StoragePool(rvirsh)
        # Put remote vm's disk into a directory storage pool
        prepare_remote_sp(rsp, remote_vm, pool_name)

    # Prepare local libvirt storage pool
    pvt = utlv.PoolVolumeTest(test, params)

    # Local storage pool's instance
    lsp = libvirt_storage.StoragePool()
    try:
        # Create storage pool for test
        pvt.pre_pool(pool_name, pool_type, target_path, emulated_img,
                     emulated_size)
        logging.debug(lsp.pool_info(pool_name))

        # Maintain a single params for v2v to avoid duplicate parameters
        v2v_params = {
            "hostname": remote_ip,
            "username": username,
            "password": remote_pwd,
            "hypervisor": remote_hypervisor,
            "storage": pool_name,
            "network": network,
            "bridge": bridge,
            "target": "libvirt",
            "vms": vm_name,
            "netrc": esx_netrc,
            "input": input,
            "files": files
        }
        try:
            result = utils_v2v.v2v_cmd(v2v_params)
            logging.debug(result)
        except error.CmdError, detail:
            raise error.TestFail("Virt v2v failed:\n%s" % str(detail))

        # v2v may be successful, but devices' driver may be not virtio
        error_info = []
        # Check v2v vm on local host
        # Update parameters for local hypervisor and vm
        logging.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        params['vms'] = vm_name
        params['target'] = "libvirt"
        vm_check = utils_v2v.LinuxVMCheck(test, params, env)
        try:
            if not vm_check.is_disk_virtio():
                error_info.append("Error:disk type was not converted to "
                                  "virtio.")
            if not vm_check.is_net_virtio():
                error_info.append("Error:nic type was not converted to "
                                  "virtio.")
        except (remote.LoginError, virt_vm.VMError), detail:
            error_info.append(str(detail))
    # Connection to remote, init here for cleanup
    runner = None
    # Identify easy config. mistakes early
    warning_text = ("Migration VM %s URI %s appears problematic "
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using "
                    "fully-qualified network-based style.")

    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        raise error.TestNAError(warning_text % ('source', srcuri))

    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        raise error.TestNAError(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")

    status_error = "yes" == params.get("status_error", "no")
    if source_type == "file" and device_type == "lun":
        status_error = True
def run(test, params, env):
    """
    Test command: migrate-compcache <domain> [--size <number>]

    1) Run the migrate-compcache command and check its return code.
    2) Optionally start a real migration and verify the compression
       cache size reported by 'virsh domjobinfo' matches the value set.
    """
    vm_ref = params.get("vm_ref", "name")
    vm_name = params.get("migrate_main_vm")
    start_vm = "yes" == params.get("start_vm", "yes")
    pause_vm = "yes" == params.get("pause_after_start_vm", "no")
    expect_succeed = "yes" == params.get("expect_succeed", "yes")
    size_option = params.get("size_option", "valid")
    action = params.get("compcache_action", "get")
    vm = env.get_vm(vm_name)

    # Skip the test on libvirt builds without the command.
    if not virsh.has_help_command("migrate-compcache"):
        raise error.TestNAError("This version of libvirt does not support "
                                "virsh command migrate-compcache")

    # Prepare the VM state if it's not correct.
    if start_vm and not vm.is_alive():
        vm.start()
    elif not start_vm and vm.is_alive():
        vm.destroy()
    if pause_vm and not vm.is_paused():
        vm.pause()

    # Setup domain reference
    if vm_ref == "domname":
        vm_ref = vm_name

    # Setup size according to size_option:
    # minimal: Same as memory page size
    # maximal: Same as guest memory
    # empty: An empty string
    # small: One byte less than page size
    # large: Larger than guest memory
    # huge : Largest int64
    page_size = get_page_size()
    if size_option == "minimal":
        size = str(page_size)
    elif size_option == "maximal":
        size = str(vm.get_max_mem() * 1024)
    elif size_option == "empty":
        size = '""'
    elif size_option == "small":
        size = str(page_size - 1)
    elif size_option == "large":
        # Guest memory is larger than the max mem set,
        # add 50MB to ensure size exceeds guest memory.
        size = str(vm.get_max_mem() * 1024 + 50000000)
    elif size_option == "huge":
        size = str(2 ** 64 - 1)
    else:
        size = size_option

    # If we need to get, just omit the size option
    if action == "get":
        size = None

    # Run testing command
    result = virsh.migrate_compcache(vm_ref, size=size)
    logging.debug(result)

    remote_uri = params.get("compcache_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    check_job_compcache = False
    compressed_size = None
    # Only attempt a real migration when a usable destination host is
    # configured (guard against an absent param, which is None) and a
    # cache size was actually set.
    if (remote_host and not remote_host.count("EXAMPLE") and
            size is not None and expect_succeed):
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_pwd, port=22)
        if vm.is_dead():
            vm.start()
        if vm.is_paused():
            vm.resume()
        vm.wait_for_login()
        # Do actual migration to verify compression cache of migrate jobs
        command = ("virsh migrate %s %s --compressed --unsafe --verbose"
                   % (vm_name, remote_uri))
        logging.debug("Start migrating: %s", command)
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)

        # Poll domjobinfo for up to 5 seconds until the migration job
        # appears and reports its compression cache size.
        t = 0
        while t < 5:
            jobinfo = virsh.domjobinfo(vm_ref, debug=True,
                                       ignore_status=True).stdout
            jobtype = "None"
            for line in jobinfo.splitlines():
                key = line.split(":")[0]
                if key.count("type"):
                    jobtype = line.split(":")[-1].strip()
                elif key.strip() == "Compression cache":
                    compressed_size = line.split(":")[-1].strip()
            if "None" == jobtype or compressed_size is None:
                t += 1
                time.sleep(1)
                continue
            else:
                check_job_compcache = True
                logging.debug("Job started: %s", jobtype)
                break

        # NOTE(review): p.poll() is None while the process is still
        # running, so this only kills processes that have already exited
        # with a non-zero status; 'p.poll() is None' may be the intent.
        if p.poll():
            try:
                p.kill()
            except OSError:
                pass

        # Cleanup in case of successful migration
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # Shut down the VM to make sure the compcache setting cleared
    if vm.is_alive():
        vm.destroy()

    # Check test result
    if expect_succeed:
        if result.exit_status != 0:
            raise error.TestFail("Expected succeed, but failed with result:"
                                 "\n%s" % result)
        if check_job_compcache:
            # Convert the human-readable size reported by domjobinfo into
            # the same unit as the configured cache before comparing.
            value = compressed_size.split()[0].strip()
            unit = compressed_size.split()[-1].strip()
            value = int(float(value))
            if unit == "KiB":
                size = int(int(size) / 1024)
            elif unit == "MiB":
                size = int(int(size) / 1048576)
            elif unit == "GiB":
                size = int(int(size) / 1073741824)
            if value != size:
                raise error.TestFail("Compression cache is not match"
                                     " with setted")
            # On a match the test simply passes; the original code had an
            # unreachable 'raise' after a return here (dead code removed).
        else:
            logging.warn("The compressed size wasn't been verified "
                         "during migration.")
    else:
        if result.exit_status == 0:
            raise error.TestFail("Expected fail, but succeed with result:"
                                 "\n%s" % result)
# Example #18
def run(test, params, env):
    """
    Test various options of virt-v2v.

    Builds a virt-v2v command line from the test parameters, runs it
    (optionally as an unprivileged user over qemu:///session) and checks
    the conversion result according to the options used
    (-oa / -of / -on / --no-copy / -oc / --vmtype).
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    v2v_options = params.get("v2v_options", "EXAMPLE")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    vpx_passwd = params.get("vpx_passwd", "EXAMPLE")
    ovirt_engine_url = params.get("ovirt_engine_url", "EXAMPLE")
    ovirt_engine_user = params.get("ovirt_engine_user", "EXAMPLE")
    ovirt_engine_passwd = params.get("ovirt_engine_password", "EXAMPLE")
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    export_name = params.get("export_name", "EXAMPLE")
    storage_name = params.get("storage_name", "EXAMPLE")
    disk_img = params.get("input_disk_image")
    nfs_storage = params.get("nfs_storage")
    mnt_point = params.get("mount_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    address_cache = env.get('address_cache')
    # Fail early when a mandatory parameter still holds the EXAMPLE
    # placeholder.  Several of these params may be absent (None), so the
    # membership test is guarded to avoid a TypeError on None.
    for param in [vm_name, remote_host, esx_ip, vpx_dc, ovirt_engine_url,
                  ovirt_engine_user, ovirt_engine_passwd, output_storage,
                  export_name, storage_name, disk_img, export_domain_uuid,
                  v2v_user]:
        if param and "EXAMPLE" in param:
            raise error.TestNAError("Please replace %s with real value" % param)

    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target_path", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    global vm_imported
    vm_imported = False
    new_v2v_user = False
    restore_image_owner = False
    # Initialize names referenced in the finally clause so cleanup does
    # not raise NameError when setup fails before they are assigned.
    vpx_passwd_file = ""
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")

    def create_pool():
        """
        Create libvirt pool as the output storage
        """
        if output_uri == "qemu:///session":
            # Session pools must be created by the unprivileged user itself.
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'mkdir %s'" % target_path
            utils.system(cmd, verbose=True)
            cmd = su_cmd + "'virsh pool-create-as %s dir" % pool_name
            cmd += " --target %s'" % target_path
            utils.system(cmd, verbose=True)
        else:
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)

    def cleanup_pool():
        """
        Clean up libvirt pool
        """
        if output_uri == "qemu:///session":
            cmd = su_cmd + "'virsh pool-destroy %s'" % pool_name
            utils.system(cmd, verbose=True)
            target_path = os.path.join("/home", v2v_user, pool_target)
            cmd = su_cmd + "'rm -rf %s'" % target_path
            utils.system(cmd, verbose=True)
        else:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)

    def get_all_uuids(output):
        """
        Get export domain uuid, image uuid and vol uuid from command output.
        """
        tmp_target = re.findall(r"qemu-img\sconvert\s.+\s'(\S+)'\n", output)
        if len(tmp_target) != 1:
            raise error.TestError("Fail to find tmp target file name when"
                                  " converting vm disk image")
        # Expected layout: /<mnt>/<export_uuid>/images/<image_uuid>/<vol_uuid>
        targets = tmp_target[0].split('/')
        return (targets[3], targets[5], targets[6])

    def get_ovf_content(output):
        """
        Find and read ovf file.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = utils.system_output("grep -R \"%s\" %s" % (ovf_id,
                                                             export_vm_dir))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                ovf_f = open(ovf_file, "r")
                ovf_content = ovf_f.read()
                ovf_f.close()
        else:
            logging.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Get the full path of the converted image.
        """
        img_path = ""
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        return img_path

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify vmtype in ovf file.
        """
        if output_mode != "rhev":
            return
        if expected_vmtype == "server":
            vmtype_int = 1
        elif expected_vmtype == "desktop":
            vmtype_int = 0
        else:
            return
        if "<VmType>%s</VmType>" % vmtype_int in ovf:
            logging.info("Find VmType=%s in ovf file",
                         expected_vmtype)
        else:
            raise error.TestFail("VmType check failed")

    def check_image(output, check_point, expected_value):
        """
        Verify converted image file allocation mode and format
        """
        img_path = get_img_path(output)
        if not img_path or not os.path.isfile(img_path):
            logging.error("Fail to get image path: %s", img_path)
            return
        img_info = utils_misc.get_image_info(img_path)
        logging.info("Image info after converted: %s", img_info)
        if check_point == "allocation":
            # A sparse image occupies less disk (dsize) than its virtual
            # size (vsize); preallocated occupies at least its vsize.
            if expected_value == "sparse":
                if img_info['vsize'] > img_info['dsize']:
                    logging.info("Image file is sparse")
                else:
                    raise error.TestFail("Image allocation check fail")
            elif expected_value == "preallocated":
                if img_info['vsize'] <= img_info['dsize']:
                    logging.info("Image file is preallocated")
                else:
                    raise error.TestFail("Image allocation check fail")
        if check_point == "format":
            if expected_value == img_info['format']:
                logging.info("Image file format is %s", expected_value)
            else:
                raise error.TestFail("Image format check fail")

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.
        """
        found = False
        # The branches must be mutually exclusive: with plain 'if'
        # statements the final 'else' bound only to the last test, so the
        # libvirt/local modes returned before 'found' was ever checked.
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        elif output_mode == "local":
            found = os.path.isfile(os.path.join(output_storage,
                                                expected_name + "-sda"))
        elif output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            # Unknown output mode: nothing to verify.
            return
        if found:
            logging.info("Guest name renamed when converting it")
        else:
            raise error.TestFail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image created if convert command use --no-copy option
        """
        img_path = get_img_path(output)
        if not os.path.isfile(img_path):
            logging.info("No image created with --no-copy option")
        else:
            raise error.TestFail("Find %s" % img_path)

    def check_connection(output, expected_uri):
        """
        Check output connection uri used when converting guest
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg in output:
            logging.info("Find message: %s", init_msg)
        else:
            raise error.TestFail("Not find message: %s" % init_msg)

    def check_result(cmd, result, status_error):
        """
        Check virt-v2v command result
        """
        utlv.check_exit_status(result, status_error)
        output = result.stdout + result.stderr
        if not status_error:
            if output_mode == "rhev":
                ovf = get_ovf_content(output)
                logging.debug("ovf content: %s", ovf)
                if '--vmtype' in cmd:
                    expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                    check_vmtype(ovf, expected_vmtype)
            if '-oa' in cmd and '--no-copy' not in cmd:
                expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
                check_image(output, "allocation", expected_mode)
            if '-of' in cmd and '--no-copy' not in cmd:
                expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
                check_image(output, "format", expected_format)
            if '-on' in cmd:
                expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
                check_new_name(output, expected_name)
            if '--no-copy' in cmd:
                check_nocopy(output)
            if '-oc' in cmd:
                expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
                check_connection(output, expected_uri)
            global vm_imported
            if output_mode == "rhev":
                if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                    raise error.TestFail("Import VM failed")
                else:
                    vm_imported = True
            if output_mode == "libvirt":
                if "qemu:///session" not in v2v_options:
                    virsh.start(vm_name, debug=True, ignore_status=False)

    try:
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            # Build network/bridge option
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode in ['libvirtxml', 'ova']:
            raise error.TestNAError("Unsupported input mode: %s" % input_mode)
        else:
            raise error.TestError("Unknown input mode %s" % input_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s -os %s" % (output_mode, output_storage)

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                raise error.TestError("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.mkdir(vdsm_domain_dir)
                os.mkdir(vdsm_image_dir)
                os.mkdir(vdsm_vm_dir)

        # Output more messages
        v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                utils.system("useradd %s" % v2v_user, ignore_status=True)
                new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            logging.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Change the image owner and group so the unprivileged
                # user can access it; remember the original ids so the
                # finally clause can restore them.
                img_stat = os.stat(disk_img)
                ori_owner = img_stat.st_uid
                # Was st_uid, which restored the wrong group on cleanup.
                ori_group = img_stat.st_gid
                os.chown(disk_img, user_info.pw_uid, user_info.pw_gid)
                restore_image_owner = True
            else:
                raise error.TestNAError("Only support convert local disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            os.environ['LIBGUESTFS_BACKEND'] = 'direct'
            user = params.get("xen_host_user", "root")
            passwd = params.get("xen_host_passwd", "redhat")
            logging.info("set up ssh-agent access ")
            ssh_key.setup_ssh_key(remote_host, user=user,
                                  port=22, password=passwd)
            utils_misc.add_identities_into_ssh_agent()
            # If the input format is not define, we need to either define
            # the original format in the source metadata(xml) or use '-of'
            # to force the output format, see BZ#1141723 for detail.
            if '-of' not in v2v_options:
                v2v_options += ' -of %s' % params.get("default_output_format",
                                                      "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            vpx_passwd = params.get("vpx_passwd")
            vpx_passwd_file = os.path.join(test.tmpdir, "vpx_passwd")
            logging.info("Building ESX no password interactive verification.")
            pwd_f = open(vpx_passwd_file, 'w')
            pwd_f.write(vpx_passwd)
            pwd_f.close()
            output_option += " --password-file %s" % vpx_passwd_file

        # Create libvirt dir pool
        if output_mode == "libvirt":
            create_pool()

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option,
                               output_option, v2v_options)
        if v2v_user:
            cmd = su_cmd + "'%s'" % cmd
        cmd_result = utils.run(cmd, timeout=v2v_timeout, verbose=True,
                               ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "xen":
            utils.run("ssh-agent -k")
        # vpx_passwd_file is empty unless the ESX password file was created.
        if hypervisor == "esx" and vpx_passwd_file:
            utils.run("rm -rf %s" % vpx_passwd_file)
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        # mnt_point may be None when the vdsm/rhev modes were not used.
        if mnt_point and os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                utils.system(cmd)
            else:
                virsh.remove_domain(vm_name)
            cleanup_pool()
        if output_mode == "rhev" and vm_imported:
            vm_c = utils_v2v.VMCheck(test, params, env)
            vm_c.cleanup()
        if new_v2v_user:
            utils.system("userdel -f %s" % v2v_user)
        if restore_image_owner:
            os.chown(disk_img, ori_owner, ori_group)
# Example #19
def run(test, params, env):
    """
    Convert a local vm disk to local libvirt(KVM).

    Copies the guest disk from a remote XEN host, builds a libvirt XML
    for it, runs virt-v2v into a freshly created local storage pool and
    then checks that disk/nic drivers were converted to virtio.
    """
    # VM info
    vm_name = params.get("v2v_vm")

    # Remote host parameters
    remote_hostname = params.get("remote_hostname", "XEN.EXAMPLE")
    username = params.get("remote_username", "root")
    password = params.get("remote_passwd", "XEN.PASSWORD")
    remote_hypervisor = params.get("remote_hypervisor")

    # NOTE(review): the default password is "XEN.PASSWORD", which does not
    # contain "EXAMPLE", so an unconfigured password passes this check --
    # confirm whether "PASSWORD" should be tested as well.
    if remote_hostname.count("EXAMPLE") or password.count("EXAMPLE"):
        raise error.TestNAError("Please provide XEN host first.")

    # Local pool parameters
    pool_type = params.get("pool_type", "dir")
    pool_name = params.get("pool_name", "v2v_test")
    target_path = params.get("target_path", "pool_path")
    emulated_img = params.get("emulated_image_path", "v2v_emulated.img")
    emulated_size = params.get("emulated_image_size", "10G")
    local_tmp_path = params.get("local_tmp_path", data_dir.get_tmp_dir())

    # If target_path is not an abs path, join it to data_dir.TMPDIR
    # NOTE(review): 'is ""' tests object identity, not equality; it happens
    # to work on CPython for the interned empty string but should be '== ""'.
    if os.path.dirname(target_path) is "":
        target_path = os.path.join(data_dir.get_tmp_dir(), target_path)

    # V2V parameters
    # NOTE: 'input' shadows the builtin of the same name (kept as-is).
    input = params.get("input_method")
    files = params.get("config_files")
    network = params.get("network")
    bridge = params.get("bridge")

    # Result check about
    ignore_virtio = "yes" == params.get("ignore_virtio", "no")

    # Create remote uri for remote host
    # Remote virt-v2v uri's instance
    ruri = utils_v2v.Uri(remote_hypervisor)
    remote_uri = ruri.get_uri(remote_hostname)

    # Passwordless ssh to the remote host is required by the copy below.
    ssh_key.setup_ssh_key(remote_hostname, user=username, port=22,
                          password=password)

    # Check remote vms
    remote_vm = libvirt_vm.VM(vm_name, params, test.bindir,
                              env.get("address_cache"))
    remote_vm.connect_uri = remote_uri
    if not remote_vm.exists():
        raise error.TestFail("Couldn't find vm '%s' to be converted "
                             "on remote uri '%s'." % (vm_name, remote_uri))

    # Prepare local libvirt storage pool
    pvt = utlv.PoolVolumeTest(test, params)

    # Local storage pool's instance
    lsp = libvirt_storage.StoragePool()
    # NOTE(review): this 'try' has no visible except/finally clause -- the
    # block appears truncated here; cleanup presumably follows elsewhere.
    try:
        # Create storage pool for test
        pvt.pre_pool(pool_name, pool_type, target_path, emulated_img,
                     emulated_size)
        logging.debug(lsp.pool_info(pool_name))

        # Copy remote vm's disk to local and create xml file for it
        tmp_xml_file = copy_remote_vm(remote_vm, local_tmp_path,
                                      remote_hostname, username, password)

        # Maintain a single params for v2v to avoid duplicate parameters
        v2v_params = {"hostname": remote_hostname, "username": username,
                      "password": password, "hypervisor": remote_hypervisor,
                      "storage": pool_name, "network": network,
                      "bridge": bridge, "target": "libvirtxml",
                      "vms": tmp_xml_file, "input": input, "files": files}
        try:
            result = utils_v2v.v2v_cmd(v2v_params)
            logging.debug(result)
        except error.CmdError, detail:
            raise error.TestFail("Virt v2v failed:\n%s" % str(detail))

        # v2v may be successful, but devices' driver may be not virtio
        error_info = []
        # Check v2v vm on local host
        # Update parameters for local hypervisor and vm
        params['vms'] = vm_name
        params['target'] = "libvirt"
        vm_check = utils_v2v.LinuxVMCheck(test, params, env)
        try:
            if not vm_check.is_disk_virtio():
                error_info.append("Error:disk type was not converted to "
                                  "virtio.")
            if not vm_check.is_net_virtio():
                error_info.append("Error:nic type was not converted to "
                                  "virtio.")
        except (remote.LoginError, virt_vm.VMError), detail:
            error_info.append(str(detail))
# Example #20
def run(test, params, env):
    """
    Test migration under stress.

    Starts the VMs to be migrated (optionally loading them via a stress
    tool), migrates them from src_uri to dest_uri, then verifies guest
    networking on the destination.
    """
    # Guard against an absent param (None) so split() cannot crash.
    vm_names = params.get("migration_vms", "").split()
    if len(vm_names) < 2:
        raise error.TestNAError("Provide enough vms for migration first.")

    src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
    if src_uri.count('///') or src_uri.count('EXAMPLE'):
        # Interpolate the URI into the message; passing it as a second
        # constructor argument left the '%s' placeholder unexpanded.
        raise error.TestNAError("The src_uri '%s' is invalid" % src_uri)

    dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")
    if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
        raise error.TestNAError("The dest_uri '%s' is invalid" % dest_uri)

    # Instances of the VMs to migrate
    vms = []
    for vm_name in vm_names:
        vms.append(
            libvirt_vm.VM(vm_name, params, test.bindir,
                          env.get("address_cache")))

    # VMs that only generate extra load during migration
    load_vm_names = params.get("load_vms", "").split()
    load_vms = []
    for vm_name in load_vm_names:
        load_vms.append(
            libvirt_vm.VM(vm_name, params, test.bindir,
                          env.get("address_cache")))
    params['load_vms'] = load_vms

    cpu = int(params.get("smp", 1))
    memory = int(params.get("mem")) * 1024
    stress_type = params.get("migration_stress_type")
    vm_bytes = params.get("stress_vm_bytes")
    stress_args = params.get("stress_args")
    migration_type = params.get("migration_type")
    start_migration_vms = "yes" == params.get("start_migration_vms", "yes")
    thread_timeout = int(params.get("thread_timeout", 120))
    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    prompt = params.get("shell_prompt", r"[\#\$]")

    # Compute the stress tool's --vm-bytes from the memory left after
    # reserving every migrated VM's RAM (values in KiB).
    mem_total = utils_memory.memtotal()
    vm_reserved = len(vms) * memory
    if vm_bytes == "half":
        vm_bytes = (mem_total - vm_reserved) / 2
    elif vm_bytes == "shortage":
        # Deliberately oversubscribe memory by 512 MiB.
        vm_bytes = mem_total - vm_reserved + 524288
    if vm_bytes is not None:
        params["stress_args"] = stress_args % vm_bytes

    for vm in vms:
        # Keep vm dead while its cpu/memory definition is edited.
        if vm.is_alive():
            vm.destroy()
        set_cpu_memory(vm.name, cpu, memory)

    try:
        vm_ipaddr = {}
        if start_migration_vms:
            for vm in vms:
                vm.start()
                vm.wait_for_login()
                vm_ipaddr[vm.name] = vm.get_address()
                # TODO: recover vm if start failed?
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        do_stress_migration(vms, src_uri, dest_uri, stress_type,
                            migration_type, params, thread_timeout)
        # Check network of vms on destination
        if start_migration_vms and migration_type != "cross":
            for vm in vms:
                check_dest_vm_network(vm, vm_ipaddr[vm.name], remote_host,
                                      username, password, prompt)
    finally:
        logging.debug("Cleanup vms...")
        for vm_name in vm_names:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get("address_cache"))
            utlv.MigrationTest().cleanup_dest_vm(vm, None, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        env.clean_objects()
def run(test, params, env):
    """
    Test virsh migrate-setmaxdowntime command.

    1) Prepare migration environment
    2) Start migration and set migrate-maxdowntime
    3) Cleanup environment(migrated vm on destination)
    4) Check result

    NOTE(review): results are read from ret_setmmdt / ret_migration,
    presumably module-level flags set by thread_func_setmmdt and
    thread_func_live_migration -- confirm they are reset between runs.
    """
    vm_ref = params.get("setmmdt_vm_ref", "domname")
    dest_uri = params.get(
        "virsh_migrate_dest_uri", "qemu+ssh://EXAMPLE/system")
    src_uri = params.get(
        "virsh_migrate_src_uri", "qemu+ssh://EXAMPLE/system")
    pre_vm_state = params.get("pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", "no")
    do_migrate = "yes" == params.get("do_migrate", "yes")
    # Maximum tolerable downtime (param value is a string; the default
    # is an int -- both are passed straight through to the setmmdt thread).
    downtime = params.get("migrate_maxdowntime", 1000)
    extra = params.get("setmmdt_extra")
    # A delay between threads
    delay_time = int(params.get("delay_time", 1))
    # timeout of threads
    thread_timeout = 180

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    # Confirm vm is running
    if not vm.is_alive():
        vm.start()
    vm.wait_for_login()
    domid = vm.get_id()
    # Refuse to run against placeholder or local-only (///) URIs.
    if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
        raise error.TestNAError("Set your destination uri first.")
    if src_uri.count('///') or src_uri.count('EXAMPLE'):
        raise error.TestNAError("Set your source uri first.")
    if src_uri == dest_uri:
        raise error.TestNAError("You should not set dest uri same as local.")

    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, username, password, port=22)

    # setmaxdowntime runs against the source; migration uses defaults.
    setmmdt_dargs = {'debug': True, 'ignore_status': True, 'uri': src_uri}
    migrate_dargs = {'debug': True, 'ignore_status': True}

    # Confirm how to reference a VM.
    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = domid
    elif vm_ref == "domuuid":
        vm_ref = domuuid

    # Prepare vm state
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shutoff":
        vm.destroy()

    try:
        # Set max migration downtime must be during migration
        # Using threads for synchronization
        threads = []
        if do_migrate:
            threads.append(threading.Thread(target=thread_func_live_migration,
                                            args=(vm, dest_uri,
                                                  migrate_dargs)))

        threads.append(threading.Thread(target=thread_func_setmmdt,
                                        args=(vm_ref, downtime, extra,
                                              setmmdt_dargs)))
        for thread in threads:
            thread.start()
            # Migration must be executing before setting maxdowntime
            time.sleep(delay_time)
        # Wait until thread is over
        for thread in threads:
            thread.join(thread_timeout)

    finally:
        # Clean up.
        if do_migrate:
            cleanup_dest(vm, src_uri, dest_uri)

        if vm.is_paused():
            vm.resume()

    # Check results.
    if status_error:
        if ret_setmmdt:
            if not do_migrate and libvirt_version.version_compare(1, 2, 9):
                # https://bugzilla.redhat.com/show_bug.cgi?id=1146618
                # Commit fe808d9 fix it and allow setting migration
                # max downtime any time since libvirt-1.2.9
                logging.info("Libvirt version is newer than 1.2.9,"
                             "Allow set maxdowntime while VM isn't migrating")
            else:
                raise error.TestFail("virsh migrate-setmaxdowntime succeed "
                                     "but not expected.")
    else:
        if do_migrate and not ret_migration:
            raise error.TestFail("Migration failed.")

        if not ret_setmmdt:
            raise error.TestFail("virsh migrate-setmaxdowntime failed.")
# Example #22
def run(test, params, env):
    """
    Test qemu-monitor-command blockjobs by migrating with option
    --copy-storage-all or --copy-storage-inc.

    Defines a clone of the main vm with modified cpu/memory, creates a
    matching disk image on the remote host, then runs a copied-storage
    migration exercising the requested qmp blockjob type.
    """
    # Blockjob complete/pause/resume need libvirt >= 1.0.1
    if not libvirt_version.version_compare(1, 0, 1):
        raise error.TestNAError("Blockjob functions - "
                                "complete,pause,resume are"
                                "not supported in current libvirt version.")

    vm = env.get_vm(params.get("main_vm"))
    cpu_size = int(params.get("cpu_size", "1"))
    memory_size = int(params.get("memory_size", "1048576"))
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to Gib
    file_size = int(file_size) / 1073741824
    image_format = utils_test.get_image_info(file_path)["format"]

    remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
    remote_user = params.get("remote_user", "root")
    remote_passwd = params.get("migrate_dest_pwd", "PASSWORD.EXAMPLE")
    if remote_host.count("EXAMPLE"):
        raise error.TestNAError("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Define a new vm with modified cpu/memory
    new_vm_name = "%s_blockjob" % vm.name
    if vm.is_alive():
        vm.destroy()
    utlv.define_new_vm(vm.name, new_vm_name)
    try:
        set_cpu_memory(new_vm_name, cpu_size, memory_size)
        # Re-instantiate the VM object so it tracks the newly defined domain
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    except:  # Make sure created vm is cleaned up on any failure
        virsh.remove_domain(new_vm_name)
        raise

    rdm_params = {
        "remote_ip": remote_host,
        "remote_user": remote_user,
        "remote_pwd": remote_passwd
    }
    rdm = utils_test.RemoteDiskManager(rdm_params)

    try:
        # Note: vm already points at the new domain (rebuilt above);
        # the original code constructed the VM object a second time here
        # redundantly.
        vm.start()

        # Pre-create the destination image so copied migration can write
        # into it on the remote host.
        rdm.create_image("file",
                         file_path,
                         file_size,
                         None,
                         None,
                         img_frmt=image_format)

        logging.debug("Start migration...")
        copied_migration(vm, params, params.get("qmp_blockjob_type"),
                         primary_target)
    finally:
        # Recover created vm
        if vm.is_alive():
            vm.destroy()
        if vm.name == new_vm_name:
            vm.undefine()
        rdm.remove_path("file", file_path)
        rdm.runner.session.close()
    # NOTE(review): the statements below reference names never defined in
    # this function (srcuri, dsturi, remote_ip, username, host_pwd, vm,
    # source_type, device_type) -- this looks like a fragment of a
    # different test fused in by the source extraction; executing it would
    # raise NameError. Verify against the upstream file.
    # Connection to remote, init here for cleanup
    runner = None
    # Identify easy config. mistakes early
    warning_text = ("Migration VM %s URI %s appears problematic "
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using "
                    "fully-qualified network-based style.")

    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        raise error.TestNAError(warning_text % ('source', srcuri))

    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        raise error.TestNAError(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")

    status_error = "yes" == params.get("status_error", "no")
    if source_type == "file" and device_type == "lun":
        status_error = True
Пример #24
0
def run_convert_remote_vm(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).
    """
    # VM info
    vm_name = params.get("v2v_vm")

    # Remote host parameters
    remote_hostname = params.get("remote_hostname")
    username = params.get("username", "root")
    password = params.get("password")
    remote_hypervisor = params.get("remote_hypervisor")

    # Local pool parameters
    pool_type = params.get("pool_type", "dir")
    pool_name = params.get("pool_name", "v2v_test")
    target_path = params.get("target_path", "pool_path")
    block_device = params.get("block_device", "/dev/BLOCK/EXAMPLE")
    vg_name = params.get("volume_group_name", "vg_v2v")

    # Confirm parameters have been set correctly.
    if (pool_type in ['partition', 'lvm']
            and re.search("EXAMPLE", block_device)):
        raise error.TestNAError("Please set correct block device.")

    # If target_path is not an abs path, join it to data_dir.tmpdir
    if os.path.dirname(target_path) is "":
        target_path = os.path.join(data_dir.get_tmp_dir(), target_path)

    # dir pool need an exist path
    if pool_type == "dir":
        if not os.path.exists(target_path):
            os.mkdir(target_path)

    # V2V parameters
    input = params.get("input_method")
    files = params.get("config_files")
    network = params.get("network", "default")

    # Result check about
    ignore_virtio = "yes" == params.get("ignore_virtio", "no")

    # Create autologin to remote host
    esx_netrc = params.get("esx_netrc") % (remote_hostname, username, password)
    params['netrc'] = esx_netrc
    if remote_hypervisor == "esx":
        utils_v2v.build_esx_no_verify(params)
    else:
        ssh_key.setup_ssh_key(remote_hostname,
                              user=username,
                              port=22,
                              password=password)

    # Create remote uri for remote host
    # Remote virt-v2v uri's instance
    ruri = utils_v2v.Uri(remote_hypervisor)
    remote_uri = ruri.get_uri(remote_hostname)

    # Check remote vms
    rvirsh_dargs = {
        'uri': remote_uri,
        'remote_ip': remote_hostname,
        'remote_user': username,
        'remote_pwd': password
    }
    rvirsh = virsh.VirshPersistent(**rvirsh_dargs)
    if not rvirsh.domain_exists(vm_name):
        rvirsh.close_session()
        raise error.TestFail("Couldn't find vm '%s' to be converted "
                             "on remote uri '%s'." % (vm_name, remote_uri))

    if remote_hypervisor != "esx":
        remote_vm = libvirt_vm.VM(vm_name, params, test.bindir,
                                  env.get("address_cache"))
        remote_vm.connect_uri = remote_uri
        # Remote storage pool's instance
        rsp = libvirt_storage.StoragePool(rvirsh)
        # Put remote vm's disk into a directory storage pool
        prepare_remote_sp(rsp, remote_vm, pool_name)

    # Local storage pool's instance
    lsp = libvirt_storage.StoragePool()
    try:
        # Create storage pool for test
        if pool_type == "dir":
            if not create_dir_pool(lsp, pool_name, target_path):
                raise error.TestFail("Prepare directory storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "partition":
            if not create_partition_pool(lsp, pool_name, block_device,
                                         target_path):
                raise error.TestFail("Prepare partition storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "lvm":
            if not create_lvm_pool(lsp, pool_name, block_device, vg_name,
                                   target_path):
                raise error.TestFail("Preapre lvm storage pool for "
                                     "virt-v2v failed.")

        # Maintain a single params for v2v to avoid duplicate parameters
        v2v_params = {
            "hostname": remote_hostname,
            "username": username,
            "password": password,
            "hypervisor": remote_hypervisor,
            "storage": pool_name,
            "network": network,
            "target": "libvirt",
            "vms": vm_name,
            "netrc": esx_netrc,
            "input": input,
            "files": files
        }
        try:
            result = utils_v2v.v2v_cmd(v2v_params)
            logging.debug(result)
        except error.CmdError, detail:
            raise error.TestFail("Virt v2v failed:\n%s" % str(detail))

        # v2v may be successful, but devices' driver may be not virtio
        error_info = []
        # Check v2v vm on local host
        # Update parameters for local hypervisor and vm
        params['vms'] = vm_name
        params['target'] = "libvirt"
        vm_check = utils_v2v.LinuxVMCheck(test, params, env)
        try:
            if not vm_check.is_disk_virtio():
                error_info.append("Error:disk type was not converted to "
                                  "virtio.")
            if not vm_check.is_net_virtio():
                error_info.append("Error:nic type was not converted to "
                                  "virtio.")
        except (remote.LoginError, virt_vm.VMError), detail:
            error_info.append(str(detail))
Пример #25
0
def run(test, params, env):
    """
    Test various options of virt-v2v.

    Reads the input/output configuration from test params, runs virt-v2v
    with the requested options (possibly as an unprivileged user), and
    validates the result per option.  Helper closures below carry out the
    per-option checks; the trailing try/finally does the conversion and
    the cleanup.
    """
    if utils_v2v.V2V_EXEC is None:
        raise ValueError('Missing command: virt-v2v')
    # Input side: source vm, hypervisor and its credentials/endpoints
    vm_name = params.get("main_vm", "EXAMPLE")
    new_vm_name = params.get("new_vm_name")
    input_mode = params.get("input_mode")
    v2v_options = params.get("v2v_options", "")
    hypervisor = params.get("hypervisor", "kvm")
    remote_host = params.get("remote_host", "EXAMPLE")
    vpx_dc = params.get("vpx_dc", "EXAMPLE")
    esx_ip = params.get("esx_ip", "EXAMPLE")
    vpx_passwd = params.get("vpx_passwd", "EXAMPLE")
    ovirt_engine_url = params.get("ovirt_engine_url", "EXAMPLE")
    ovirt_engine_user = params.get("ovirt_engine_user", "EXAMPLE")
    ovirt_engine_passwd = params.get("ovirt_engine_password", "EXAMPLE")
    # Output side: target mode/storage and rhev/vdsm specifics
    output_mode = params.get("output_mode")
    output_storage = params.get("output_storage", "default")
    export_name = params.get("export_name", "EXAMPLE")
    storage_name = params.get("storage_name", "EXAMPLE")
    disk_img = params.get("input_disk_image", "")
    nfs_storage = params.get("nfs_storage")
    mnt_point = params.get("mount_point")
    export_domain_uuid = params.get("export_domain_uuid", "")
    fake_domain_uuid = params.get("fake_domain_uuid")
    vdsm_image_uuid = params.get("vdsm_image_uuid")
    vdsm_vol_uuid = params.get("vdsm_vol_uuid")
    vdsm_vm_uuid = params.get("vdsm_vm_uuid")
    vdsm_ovf_output = params.get("vdsm_ovf_output")
    v2v_user = params.get("unprivileged_user", "")
    v2v_timeout = int(params.get("v2v_timeout", 1200))
    status_error = "yes" == params.get("status_error", "no")
    # Abort early if any required param still carries its EXAMPLE
    # placeholder value.
    for param in [
            vm_name, remote_host, esx_ip, vpx_dc, ovirt_engine_url,
            ovirt_engine_user, ovirt_engine_passwd, output_storage,
            export_name, storage_name, disk_img, export_domain_uuid, v2v_user
    ]:
        if "EXAMPLE" in param:
            raise error.TestNAError("Please replace %s with real value" %
                                    param)

    # Libvirt pool setup used when output_mode == "libvirt"
    su_cmd = "su - %s -c " % v2v_user
    output_uri = params.get("oc_uri", "")
    pool_name = params.get("pool_name", "v2v_test")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target_path", "v2v_pool")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    pvt = utlv.PoolVolumeTest(test, params)
    new_v2v_user = False
    restore_image_owner = False
    address_cache = env.get('address_cache')
    params['vmcheck'] = None
    def create_pool():
        """
        Prepare the libvirt storage pool used as conversion output.
        """
        if output_uri != "qemu:///session":
            # Privileged connection: let PoolVolumeTest build the pool.
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_img)
            return
        # Session connection: create a dir pool under the user's home,
        # running each step as the unprivileged user.
        session_target = os.path.join("/home", v2v_user, pool_target)
        utils.system(su_cmd + "'mkdir %s'" % session_target, verbose=True)
        create_cmd = (su_cmd + "'virsh pool-create-as %s dir" % pool_name +
                      " --target %s'" % session_target)
        utils.system(create_cmd, verbose=True)

    def cleanup_pool():
        """
        Tear down the libvirt storage pool created by create_pool().
        """
        if output_uri != "qemu:///session":
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_img)
            return
        # Session pool: destroy it and remove its backing directory as
        # the unprivileged user.
        utils.system(su_cmd + "'virsh pool-destroy %s'" % pool_name,
                     verbose=True)
        session_target = os.path.join("/home", v2v_user, pool_target)
        utils.system(su_cmd + "'rm -rf %s'" % session_target, verbose=True)

    def get_all_uuids(output):
        """
        Extract (export domain uuid, image uuid, vol uuid) from v2v output.

        Parses the quoted target path of the first 'qemu-img convert'
        line; the uuids sit at fixed path component positions.
        """
        matches = re.findall(r"qemu-img\sconvert\s.+\s'(\S+)'\n", output)
        if not matches:
            raise error.TestError("Fail to find tmp target file name when"
                                  " converting vm disk image")
        parts = matches[0].split('/')
        return (parts[3], parts[5], parts[6])

    def get_ovf_content(output):
        """
        Locate and read the ovf file of the converted vm.

        Greps the export vm dir for the volume's ovf:id and reads the
        matching file.  Returns an empty string when the export dir or
        ovf file cannot be found.
        """
        export_domain_uuid, _, vol_uuid = get_all_uuids(output)
        export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                     'master/vms')
        ovf_content = ""
        if os.path.isdir(export_vm_dir):
            # The ovf file is whichever one references this volume's id
            ovf_id = "ovf:id='%s'" % vol_uuid
            ret = utils.system_output("grep -R \"%s\" %s" %
                                      (ovf_id, export_vm_dir))
            ovf_file = ret.split(":")[0]
            if os.path.isfile(ovf_file):
                # with-block closes the handle even if read() raises
                # (original open/read/close leaked on exception)
                with open(ovf_file, "r") as ovf_f:
                    ovf_content = ovf_f.read()
        else:
            logging.error("Can't find ovf file to read")
        return ovf_content

    def get_img_path(output):
        """
        Return the full path of the converted image for the current
        output mode.

        :raise error.TestError: if no path can be derived or the file
                                does not exist.
        """
        img_path = ""
        img_name = vm_name + "-sda"
        if output_mode == "libvirt":
            img_path = virsh.vol_path(img_name, output_storage).stdout.strip()
        elif output_mode == "local":
            img_path = os.path.join(output_storage, img_name)
        elif output_mode in ["rhev", "vdsm"]:
            export_domain_uuid, image_uuid, vol_uuid = get_all_uuids(output)
            img_path = os.path.join(mnt_point, export_domain_uuid, 'images',
                                    image_uuid, vol_uuid)
        if not img_path or not os.path.isfile(img_path):
            # Interpolate with %: unlike logging, TestError does not
            # lazy-format trailing arguments, so the original call left
            # the literal '%s' in the message.
            raise error.TestError("Get image path: '%s' is invalid" % img_path)
        return img_path

    def check_vmtype(ovf, expected_vmtype):
        """
        Verify the <VmType> element in the ovf matches the expectation.

        Only meaningful for rhev output; silently skips other modes and
        unknown vmtype names.
        """
        if output_mode != "rhev":
            return
        vmtype_codes = {"server": 1, "desktop": 0}
        if expected_vmtype not in vmtype_codes:
            return
        marker = "<VmType>%s</VmType>" % vmtype_codes[expected_vmtype]
        if marker in ovf:
            logging.info("Find VmType=%s in ovf file", expected_vmtype)
        else:
            raise error.TestFail("VmType check failed")

    def check_image(img_path, check_point, expected_value):
        """
        Verify image file allocation mode ("sparse"/"preallocated") or
        format against qemu-img info output.
        """
        img_info = utils_misc.get_image_info(img_path)
        logging.debug("Image info: %s", img_info)
        if check_point == "allocation":
            # Sparse image: virtual size exceeds on-disk size
            is_sparse = img_info['vsize'] > img_info['dsize']
            if expected_value == "sparse":
                if not is_sparse:
                    raise error.TestFail("%s is not a sparse image" % img_path)
                logging.info("%s is a sparse image", img_path)
            elif expected_value == "preallocated":
                if is_sparse:
                    raise error.TestFail("%s is not a preallocated image" %
                                         img_path)
                logging.info("%s is a preallocated image", img_path)
        if check_point == "format":
            if expected_value != img_info['format']:
                raise error.TestFail("%s format is not %s" %
                                     (img_path, expected_value))
            logging.info("%s format is %s", img_path, expected_value)

    def check_new_name(output, expected_name):
        """
        Verify guest name changed to the new name.

        Looks the renamed guest up per output mode: domain existence for
        libvirt, image file for local, <Name> element in the ovf for
        rhev/vdsm.
        """
        found = False
        # Must be an elif chain: the original used independent ifs, so
        # for "libvirt"/"local" modes the trailing else returned early
        # and 'found' was never checked.
        if output_mode == "libvirt":
            found = virsh.domain_exists(expected_name)
        elif output_mode == "local":
            found = os.path.isfile(
                os.path.join(output_storage, expected_name + "-sda"))
        elif output_mode in ["rhev", "vdsm"]:
            ovf = get_ovf_content(output)
            found = "<Name>%s</Name>" % expected_name in ovf
        else:
            # Unknown output mode: nothing to verify
            return
        if found:
            logging.info("Guest name renamed when converting it")
        else:
            raise error.TestFail("Rename guest failed")

    def check_nocopy(output):
        """
        Verify no image was created when converting with --no-copy.
        """
        # NOTE(review): get_img_path() raises TestError when the file is
        # missing, so the success branch below appears reachable only if
        # the path exists -- confirm the intended flow.
        img_path = get_img_path(output)
        if os.path.isfile(img_path):
            raise error.TestFail("Find %s" % img_path)
        logging.info("No image created with --no-copy option")

    def check_connection(output, expected_uri):
        """
        Check that v2v reported initializing the expected output
        connection uri.
        """
        init_msg = "Initializing the target -o libvirt -oc %s" % expected_uri
        if init_msg not in output:
            raise error.TestFail("Not find message: %s" % init_msg)
        logging.info("Find message: %s", init_msg)

    def check_disks(ori_disks):
        """
        Check that the disk count inside the converted VM matches the
        number of disks attached before conversion.
        """
        vmcheck = params.get("vmcheck")
        if vmcheck is None:
            raise error.TestError("VM check object is None")
        os_type = params.get("os_type", "linux")
        # Windows guests need a one-time boot initialization through a
        # SASL virsh session before a guest session can be created.
        if os_type == "windows":
            virsh_session = utils_sasl.VirshSessionSASL(params)
            vmcheck.virsh_session_id = virsh_session.get_id()
            vmcheck.init_windows()
            virsh_session.close()
        # Open a guest session for running commands
        vmcheck.create_session()
        expected_disks = int(params.get("added_disks_count", "1")) - ori_disks
        logging.debug("Expect %s disks im VM after convert", expected_disks)
        # Count disks inside the guest
        if os_type == "linux":
            disks = int(vmcheck.session.cmd("lsblk |grep disk |wc -l").strip())
        else:
            vmcheck.session.cmd(r"echo list disk > C:\list_disk.txt")
            output = vmcheck.session.cmd(r"diskpart /s C:\list_disk.txt").strip()
            logging.debug("Disks in VM: %s", output)
            # diskpart prints 6 lines of header/footer around the table
            disks = len(output.splitlines()) - 6
        logging.debug("Find %s disks in VM after convert", disks)
        vmcheck.session.close()
        if disks != expected_disks:
            raise error.TestFail("Disk counts is wrong")
        logging.info("Disk counts is expected")

    def check_result(cmd, result, status_error):
        """
        Validate the virt-v2v result against each option that appeared
        on the command line.
        """
        utlv.check_exit_status(result, status_error)
        if status_error:
            # Negative test: exit status already validated above.
            return
        output = result.stdout + result.stderr
        if output_mode == "rhev":
            ovf = get_ovf_content(output)
            logging.debug("ovf content: %s", ovf)
            if '--vmtype' in cmd:
                vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                check_vmtype(ovf, vmtype)
        no_copy = '--no-copy' in cmd
        if '-oa' in cmd and not no_copy:
            alloc_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
            check_image(get_img_path(output), "allocation", alloc_mode)
        if '-of' in cmd and not no_copy:
            out_fmt = re.findall(r"-of\s(\w+)", cmd)[0]
            check_image(get_img_path(output), "format", out_fmt)
        if '-on' in cmd:
            check_new_name(output, re.findall(r"-on\s(\w+)", cmd)[0])
        if no_copy:
            check_nocopy(output)
        if '-oc' in cmd:
            check_connection(output, re.findall(r"-oc\s(\S+)", cmd)[0])
        if output_mode == "rhev":
            if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                raise error.TestFail("Import VM failed")
            else:
                params['vmcheck'] = utils_v2v.VMCheck(test, params, env)
                if attach_disks:
                    check_disks(params.get("ori_disks"))
        if output_mode == "libvirt":
            if "qemu:///session" not in v2v_options:
                virsh.start(vm_name, debug=True, ignore_status=False)

    # Main flow: build the virt-v2v command from input/output options,
    # run it, check the result, and undo every resource created on the
    # way in the finally block.
    backup_xml = None
    attach_disks = "yes" == params.get("attach_disk_config", "no")
    attach_disk_path = os.path.join(test.tmpdir, "attach_disks")
    vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir = ("", "", "")
    try:
        # Build input options
        input_option = ""
        if input_mode is None:
            pass
        elif input_mode == "libvirt":
            uri_obj = utils_v2v.Uri(hypervisor)
            ic_uri = uri_obj.get_uri(remote_host, vpx_dc, esx_ip)
            input_option = "-i %s -ic %s %s" % (input_mode, ic_uri, vm_name)
            # Build network&bridge option to avoid network error
            v2v_options += " -b %s -n %s" % (params.get("output_bridge"),
                                             params.get("output_network"))
            # Multiple disks testing
            if attach_disks:
                backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                # Get original vm disk counts
                params['ori_disks'] = backup_xml.get_disk_count(vm_name)
                utlv.attach_disks(env.get_vm(vm_name), attach_disk_path, None,
                                  params)
        elif input_mode == "disk":
            input_option += "-i %s %s" % (input_mode, disk_img)
        elif input_mode in ['libvirtxml', 'ova']:
            raise error.TestNAError("Unsupported input mode: %s" % input_mode)
        else:
            raise error.TestError("Unknown input mode %s" % input_mode)
        input_format = params.get("input_format")
        input_allo_mode = params.get("input_allo_mode")
        if input_format:
            input_option += " -if %s" % input_format
            if not status_error:
                logging.info("Check image before convert")
                check_image(disk_img, "format", input_format)
                if input_allo_mode:
                    check_image(disk_img, "allocation", input_allo_mode)

        # Build output options
        output_option = ""
        if output_mode:
            output_option = "-o %s -os %s" % (output_mode, output_storage)
        output_format = params.get("output_format")
        if output_format:
            output_option += " -of %s" % output_format
        output_allo_mode = params.get("output_allo_mode")
        if output_allo_mode:
            output_option += " -oa %s" % output_allo_mode

        # Build vdsm related options
        if output_mode in ['vdsm', 'rhev']:
            if not os.path.isdir(mnt_point):
                os.mkdir(mnt_point)
            if not utils_misc.mount(nfs_storage, mnt_point, "nfs"):
                raise error.TestError("Mount NFS Failed")
            if output_mode == 'vdsm':
                v2v_options += " --vdsm-image-uuid %s" % vdsm_image_uuid
                v2v_options += " --vdsm-vol-uuid %s" % vdsm_vol_uuid
                v2v_options += " --vdsm-vm-uuid %s" % vdsm_vm_uuid
                v2v_options += " --vdsm-ovf-output %s" % vdsm_ovf_output
                vdsm_domain_dir = os.path.join(mnt_point, fake_domain_uuid)
                vdsm_image_dir = os.path.join(mnt_point, export_domain_uuid,
                                              "images", vdsm_image_uuid)
                vdsm_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                           "master/vms", vdsm_vm_uuid)
                # For vdsm_domain_dir, just create a dir to test BZ#1176591
                os.mkdir(vdsm_domain_dir)
                os.mkdir(vdsm_image_dir)
                os.mkdir(vdsm_vm_dir)

        # Output more messages
        v2v_options += " -v -x"

        # Prepare for libvirt unprivileged user session connection
        if "qemu:///session" in v2v_options:
            try:
                pwd.getpwnam(v2v_user)
            except KeyError:
                # create new user
                utils.system("useradd %s" % v2v_user, ignore_status=True)
                new_v2v_user = True
            user_info = pwd.getpwnam(v2v_user)
            logging.info("Convert to qemu:///session by user '%s'", v2v_user)
            if input_mode == "disk":
                # Change the image owner and group
                ori_owner = os.stat(disk_img).st_uid
                # NOTE(review): st_uid is also used for the group here --
                # looks like it should be st_gid; the restore in the
                # finally block would then set the wrong group. Verify.
                ori_group = os.stat(disk_img).st_uid
                os.chown(disk_img, user_info.pw_uid, user_info.pw_gid)
                restore_image_owner = True
            else:
                raise error.TestNAError("Only support convert local disk")

        # Setup ssh-agent access to xen hypervisor
        if hypervisor == 'xen':
            os.environ['LIBGUESTFS_BACKEND'] = 'direct'
            user = params.get("xen_host_user", "root")
            passwd = params.get("xen_host_passwd", "redhat")
            logging.info("set up ssh-agent access ")
            ssh_key.setup_ssh_key(remote_host,
                                  user=user,
                                  port=22,
                                  password=passwd)
            utils_misc.add_identities_into_ssh_agent()
            # If the input format is not define, we need to either define
            # the original format in the source metadata(xml) or use '-of'
            # to force the output format, see BZ#1141723 for detail.
            if '-of' not in v2v_options:
                v2v_options += ' -of %s' % params.get("default_output_format",
                                                      "qcow2")

        # Create password file for access to ESX hypervisor
        if hypervisor == 'esx':
            vpx_passwd = params.get("vpx_passwd")
            vpx_passwd_file = os.path.join(test.tmpdir, "vpx_passwd")
            logging.info("Building ESX no password interactive verification.")
            pwd_f = open(vpx_passwd_file, 'w')
            pwd_f.write(vpx_passwd)
            pwd_f.close()
            output_option += " --password-file %s" % vpx_passwd_file

        # Create libvirt dir pool
        if output_mode == "libvirt":
            create_pool()

        # Running virt-v2v command
        cmd = "%s %s %s %s" % (utils_v2v.V2V_EXEC, input_option, output_option,
                               v2v_options)
        if v2v_user:
            cmd = su_cmd + "'%s'" % cmd
        cmd_result = utils.run(cmd,
                               timeout=v2v_timeout,
                               verbose=True,
                               ignore_status=True)
        if new_vm_name:
            vm_name = new_vm_name
            params['main_vm'] = new_vm_name
        check_result(cmd, cmd_result, status_error)
    finally:
        if hypervisor == "xen":
            utils.run("ssh-agent -k")
        if hypervisor == "esx":
            # NOTE(review): vpx_passwd_file is only bound inside the try
            # block; an early failure before that point would make this a
            # NameError. Verify.
            utils.run("rm -rf %s" % vpx_passwd_file)
        for vdsm_dir in [vdsm_domain_dir, vdsm_image_dir, vdsm_vm_dir]:
            if os.path.exists(vdsm_dir):
                shutil.rmtree(vdsm_dir)
        # NOTE(review): mnt_point has no default in params.get and may be
        # None here -- os.path.exists(None) raises TypeError. Verify.
        if os.path.exists(mnt_point):
            utils_misc.umount(nfs_storage, mnt_point, "nfs")
            os.rmdir(mnt_point)
        if output_mode == "local":
            image_name = vm_name + "-sda"
            img_file = os.path.join(output_storage, image_name)
            xml_file = img_file + ".xml"
            for local_file in [img_file, xml_file]:
                if os.path.exists(local_file):
                    os.remove(local_file)
        if output_mode == "libvirt":
            if "qemu:///session" in v2v_options:
                cmd = su_cmd + "'virsh undefine %s'" % vm_name
                utils.system(cmd)
            else:
                virsh.remove_domain(vm_name)
            cleanup_pool()
        vmcheck = params.get("vmcheck")
        if vmcheck:
            vmcheck.cleanup()
        if new_v2v_user:
            utils.system("userdel -f %s" % v2v_user)
        if restore_image_owner:
            os.chown(disk_img, ori_owner, ori_group)
        if backup_xml:
            backup_xml.sync()
        if os.path.exists(attach_disk_path):
            shutil.rmtree(attach_disk_path)
Пример #26
0
def run(test, params, env):
    """
    Test of virt-edit.

    1) Get and init parameters for test.
    2) Prepare environment (remote ssh key, vm state, target disk/image).
    3) Run virt-edit command and get result.
    4) Recover environment (libvirtd, temp image, backup file).
    5) Check result against status_error and verify the edited line.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    remote_host = params.get("virt_edit_remote_host", "HOST.EXAMPLE")
    remote_user = params.get("virt_edit_remote_user", "root")
    remote_passwd = params.get("virt_edit_remote_passwd", "PASSWD.EXAMPLE")
    connect_uri = params.get("virt_edit_connect_uri")
    if connect_uri is not None:
        # Remote test: build a qemu+ssh uri and refuse to run with the
        # EXAMPLE placeholders still in place.
        uri = "qemu+ssh://%s@%s/system" % (remote_user, remote_host)
        if uri.count("EXAMPLE"):
            raise error.TestNAError("Please config host and passwd first.")
        # Config ssh autologin for it
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)
    else:
        uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                          "default"))
    start_vm = params.get("start_vm", "no")
    vm_ref = params.get("virt_edit_vm_ref", vm_name)
    file_ref = params.get("virt_edit_file_ref", "/etc/hosts")
    created_img = params.get("virt_edit_created_img", "/tmp/foo.img")
    foo_line = params.get("foo_line", "")
    options = params.get("virt_edit_options")
    options_suffix = params.get("virt_edit_options_suffix")
    status_error = params.get("status_error", "no")
    backup_extension = params.get("virt_edit_backup_extension")
    test_format = params.get("virt_edit_format")

    # virt-edit should not be used when vm is running.
    # (for normal test)
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=True)

    dom_disk_dict = vm.get_disk_devices()  # TODO
    dom_uuid = vm.get_uuid()
    # Disk format: raw or qcow2
    disk_format = None
    # If object is a disk file path
    is_disk = False

    # Resolve vm_ref from its symbolic value to the object virt-edit
    # will actually operate on (disk path, name, uuid or created image).
    if vm_ref == "domdisk":
        if len(dom_disk_dict) != 1:
            raise error.TestError("Only one disk device should exist on "
                                  "%s:\n%s." % (vm_name, dom_disk_dict))
        # NOTE: dict.values()[0] indexing is Python 2 only
        disk_detail = dom_disk_dict.values()[0]
        vm_ref = disk_detail['source']
        logging.info("disk to be edit:%s", vm_ref)
        if test_format:
            # Get format:raw or qcow2 from qemu-img info output
            info = utils.run("qemu-img info %s" % vm_ref).stdout
            for line in info.splitlines():
                comps = line.split(':')
                if comps[0].count("format"):
                    disk_format = comps[-1].strip()
                    break
            if disk_format is None:
                raise error.TestError("Cannot get disk format:%s" % info)
            options = "--format=%s" % disk_format
        is_disk = True
    elif vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "createdimg":
        vm_ref = created_img
        utils.run("dd if=/dev/zero of=%s bs=256M count=1" % created_img)
        is_disk = True

    # Decide whether to pass an expression to the virt-edit command:
    # appends foo_line to every line of the target file.
    if foo_line != "":
        expr = "s/$/%s/" % foo_line
    else:
        expr = ""

    if backup_extension is not None:
        if options is None:
            options = ""
        options += " -b %s" % backup_extension

    # Stop libvirtd if test need.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test
    result = lgf.virt_edit_cmd(vm_ref, file_ref, is_disk, options,
                               options_suffix, expr, uri, debug=True)
    status = result.exit_status

    # Recover libvirtd.
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Remove the scratch image (no-op when it was never created)
    utils.run("rm -f %s" % created_img)

    # Remove backup file in vm if it exists
    if backup_extension is not None:
        backup_file = file_ref + backup_extension
        cleanup_file_in_vm(vm, backup_file)

    # Judge result: non-zero status is only a failure for positive
    # tests; on success verify the appended line is actually present.
    status_error = (status_error == "yes")
    if status != 0:
        if not status_error:
            raise error.TestFail("Command executed failed.")
    else:
        if (expr != "" and
                (not login_to_check_foo_line(vm, file_ref, foo_line))):
            raise error.TestFail("Virt-edit to add %s in %s failed."
                                 "Test failed." % (foo_line, file_ref))
Пример #27
0
def run_convert_remote_vm(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).

    Sets up password-less access to the remote hypervisor (netrc-based for
    ESX, ssh keys otherwise), verifies the source domain exists on the
    remote uri, prepares a local storage pool (and, for non-ESX sources, a
    remote pool holding the guest disk), runs virt-v2v and finally checks
    that the converted guest uses virtio disk/nic drivers.

    :param test: avocado test object (supplies bindir).
    :param params: dict of test parameters.
    :param env: test environment holding VM/address caches.
    """
    # VM info
    vm_name = params.get("v2v_vm")

    # Remote host parameters
    remote_hostname = params.get("remote_hostname")
    username = params.get("username", "root")
    password = params.get("password")
    remote_hypervisor = params.get("remote_hypervisor")

    # Local pool parameters
    pool_type = params.get("pool_type", "dir")
    pool_name = params.get("pool_name", "v2v_test")
    target_path = params.get("target_path", "pool_path")
    block_device = params.get("block_device", "/dev/BLOCK/EXAMPLE")
    vg_name = params.get("volume_group_name", "vg_v2v")

    # Confirm parameters have been set correctly.
    if (pool_type in ['partition', 'lvm'] and
            re.search("EXAMPLE", block_device)):
        raise error.TestNAError("Please set correct block device.")

    # If target_path is not an abs path, join it to data_dir.tmpdir
    # NOTE(review): the "is" identity test against "" only works via
    # CPython string interning; should be == "" (SyntaxWarning on 3.8+).
    if os.path.dirname(target_path) is "":
        target_path = os.path.join(data_dir.get_tmp_dir(), target_path)

    # dir pool need an exist path
    if pool_type == "dir":
        if not os.path.exists(target_path):
            os.mkdir(target_path)

    # V2V parameters
    input = params.get("input_method")
    files = params.get("config_files")
    network = params.get("network", "default")

    # Result check about
    ignore_virtio = "yes" == params.get("ignore_virtio", "no")

    # Create autologin to remote host
    # NOTE(review): this %-format raises TypeError if the "esx_netrc"
    # param is absent -- confirm it is always provided by the config.
    esx_netrc = params.get("esx_netrc") % (remote_hostname, username, password)
    params['netrc'] = esx_netrc
    if remote_hypervisor == "esx":
        utils_v2v.build_esx_no_verify(params)
    else:
        ssh_key.setup_ssh_key(remote_hostname, user=username, port=22,
                              password=password)

    # Create remote uri for remote host
    # Remote virt-v2v uri's instance
    ruri = utils_v2v.Uri(remote_hypervisor)
    remote_uri = ruri.get_uri(remote_hostname)

    # Check remote vms
    rvirsh_dargs = {'uri': remote_uri, 'remote_ip': remote_hostname,
                    'remote_user': username, 'remote_pwd': password}
    rvirsh = virsh.VirshPersistent(**rvirsh_dargs)
    if not rvirsh.domain_exists(vm_name):
        rvirsh.close_session()
        raise error.TestFail("Couldn't find vm '%s' to be converted "
                             "on remote uri '%s'." % (vm_name, remote_uri))

    if remote_hypervisor != "esx":
        remote_vm = libvirt_vm.VM(vm_name, params, test.bindir,
                                  env.get("address_cache"))
        remote_vm.connect_uri = remote_uri
        # Remote storage pool's instance
        rsp = libvirt_storage.StoragePool(rvirsh)
        # Put remote vm's disk into a directory storage pool
        prepare_remote_sp(rsp, remote_vm, pool_name)

    # Local storage pool's instance
    lsp = libvirt_storage.StoragePool()
    try:
        # Create storage pool for test
        if pool_type == "dir":
            if not create_dir_pool(lsp, pool_name, target_path):
                raise error.TestFail("Prepare directory storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "partition":
            if not create_partition_pool(lsp, pool_name, block_device,
                                         target_path):
                raise error.TestFail("Prepare partition storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "lvm":
            if not create_lvm_pool(lsp, pool_name, block_device, vg_name,
                                   target_path):
                raise error.TestFail("Preapre lvm storage pool for "
                                     "virt-v2v failed.")

        # Maintain a single params for v2v to avoid duplicate parameters
        v2v_params = {"hostname": remote_hostname, "username": username,
                      "password": password, "hypervisor": remote_hypervisor,
                      "storage": pool_name, "network": network,
                      "target": "libvirt", "vms": vm_name, "netrc": esx_netrc,
                      "input": input, "files": files}
        try:
            result = utils_v2v.v2v_cmd(v2v_params)
            logging.debug(result)
        except error.CmdError, detail:
            raise error.TestFail("Virt v2v failed:\n%s" % str(detail))

        # v2v may be successful, but devices' driver may be not virtio
        error_info = []
        # Check v2v vm on local host
        # Update parameters for local hypervisor and vm
        params['vms'] = vm_name
        params['target'] = "libvirt"
        vm_check = utils_v2v.LinuxVMCheck(test, params, env)
        try:
            if not vm_check.is_disk_virtio():
                error_info.append("Error:disk type was not converted to "
                                  "virtio.")
            if not vm_check.is_net_virtio():
                error_info.append("Error:nic type was not converted to "
                                  "virtio.")
        except (remote.LoginError, virt_vm.VMError), detail:
            error_info.append(str(detail))
        # NOTE(review): the enclosing "try:" above has no except/finally in
        # this snippet -- the function appears truncated here; the cleanup
        # (pool deletion, vm cleanup) present in sibling copies is missing.
Пример #28
0
def run(test, params, env):
    """
    Test command: virsh domjobabort.

    The command can abort the currently running domain job.
    1.Prepare test environment,destroy or suspend a VM.
    2.Do action to get a subprocess(dump, save, managedsave).
    3.Perform virsh domjobabort operation to abort VM's job.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.

    :param test: avocado test object (supplies tmpdir).
    :param params: dict of test parameters.
    :param env: test environment holding the VM objects.
    :raises error.TestFail: when domjobabort's exit status contradicts
                            the expected status_error.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    # Negative cases that require no running domain job start from a
    # destroyed VM.
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    # Remember the current migration speed so it can be restored after the
    # migrate case deliberately slows it down.
    original_speed = virsh.migrate_getspeed(vm_name).stdout.strip()

    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute background virsh command, return subprocess w/o waiting for exit()

        :param action: virsh sub-command creating the domain job
                       (dump, save, managedsave, migrate, restore).
        :param vm_name: VM's name.
        :param file: file (fifo) argument of the sub-command; cleared for
                     managedsave and replaced by remote_uri for migrate.
        :param remote_uri: destination uri, used only when action is migrate.
        :return: the started subprocess.Popen object.
        """
        if action == "managedsave":
            file = ""
        elif action == "migrate":
            # Slow down migration for domjobabort
            virsh.migrate_setspeed(vm_name, "1")
            file = remote_uri
        command = "virsh %s %s %s --unsafe" % (action, vm_name, file)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    action = params.get("jobabort_action", "dump")
    status_error = params.get("status_error", "no")
    job = params.get("jobabort_job", "yes")
    tmp_file = os.path.join(test.tmpdir, "domjobabort.tmp")
    tmp_pipe = os.path.join(test.tmpdir, "domjobabort.fifo")
    vm_ref = params.get("jobabort_vm_ref")
    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    saved_data = None
    # Initialize f explicitly instead of relying on "process and f"
    # short-circuiting to dodge a NameError.
    f = None

    if action == "managedsave":
        # managedsave writes to a fixed libvirt-owned location.
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    if action == "restore":
        # A restore job needs a previously saved image to read back.
        virsh.save(vm_name, tmp_file, ignore_status=True)

    if action == "migrate":
        if remote_host.count("EXAMPLE"):
            raise error.TestNAError("Remote host should be configured "
                                    "for migrate.")
        else:
            # Config ssh autologin for remote host
            ssh_key.setup_ssh_key(remote_host, remote_user,
                                  remote_pwd, port=22)

    # Map the symbolic vm_ref parameter onto a concrete domain reference.
    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # Get the subprocess of VM.
    # The command's effect is to abort the currently running domain job.
    # So before do "domjobabort" action, we must create a job on the domain.
    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        # The fifo blocks the save/dump job on its reader/writer so the
        # job stays running long enough to be aborted.
        os.mkfifo(tmp_pipe)

        process = get_subprocess(action, vm_name, tmp_pipe, remote_uri)

        if action == "restore":
            # Feed only the first MB of the image into the fifo so the
            # restore job keeps running while domjobabort is issued.
            # Context manager replaces the original py2 file() call that
            # leaked the image file handle.
            with open(tmp_file, 'r') as image:
                saved_data = image.read(10 * 1024 * 1024)
            f = open(tmp_pipe, 'w')
            f.write(saved_data[:1024 * 1024])
        elif action == "migrate":
            f = None
        else:
            # Drain only part of the fifo, keeping the writing job alive.
            f = open(tmp_pipe, 'r')
            dummy = f.read(1024 * 1024)

    # Give enough time for starting job
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break
    ret = virsh.domjobabort(vm_ref, ignore_status=True, debug=True)
    status = ret.exit_status

    if process and f:
        # Let the background job drain/finish so the fifo can be removed.
        if saved_data:
            f.write(saved_data[1024 * 1024:])
        else:
            dummy = f.read()
        f.close()

        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        if os.path.exists(tmp_file):
            os.unlink(tmp_file)

    # Recover the environment.
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        # poll() is None while the child is still running; only then does
        # it need to be killed.  (The original truthiness test skipped a
        # running child and tried to kill an already-exited one.)
        if process.poll() is None:
            try:
                process.kill()
            except OSError:
                pass

    if action == "migrate":
        # Recover migration speed
        virsh.migrate_setspeed(vm_name, original_speed)
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
Пример #29
0
def run(test, params, env):
    """
    Convert a local vm disk to local libvirt(KVM).

    Copies the remote guest's disk and a libvirt xml description to a
    local temporary path, prepares a local storage pool of the requested
    type, runs virt-v2v in "libvirtxml" mode against the copied xml and
    checks the converted guest for virtio disk/nic drivers.

    :param test: avocado test object (supplies bindir).
    :param params: dict of test parameters.
    :param env: test environment holding VM/address caches.
    """
    # VM info
    vm_name = params.get("v2v_vm")

    # Remote host parameters
    remote_hostname = params.get("remote_hostname")
    username = params.get("remote_username", "root")
    password = params.get("remote_passwd")
    remote_hypervisor = params.get("remote_hypervisor")

    # Local pool parameters
    pool_type = params.get("pool_type", "dir")
    block_device = params.get("block_device", "/dev/BLOCK/EXAMPLE")
    if pool_type in ['disk', 'partition', 'lvm'] and \
            re.search("EXAMPLE", block_device):
        raise error.TestNAError("Please set correct block device.")
    pool_name = params.get("pool_name", "v2v_test")
    target_path = params.get("target_path", "pool_path")
    vg_name = params.get("volume_group_name", "vg_v2v")
    local_tmp_path = params.get("local_tmp_path", data_dir.get_tmp_dir())
    # If target_path is not an abs path, join it to data_dir.TMPDIR
    # NOTE(review): the "is" identity test against "" only works via
    # CPython string interning; should be == "" (SyntaxWarning on 3.8+).
    if os.path.dirname(target_path) is "":
        target_path = os.path.join(data_dir.get_tmp_dir(), target_path)

    # dir pool need an exist path
    if pool_type == "dir":
        if not os.path.exists(target_path):
            os.mkdir(target_path)

    # V2V parameters
    input = params.get("input_method")
    files = params.get("config_files")
    network = params.get("network", "default")

    # Result check about
    ignore_virtio = "yes" == params.get("ignore_virtio", "no")

    # Create remote uri for remote host
    # Remote virt-v2v uri's instance
    ruri = utils_v2v.Uri(remote_hypervisor)
    remote_uri = ruri.get_uri(remote_hostname)

    # Password-less login so the disk copy below can run unattended.
    ssh_key.setup_ssh_key(remote_hostname,
                          user=username,
                          port=22,
                          password=password)

    # Check remote vms
    remote_vm = libvirt_vm.VM(vm_name, params, test.bindir,
                              env.get("address_cache"))
    remote_vm.connect_uri = remote_uri
    if not remote_vm.exists():
        raise error.TestFail("Couldn't find vm '%s' to be converted "
                             "on remote uri '%s'." % (vm_name, remote_uri))

    # Copy remote vm's disk to local and create xml file for it
    tmp_xml_file = copy_remote_vm(remote_vm, local_tmp_path, remote_hostname,
                                  username, password)

    # Local storage pool's instance
    lsp = libvirt_storage.StoragePool()
    try:
        # Create storage pool for test
        if pool_type == "dir":
            if not create_dir_pool(lsp, pool_name, target_path):
                raise error.TestFail("Prepare directory storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "partition":
            if not create_partition_pool(lsp, pool_name, block_device,
                                         target_path):
                raise error.TestFail("Prepare partition storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "lvm":
            if not create_lvm_pool(lsp, pool_name, block_device, vg_name,
                                   target_path):
                raise error.TestFail("Prepare lvm storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "disk":
            if not create_disk_pool(lsp, pool_name, block_device, target_path):
                raise error.TestFail("Prepare disk storage pool for "
                                     "virt-v2v failed.")

        # Maintain a single params for v2v to avoid duplicate parameters
        v2v_params = {
            "hostname": remote_hostname,
            "username": username,
            "password": password,
            "hypervisor": remote_hypervisor,
            "storage": pool_name,
            "network": network,
            "target": "libvirtxml",
            "vms": tmp_xml_file,
            "input": input,
            "files": files
        }
        try:
            result = utils_v2v.v2v_cmd(v2v_params)
            logging.debug(result)
        except error.CmdError, detail:
            raise error.TestFail("Virt v2v failed:\n%s" % str(detail))

        # v2v may be successful, but devices' driver may be not virtio
        error_info = []
        # Check v2v vm on local host
        # Update parameters for local hypervisor and vm
        params['vms'] = vm_name
        params['target'] = "libvirt"
        vm_check = utils_v2v.LinuxVMCheck(test, params, env)
        try:
            if not vm_check.is_disk_virtio():
                error_info.append("Error:disk type was not converted to "
                                  "virtio.")
            if not vm_check.is_net_virtio():
                error_info.append("Error:nic type was not converted to "
                                  "virtio.")
        except (remote.LoginError, virt_vm.VMError), detail:
            error_info.append(str(detail))
        # NOTE(review): the enclosing "try:" above has no except/finally in
        # this snippet -- the function appears truncated here; the pool and
        # vm cleanup present in sibling copies is missing.
    def verify_migration_speed(test, params, env):
        """
        Check if migration speed is effective with twice migration.

        Migrates all VMs once with a first bandwidth and once with a
        second bandwidth (half/double/same), then compares the recorded
        migration times: the ratio of times must match the ratio of
        bandwidths within an allowed delta.

        :param test: avocado test object (supplies bindir).
        :param params: dict of test parameters.
        :param env: test environment holding the VM objects.
        :raises error.TestNAError: for missing VMs or EXAMPLE uris.
        :raises error.TestFail: when a migration time deviates beyond
                                allowed_delta.
        """
        vms = env.get_all_vms()
        src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
        dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")

        if not len(vms):
            raise error.TestNAError("Please provide migrate_vms for test.")

        if src_uri.count('///') or src_uri.count('EXAMPLE'):
            raise error.TestNAError("The src_uri '%s' is invalid" % src_uri)

        if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
            raise error.TestNAError("The dest_uri '%s' is invalid" % dest_uri)

        remote_host = params.get("migrate_dest_host")
        username = params.get("migrate_dest_user", "root")
        password = params.get("migrate_dest_pwd")
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        # Check migrated vms' state
        for vm in vms:
            if vm.is_dead():
                vm.start()

        load_vm_names = params.get("load_vms").split()
        # vms for load
        load_vms = []
        for vm_name in load_vm_names:
            load_vms.append(
                libvirt_vm.VM(vm_name, params, test.bindir,
                              env.get("address_cache")))
        params["load_vms"] = load_vms

        bandwidth = int(params.get("bandwidth", "4"))
        stress_type = params.get("stress_type", "load_vms_booting")
        migration_type = params.get("migration_type", "orderly")
        thread_timeout = int(params.get("thread_timeout", "60"))
        delta = float(params.get("allowed_delta", "0.1"))
        virsh_migrate_timeout = int(params.get("virsh_migrate_timeout", "60"))
        # virsh migrate options
        virsh_migrate_options = "--live --unsafe --timeout %s" % virsh_migrate_timeout
        # Migrate vms to remote host
        mig_first = utlv.MigrationTest()
        virsh_dargs = {"debug": True}
        for vm in vms:
            set_get_speed(vm.name, bandwidth, virsh_dargs=virsh_dargs)
            vm.wait_for_login()
        utils_test.load_stress(stress_type, vms, params)
        mig_first.do_migration(vms,
                               src_uri,
                               dest_uri,
                               migration_type,
                               options=virsh_migrate_options,
                               thread_timeout=thread_timeout)
        for vm in vms:
            mig_first.cleanup_dest_vm(vm, None, dest_uri)
            # Keep it clean for second migration
            if vm.is_alive():
                vm.destroy()

        # Migrate vms again with new bandwidth
        # NOTE(review): an unexpected second_bandwidth value leaves
        # speed_times undefined and raises NameError in the shift
        # computation below -- confirm params restrict it to
        # half/times/same.
        second_bandwidth = params.get("second_bandwidth", "times")
        if second_bandwidth == "half":
            second_bandwidth = bandwidth / 2
            speed_times = 2
        elif second_bandwidth == "times":
            second_bandwidth = bandwidth * 2
            speed_times = 0.5
        elif second_bandwidth == "same":
            second_bandwidth = bandwidth
            speed_times = 1

        # Migrate again
        for vm in vms:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
            set_get_speed(vm.name, second_bandwidth, virsh_dargs=virsh_dargs)
        utils_test.load_stress(stress_type, vms, params)
        mig_second = utlv.MigrationTest()
        mig_second.do_migration(vms,
                                src_uri,
                                dest_uri,
                                migration_type,
                                options=virsh_migrate_options,
                                thread_timeout=thread_timeout)
        for vm in vms:
            mig_second.cleanup_dest_vm(vm, None, dest_uri)

        fail_info = []
        # Check whether migration failed
        # NOTE(review): fail_info is still empty at this point, so this
        # check can never fire; the appends only happen in the loop below.
        if len(fail_info):
            raise error.TestFail(fail_info)

        for vm in vms:
            first_time = mig_first.mig_time[vm.name]
            second_time = mig_second.mig_time[vm.name]
            logging.debug(
                "Migration time for %s:\n"
                "Time with Bandwidth '%s' first: %s\n"
                "Time with Bandwidth '%s' second: %s", vm.name, bandwidth,
                first_time, second_bandwidth, second_time)
            # Relative deviation of the observed time from the time
            # predicted by the bandwidth ratio.
            shift = float(abs(first_time * speed_times -
                              second_time)) / float(second_time)
            logging.debug("Shift:%s", shift)
            if delta < shift:
                fail_info.append(
                    "Spent time for migrating %s is intolerable." % vm.name)

        # Check again for speed result
        if len(fail_info):
            raise error.TestFail(fail_info)
def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).

    Supports ESX (password-file based access) and Xen (ssh-key plus
    ssh-agent access) sources.  Prepares a local storage pool, runs
    virt-v2v against the remote domain and starts the converted guest
    locally.

    :param test: avocado test object (supplies bindir).
    :param params: dict of test parameters.
    :param env: test environment holding VM/address caches.
    :raises error.TestNAError: when host/password params keep their
                               EXAMPLE placeholders.
    :raises error.TestFail: when conversion or the local boot fails.
    """
    # VM info
    vm_name = params.get("main_vm")

    # Remote host parameters
    username = params.get("username", "root")
    xen_ip = params.get("xen_ip", "XEN.EXAMPLE")
    xen_pwd = params.get("xen_pwd", "PWD.EXAMPLE")
    vpx_ip = params.get("vpx_ip", "ESX.EXAMPLE")
    vpx_pwd = params.get("vpx_pwd", "PWD.EXAMPLE")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc", "VPX.DC.EXAMPLE")
    esx_ip = params.get("esx_ip", "ESX.EXAMPLE")
    # To decide which type test it is
    hypervisor = params.get("hypervisor")

    # Local pool parameters
    pool_type = params.get("pool_type", "dir")
    pool_name = params.get("pool_name", "v2v_test")
    target_path = params.get("target_path", "pool_path")
    emulated_img = params.get("emulated_image_path", "v2v_emulated.img")
    emulated_size = params.get("emulated_image_size", "10G")

    # If target_path is not an abs path, join it to data_dir.tmpdir
    if not os.path.dirname(target_path):
        target_path = os.path.join(data_dir.get_tmp_dir(), target_path)

    # V2V parameters
    input = params.get("input_method")
    files = params.get("config_files")
    network = params.get("network")
    bridge = params.get("bridge")

    # Set libguestfs environment
    os.environ['LIBGUESTFS_BACKEND'] = 'direct'

    # Extra v2v command options, default to None
    v2v_opts = params.get("v2v_opts")

    # NOTE(review): if hypervisor is neither "esx" nor "xen", remote_ip
    # and remote_pwd are never assigned and the uri/virsh setup below
    # raises NameError -- confirm only those two values are configured.
    if hypervisor == "esx":
        remote_ip = vpx_ip
        remote_pwd = vpx_pwd
        if remote_ip.count("EXAMPLE") or remote_pwd.count("EXAMPLE"):
            raise error.TestNAError("Please provide host or password for "
                                    "ESX test.")
        logging.info("Building ESX no password interactive verification.")
        # Store the vpx password where virt-v2v expects it; the context
        # manager closes the file even if the write fails (the original
        # leaked fp on error).
        with open(vpx_pwd_file, 'w') as fp:
            fp.write(vpx_pwd)

    if hypervisor == "xen":               # or hypervisor == 'kvm':
        remote_ip = xen_ip
        remote_pwd = xen_pwd
        if remote_ip.count("EXAMPLE") or remote_pwd.count("EXAMPLE"):
            raise error.TestNAError("Please provide host or password for "
                                    "xen test.")
        ssh_key.setup_ssh_key(remote_ip, user=username, port=22,
                              password=remote_pwd)
        # Note that password-interactive and Kerberos access are not supported.
        # You have to set up ssh access using ssh-agent and authorized_keys.
        try:
            utils_misc.add_identities_into_ssh_agent()
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed; clean up the agent and report as before.
            utils.run("ssh-agent -k")
            raise error.TestFail("Failed to start 'ssh-agent'")

    # Create remote uri for remote host
    # Remote virt-v2v uri's instance
    ruri = utils_v2v.Uri(hypervisor)
    remote_uri = ruri.get_uri(remote_ip, vpx_dc, esx_ip)
    logging.debug("The current virsh uri: %s", remote_uri)

    # Check remote vms
    rvirsh_dargs = {'uri': remote_uri, 'remote_ip': remote_ip,
                    'remote_user': username, 'remote_pwd': remote_pwd}
    rvirsh = virsh.VirshPersistent(**rvirsh_dargs)
    if not rvirsh.domain_exists(vm_name):
        rvirsh.close_session()
        raise error.TestFail("Couldn't find vm '%s' to be converted "
                             "on remote uri '%s'." % (vm_name, remote_uri))

    remote_vm = libvirt_vm.VM(vm_name, params, test.bindir,
                              env.get("address_cache"))
    remote_vm.connect_uri = remote_uri
    # Remote storage pool's instance
    rsp = libvirt_storage.StoragePool(rvirsh)

    # Prepare local libvirt storage pool
    pvt = utlv.PoolVolumeTest(test, params)

    # Local storage pool's instance
    lsp = libvirt_storage.StoragePool()

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {"hostname": remote_ip, "username": username,
                  "password": remote_pwd, "hypervisor": hypervisor,
                  "storage": pool_name, "network": network,
                  "bridge": bridge, "target": "libvirt",
                  "main_vm": vm_name, "input": input, "files": files}
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})

    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})

    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    try:
        # Create storage pool for test
        pvt.pre_pool(pool_name, pool_type, target_path, emulated_img,
                     image_size=emulated_size)
        logging.debug(lsp.pool_info(pool_name))

        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2 verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            if "Input/output error" in ret.stderr:
                raise error.TestFail("Encounter BZ#1146007")
            raise error.TestFail("Convert VM failed")

        # Update parameters for local hypervisor and vm
        logging.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        params['main_vm'] = vm_name
        params['target'] = "libvirt"
        # Start VM
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)
        vm.start()
    finally:
        # Tear down ssh-agent (xen) and the remote pool, then the session.
        if hypervisor != "esx":
            if hypervisor == "xen":
                utils.run("ssh-agent -k")
            rsp.delete_pool(pool_name)
        if rvirsh:
            rvirsh.close_session()
Пример #32
0
def run_convert_remote_vm(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).

    Exports the remote guest's disk into a remote directory pool, creates
    a local storage pool of the requested type, runs virt-v2v against the
    remote domain and checks that the converted guest uses virtio disk
    and nic drivers.  All pools and the converted guest are cleaned up on
    exit.

    :param test: avocado test object (supplies bindir/tmpdir).
    :param params: dict of test parameters.
    :param env: test environment holding VM/address caches.
    :raises error.TestFail: on conversion failure, or on non-virtio
                            devices unless ignore_virtio is set.
    """
    # VM info
    vm_name = params.get("v2v_vm")

    # Remote host parameters
    remote_hostname = params.get("remote_hostname")
    username = params.get("username", "root")
    password = params.get("password")
    remote_hypervisor = params.get("remote_hypervisor")

    # Local pool parameters
    pool_type = params.get("pool_type", "dir")
    pool_name = params.get("pool_name", "v2v_test")
    target_path = params.get("target_path", "pool_path")
    block_device = params.get("block_device")
    vg_name = params.get("volume_group_name", "vg_v2v")
    # If target_path is not an abs path, join it to test.tmpdir
    # (== replaces the original "is" identity test, which only worked by
    # CPython string interning and is a SyntaxWarning on 3.8+)
    if os.path.dirname(target_path) == "":
        target_path = os.path.join(test.tmpdir, target_path)

    # dir pool need an exist path
    if pool_type == "dir":
        if not os.path.exists(target_path):
            os.mkdir(target_path)

    # V2V parameters
    input = params.get("input_method")
    files = params.get("config_files")
    network = params.get("network", "default")

    # Result check about
    ignore_virtio = "yes" == params.get("ignore_virtio", "no")

    # Create remote uri for remote host
    # Remote virt-v2v uri's instance
    ruri = utils_v2v.Uri(remote_hypervisor)
    remote_uri = ruri.get_uri(remote_hostname)

    # Password-less login so virt-v2v can pull the disk over ssh.
    ssh_key.setup_ssh_key(remote_hostname,
                          user=username,
                          port=22,
                          password=password)

    # Check remote vms
    remote_vm = libvirt_vm.VM(vm_name, params, test.bindir,
                              env.get("address_cache"))
    remote_vm.connect_uri = remote_uri
    if not remote_vm.exists():
        raise error.TestFail("Couldn't find vm '%s' to be converted "
                             "on remote uri '%s'." % (vm_name, remote_uri))

    # Remote storage pool's instance
    # NOTE(review): sibling copies construct StoragePool from a virsh
    # session rather than a uri string -- confirm this argument is right.
    rsp = libvirt_storage.StoragePool(remote_uri)
    # Put remote vm's disk into a directory storage pool
    prepare_remote_sp(rsp, remote_vm, pool_name)

    # Local storage pool's instance
    lsp = libvirt_storage.StoragePool()
    try:
        # Create storage pool for test
        if pool_type == "dir":
            if not create_dir_pool(lsp, pool_name, target_path):
                raise error.TestFail("Prepare directory storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "partition":
            if not create_partition_pool(lsp, pool_name, block_device,
                                         target_path):
                raise error.TestFail("Prepare partition storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "lvm":
            if not create_lvm_pool(lsp, pool_name, block_device, vg_name,
                                   target_path):
                raise error.TestFail("Preapre lvm storage pool for "
                                     "virt-v2v failed.")

        # Maintain a single params for v2v to avoid duplicate parameters
        v2v_params = {
            "hostname": remote_hostname,
            "username": username,
            "password": password,
            "hypervisor": remote_hypervisor,
            "storage": pool_name,
            "network": network,
            "target": "libvirt",
            "vms": vm_name,
            "input": input,
            "files": files
        }
        try:
            result = utils_v2v.v2v_cmd(v2v_params)
        except error.CmdError:
            raise error.TestFail("Virt v2v failed.")

        # v2v may be successful, but devices' driver may be not virtio
        error_info = []
        # Check v2v vm on local host
        # Update parameters for local hypervisor and vm
        params['vms'] = vm_name
        params['target'] = "libvirt"
        vm_check = utils_v2v.LinuxVMCheck(test, params, env)
        if not vm_check.is_disk_virtio():
            error_info.append("Error:disk type was not converted to virtio.")
        if not vm_check.is_net_virtio():
            error_info.append("Error:nic type was not converted to virtio.")

        # Close vm for cleanup
        if vm_check.vm is not None and vm_check.vm.is_alive():
            vm_check.vm.destroy()

        if not ignore_virtio and len(error_info):
            raise error.TestFail(error_info)
    finally:
        # Always drop the converted guest and both test pools.
        cleanup_vm(vm_name)
        lsp.delete_pool(pool_name)
        rsp.delete_pool(pool_name)
def run(test, params, env):
    """
    Convert a local vm disk to local libvirt(KVM).

    Steps:
    1) Copy the remote VM's disk to the local host and build a libvirt
       XML description for it.
    2) Prepare a local storage pool (dir/partition/lvm/disk) as target.
    3) Run virt-v2v against the copied disk (target "libvirtxml").
    4) Verify the converted guest's disk and nic drivers are virtio.

    :param test: test object (provides bindir).
    :param params: test parameters dictionary.
    :param env: test environment object.
    :raise error.TestNAError: when a required block device is not set.
    :raise error.TestFail: on pool setup, conversion or virtio-check failure.
    """
    # VM info
    vm_name = params.get("v2v_vm")

    # Remote host parameters
    remote_hostname = params.get("remote_hostname")
    username = params.get("remote_username", "root")
    password = params.get("remote_passwd")
    remote_hypervisor = params.get("remote_hypervisor")

    # Local pool parameters
    pool_type = params.get("pool_type", "dir")
    block_device = params.get("block_device", "/dev/BLOCK/EXAMPLE")
    # Block-backed pool types need a real device configured by the user.
    if pool_type in ['disk', 'partition', 'lvm'] and \
            re.search("EXAMPLE", block_device):
        raise error.TestNAError("Please set correct block device.")
    pool_name = params.get("pool_name", "v2v_test")
    target_path = params.get("target_path", "pool_path")
    vg_name = params.get("volume_group_name", "vg_v2v")
    local_tmp_path = params.get("local_tmp_path", data_dir.get_tmp_dir())
    # If target_path is not an abs path, join it to data_dir.TMPDIR.
    # Fixed: was `os.path.dirname(target_path) is ""`, an identity test
    # against a string literal that only works through CPython interning;
    # use a plain truth test (matches the sibling v2v test below).
    if not os.path.dirname(target_path):
        target_path = os.path.join(data_dir.get_tmp_dir(), target_path)

    # dir pool need an exist path
    if pool_type == "dir":
        if not os.path.exists(target_path):
            os.mkdir(target_path)

    # V2V parameters
    # Renamed local from 'input' to avoid shadowing the builtin.
    input_mode = params.get("input_method")
    files = params.get("config_files")
    network = params.get("network", "default")

    # Result check about
    ignore_virtio = "yes" == params.get("ignore_virtio", "no")

    # Create remote uri for remote host
    # Remote virt-v2v uri's instance
    ruri = utils_v2v.Uri(remote_hypervisor)
    remote_uri = ruri.get_uri(remote_hostname)

    # Passwordless ssh is needed both for copying the disk and for v2v.
    ssh_key.setup_ssh_key(remote_hostname, user=username, port=22,
                          password=password)

    # Check remote vms
    remote_vm = libvirt_vm.VM(vm_name, params, test.bindir,
                              env.get("address_cache"))
    remote_vm.connect_uri = remote_uri
    if not remote_vm.exists():
        raise error.TestFail("Couldn't find vm '%s' to be converted "
                             "on remote uri '%s'." % (vm_name, remote_uri))

    # Copy remote vm's disk to local and create xml file for it
    tmp_xml_file = copy_remote_vm(remote_vm, local_tmp_path, remote_hostname,
                                  username, password)

    # Local storage pool's instance
    lsp = libvirt_storage.StoragePool()
    try:
        # Create storage pool for test
        if pool_type == "dir":
            if not create_dir_pool(lsp, pool_name, target_path):
                raise error.TestFail("Prepare directory storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "partition":
            if not create_partition_pool(lsp, pool_name, block_device,
                                         target_path):
                raise error.TestFail("Prepare partition storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "lvm":
            if not create_lvm_pool(lsp, pool_name, block_device, vg_name,
                                   target_path):
                raise error.TestFail("Prepare lvm storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "disk":
            if not create_disk_pool(lsp, pool_name, block_device, target_path):
                raise error.TestFail("Prepare disk storage pool for "
                                     "virt-v2v failed.")

        # Maintain a single params for v2v to avoid duplicate parameters
        v2v_params = {"hostname": remote_hostname, "username": username,
                      "password": password, "hypervisor": remote_hypervisor,
                      "storage": pool_name, "network": network,
                      "target": "libvirtxml", "vms": tmp_xml_file,
                      "input": input_mode, "files": files}
        try:
            result = utils_v2v.v2v_cmd(v2v_params)
            logging.debug(result)
        except error.CmdError, detail:
            raise error.TestFail("Virt v2v failed:\n%s" % str(detail))

        # v2v may be successful, but devices' driver may be not virtio
        error_info = []
        # Check v2v vm on local host
        # Update parameters for local hypervisor and vm
        params['vms'] = vm_name
        params['target'] = "libvirt"
        vm_check = utils_v2v.LinuxVMCheck(test, params, env)
        try:
            if not vm_check.is_disk_virtio():
                error_info.append("Error:disk type was not converted to "
                                  "virtio.")
            if not vm_check.is_net_virtio():
                error_info.append("Error:nic type was not converted to "
                                  "virtio.")
        except (remote.LoginError, virt_vm.VMError), detail:
            error_info.append(str(detail))

        # Close vm for cleanup
        if vm_check.vm is not None and vm_check.vm.is_alive():
            vm_check.vm.destroy()

        # Fixed: error_info/ignore_virtio were collected but never acted
        # on; fail unless the caller asked to ignore the virtio check
        # (mirrors the sibling v2v test earlier in this file).
        if not ignore_virtio and len(error_info):
            raise error.TestFail(error_info)
Пример #34
0
def run(test, params, env):
    """
    Test migration with option --copy-storage-all or --copy-storage-inc.

    Flow: prepare remote disks (file or lvm-over-iscsi), optionally attach
    extra disks to a redefined VM, then migrate with storage copy.  With
    migrate_again=yes, an abnormal condition is injected first, the failed
    migration's cause is cleaned up, and the migration is retried.
    """
    vm = env.get_vm(params.get("main_vm"))
    disk_type = params.get("copy_storage_type", "file")
    # added_disk_type steers how utlv.attach_disks builds the extra disks.
    if disk_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to Gib
    file_size = int(file_size) / 1073741824

    remote_host = params.get("remote_ip", "REMOTE.EXAMPLE")
    local_host = params.get("local_ip", "LOCAL.EXAMPLE")
    remote_user = params.get("remote_user", "root")
    remote_passwd = params.get("remote_pwd", "PASSWORD.EXAMPLE")
    if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
        raise error.TestNAError("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Attach additional disks to vm if disk count big than 1
    disks_count = int(params.get("added_disks_count", 1)) - 1
    if disks_count:
        # Work on a cloned definition so the original VM stays untouched.
        new_vm_name = "%s_smtest" % vm.name
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    vms = [vm]
    if vm.is_dead():
        vm.start()

    # Abnormal parameters
    migrate_again = "yes" == params.get("migrate_again", "no")
    abnormal_type = params.get("abnormal_type")

    try:
        rdm = utils_test.RemoteDiskManager(params)
        vgname = params.get("sm_vg_name", "SMTEST")
        added_disks_list = []
        if disk_type == "lvm":
            target1 = target2 = ""  # For cleanup
            # Create volume group with iscsi
            # For local, target is a device name
            target1 = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=True,
                                                  emulated_image="emulated_iscsi1")
            lv_utils.vg_create(vgname, target1)
            logging.debug("Created VG %s", vgname)
            # For remote, target is real target name
            target2 = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=False,
                                                  emulated_image="emulated_iscsi2")
            logging.debug("Created target: %s", target2)
            # Login on remote host
            remote_device = rdm.iscsi_login_setup(local_host, target2)
            if not rdm.create_vg(vgname, remote_device):
                raise error.TestError("Create VG %s on %s failed."
                                      % (vgname, remote_host))

        all_disks = utlv.attach_disks(vm, file_path, vgname, params)
        # Reserve for cleanup
        added_disks_list = all_disks.keys()
        # Primary disk must be copied too, so add it to the map.
        all_disks[file_path] = file_size
        logging.debug("All disks need to be migrated:%s", all_disks)

        # Inject the abnormal condition on the remote host before migrating.
        if abnormal_type == "occupied_disk":
            occupied_path = rdm.occupy_space(disk_type, file_size,
                                             file_path, vgname, timeout=600)
        if not abnormal_type == "not_exist_file":
            # Pre-create destination images; skipped for the
            # "not_exist_file" case, which tests exactly their absence.
            for disk, size in all_disks.items():
                if disk == file_path:
                    rdm.create_image("file", disk, size, None, None)
                else:
                    rdm.create_image(disk_type, disk, size, vgname,
                                     os.path.basename(disk))

        fail_flag = False
        try:
            logging.debug("Start migration...")
            copied_migration(vms, params)
            # With migrate_again set, the first attempt is expected to fail.
            if migrate_again:
                fail_flag = True
                raise error.TestFail("Migration succeed, but not expected!")
            else:
                return
        except error.TestFail:
            if not migrate_again:
                raise

            # Undo the injected abnormality so the retry can succeed.
            if abnormal_type == "occupied_disk":
                rdm.remove_path(disk_type, occupied_path)
            elif abnormal_type == "not_exist_file":
                for disk, size in all_disks.items():
                    if disk == file_path:
                        rdm.create_image("file", disk, size, None, None)
                    else:
                        rdm.create_image(disk_type, disk, size, vgname,
                                         os.path.basename(disk))
            elif abnormal_type == "migration_interupted":
                params["thread_timeout"] = 120
            # Raise after cleanup
            if fail_flag:
                raise

            # Migrate it again to confirm failed reason
            copied_migration(vms, params)
    finally:
        # Recover created vm
        if vm.is_alive():
            vm.destroy()
        # short-circuit: new_vm_name only exists when disks_count is truthy
        if disks_count and vm.name == new_vm_name:
            vm.undefine()
        for disk in added_disks_list:
            utlv.delete_local_disk(disk_type, disk)
            rdm.remove_path(disk_type, disk)
        rdm.remove_path("file", file_path)
        if disk_type == "lvm":
            rdm.remove_vg(vgname)
            rdm.iscsi_login_setup(local_host, target2, is_login=False)
            try:
                lv_utils.vg_remove(vgname)
            except:
                pass    # let it go to confirm cleanup iscsi device
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated_iscsi1")
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated_iscsi2")
def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).

    Supports "esx" (password file auth) and "xen" (ssh-agent auth)
    hypervisors: verifies the source VM exists via a persistent remote
    virsh session, prepares a local storage pool, runs virt-v2v with
    target "libvirt", then boots the converted guest locally.

    :param test: test object (provides bindir).
    :param params: test parameters dictionary.
    :param env: test environment object.
    :raise error.TestNAError: when EXAMPLE placeholder values remain.
    :raise error.TestError: ssh-agent setup failure or missing source VM.
    :raise error.TestFail: conversion failure (incl. BZ#1146007 detection).
    """
    vm_name = params.get("main_vm")
    # Remote host parameters
    username = params.get("username", "root")
    xen_ip = params.get("xen_ip", "XEN.EXAMPLE")
    xen_pwd = params.get("xen_pwd", "PWD.EXAMPLE")
    vpx_ip = params.get("vpx_ip", "ESX.EXAMPLE")
    vpx_pwd = params.get("vpx_pwd", "PWD.EXAMPLE")
    vpx_pwd_file = params.get("vpx_passwd_file")
    vpx_dc = params.get("vpx_dc", "VPX.DC.EXAMPLE")
    esx_ip = params.get("esx_ip", "ESX.EXAMPLE")
    for param in [vm_name, xen_ip, xen_pwd, vpx_ip, vpx_pwd, vpx_dc, esx_ip]:
        if "EXAMPLE" in param:
            raise error.TestNAError("Please replace %s with real value" %
                                    param)

    hypervisor = params.get("hypervisor")
    pool_type = params.get("pool_type", "dir")
    pool_name = params.get("pool_name", "v2v_test")
    target_path = params.get("target_path", "pool_path")
    emulated_img = params.get("emulated_image_path", "v2v-emulated-img")
    emulated_size = params.get("emulated_image_size", "10G")

    # If target_path is not an abs path, join it to data_dir.tmpdir
    if not os.path.dirname(target_path):
        target_path = os.path.join(data_dir.get_tmp_dir(), target_path)

    # V2V parameters
    input_mode = params.get("input_mode")
    network = params.get("network")
    bridge = params.get("bridge")

    # Set libguestfs environment
    os.environ['LIBGUESTFS_BACKEND'] = 'direct'

    # Extra v2v command options, default to None
    v2v_opts = params.get("v2v_opts")

    # NOTE(review): remote_ip/remote_pwd are only bound for esx/xen; any
    # other hypervisor value would hit a NameError below — confirm the
    # test matrix only supplies these two.
    if hypervisor == "esx":
        # Create password file to access ESX hypervisor.
        remote_ip = vpx_ip
        remote_pwd = vpx_pwd
        # Fixed: use a context manager so the fd is closed even if the
        # write fails (was open/write/close).
        with open(vpx_pwd_file, 'w') as fp:
            fp.write(vpx_pwd)

    if hypervisor == "xen":
        # Set up ssh access using ssh-agent and authorized_keys
        remote_ip = xen_ip
        remote_pwd = xen_pwd
        ssh_key.setup_ssh_key(remote_ip,
                              user=username,
                              port=22,
                              password=remote_pwd)
        try:
            utils_misc.add_identities_into_ssh_agent()
        # Fixed: narrowed from a bare except so KeyboardInterrupt and
        # SystemExit are not swallowed; the agent is still torn down.
        except Exception:
            utils.run("ssh-agent -k")
            raise error.TestError("Failed to start 'ssh-agent'")

    # Create remote uri for remote host
    ruri = utils_v2v.Uri(hypervisor)
    remote_uri = ruri.get_uri(remote_ip, vpx_dc, esx_ip)
    logging.debug("The current virsh uri: %s", remote_uri)

    # Check remote vms
    rvirsh_dargs = {
        'uri': remote_uri,
        'remote_ip': remote_ip,
        'remote_user': username,
        'remote_pwd': remote_pwd
    }
    rvirsh = virsh.VirshPersistent(**rvirsh_dargs)
    if not rvirsh.domain_exists(vm_name):
        rvirsh.close_session()
        raise error.TestError("Couldn't find vm '%s' to convert on "
                              "remote uri '%s'." % (vm_name, remote_uri))
    remote_vm = libvirt_vm.VM(vm_name, params, test.bindir,
                              env.get("address_cache"))
    remote_vm.connect_uri = remote_uri

    # Prepare local libvirt storage pool
    rsp = libvirt_storage.StoragePool(rvirsh)
    pvt = utlv.PoolVolumeTest(test, params)
    lsp = libvirt_storage.StoragePool()

    # Maintain a single params for v2v to avoid duplicate parameters
    v2v_params = {
        "hostname": remote_ip,
        "username": username,
        "password": remote_pwd,
        "hypervisor": hypervisor,
        "storage": pool_name,
        "network": network,
        "bridge": bridge,
        "target": "libvirt",
        "main_vm": vm_name,
        "input_mode": input_mode
    }
    if vpx_dc:
        v2v_params.update({"vpx_dc": vpx_dc})
    if esx_ip:
        v2v_params.update({"esx_ip": esx_ip})
    if v2v_opts:
        v2v_params.update({"v2v_opts": v2v_opts})

    try:
        # Create storage pool for test
        pvt.pre_pool(pool_name,
                     pool_type,
                     target_path,
                     emulated_img,
                     image_size=emulated_size)
        logging.debug(lsp.pool_info(pool_name))
        ret = utils_v2v.v2v_cmd(v2v_params)
        logging.debug("virt-v2v verbose messages:\n%s", ret)
        if ret.exit_status != 0:
            # Known kernel/qemu issue surfaces as I/O error in v2v output.
            if "Input/output error" in ret.stderr:
                raise error.TestFail("Encounter BZ#1146007")
            raise error.TestFail("Convert VM failed")

        # Update parameters for local hypervisor and vm, then start vm
        logging.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        params['main_vm'] = vm_name
        params['target'] = "libvirt"
        vm = env.create_vm("libvirt", "libvirt", vm_name, params, test.bindir)
        vm.start()
    finally:
        if hypervisor == "esx":
            utils.run("rm -rf %s" % vpx_pwd_file)
        else:
            if hypervisor == "xen":
                utils.run("ssh-agent -k")
            rsp.delete_pool(pool_name)
        if rvirsh:
            rvirsh.close_session()
Пример #36
0
def run(test, params, env):
    """
    Test of virt-edit.

    1) Get and init parameters for test.
    2) Prepare environment.
    3) Run virt-edit command and get result.
    4) Recover environment.
    5) Check result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    remote_host = params.get("virt_edit_remote_host", "HOST.EXAMPLE")
    remote_user = params.get("virt_edit_remote_user", "root")
    remote_passwd = params.get("virt_edit_remote_passwd", "PASSWD.EXAMPLE")
    connect_uri = params.get("virt_edit_connect_uri")
    # A configured connect_uri selects remote operation over qemu+ssh;
    # otherwise the (possibly default) local uri is normalized and used.
    if connect_uri is not None:
        uri = "qemu+ssh://%s@%s/system" % (remote_user, remote_host)
        if uri.count("EXAMPLE"):
            raise error.TestNAError("Please config host and passwd first.")
        # Config ssh autologin for it
        ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)
    else:
        uri = libvirt_vm.normalize_connect_uri(
            params.get("connect_uri", "default"))
    start_vm = params.get("start_vm", "no")
    vm_ref = params.get("virt_edit_vm_ref", vm_name)
    file_ref = params.get("virt_edit_file_ref", "/etc/hosts")
    created_img = params.get("virt_edit_created_img", "/tmp/foo.img")
    foo_line = params.get("foo_line", "")
    options = params.get("virt_edit_options")
    options_suffix = params.get("virt_edit_options_suffix")
    status_error = params.get("status_error", "no")
    backup_extension = params.get("virt_edit_backup_extension")
    test_format = params.get("virt_edit_format")

    # virt-edit should not be used when vm is running.
    # (for normal test)
    if vm.is_alive() and start_vm == "no":
        vm.destroy(gracefully=True)

    dom_disk_dict = vm.get_disk_devices()  # TODO
    dom_uuid = vm.get_uuid()
    # Disk format: raw or qcow2
    disk_format = None
    # If object is a disk file path
    is_disk = False

    # Resolve vm_ref: it may name the domain directly, its uuid, its disk
    # image path, or a freshly created scratch image.
    if vm_ref == "domdisk":
        if len(dom_disk_dict) != 1:
            raise error.TestError("Only one disk device should exist on "
                                  "%s:\n%s." % (vm_name, dom_disk_dict))
        disk_detail = dom_disk_dict.values()[0]
        vm_ref = disk_detail['source']
        logging.info("disk to be edit:%s", vm_ref)
        if test_format:
            # Get format:raw or qcow2
            info = utils.run("qemu-img info %s" % vm_ref).stdout
            for line in info.splitlines():
                comps = line.split(':')
                if comps[0].count("format"):
                    disk_format = comps[-1].strip()
                    break
            if disk_format is None:
                raise error.TestError("Cannot get disk format:%s" % info)
        is_disk = True
    elif vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domuuid":
        vm_ref = dom_uuid
    elif vm_ref == "createdimg":
        vm_ref = created_img
        utils.run("dd if=/dev/zero of=%s bs=256M count=1" % created_img)
        is_disk = True

    # Decide whether pass a exprt for virt-edit command.
    # The sed-style expression appends foo_line to every line of file_ref.
    if foo_line != "":
        expr = "s/$/%s/" % foo_line
    else:
        expr = ""

    if backup_extension is not None:
        if options is None:
            options = ""
        options += " -b %s" % backup_extension

    # Stop libvirtd if test need.
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Run test
    result = lgf.virt_edit_cmd(vm_ref,
                               file_ref,
                               is_disk=is_disk,
                               disk_format=disk_format,
                               options=options,
                               extra=options_suffix,
                               expr=expr,
                               connect_uri=uri,
                               debug=True)
    status = result.exit_status

    # Recover libvirtd.
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # rm -f is a no-op when the scratch image was never created.
    utils.run("rm -f %s" % created_img)

    # Remove backup file in vm if it exists
    if backup_extension is not None:
        backup_file = file_ref + backup_extension
        cleanup_file_in_vm(vm, backup_file)

    status_error = (status_error == "yes")
    if status != 0:
        if not status_error:
            raise error.TestFail("Command executed failed.")
    else:
        # Success path: verify the appended line is really in the file.
        if (expr != ""
                and (not login_to_check_foo_line(vm, file_ref, foo_line))):
            raise error.TestFail("Virt-edit to add %s in %s failed."
                                 "Test failed." % (foo_line, file_ref))
Пример #37
0
def run(test, params, env):
    """
    Test command: virsh domjobabort.

    The command can abort the currently running domain job.
    1.Prepare test environment,destroy or suspend a VM.
    2.Do action to get a subprocess(dump, save, managedsave).
    3.Perform virsh domjobabort operation to abort VM's job.
    4.Recover the VM's status and wait for the subprocess over.
    5.Confirm the test result.
    """

    vm_name = params.get("main_vm", "vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    pre_vm_state = params.get("pre_vm_state", "start")
    if start_vm == "no" and vm.is_alive():
        vm.destroy()

    # Instead of "paused_after_start_vm", use "pre_vm_state".
    # After start the VM, wait for some time to make sure the job
    # can be created on this domain.
    if start_vm == "yes":
        vm.wait_for_login()
        if params.get("pre_vm_state") == "suspend":
            vm.pause()

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    # Saved so migration speed can be restored after the test.
    original_speed = virsh.migrate_getspeed(vm_name).stdout.strip()

    def get_subprocess(action, vm_name, file, remote_uri=None):
        """
        Execute background virsh command, return subprocess w/o waiting for exit()

        :param action: virsh sub-command to run (dump/save/managedsave/...).
        :param vm_name: VM's name.
        :param file: virsh command's file option (ignored for managedsave,
                     replaced by remote_uri for migrate).
        :param remote_uri: destination uri, used only for migrate.
        """
        args = ""
        if action == "managedsave":
            file = ""
        elif action == "migrate":
            # Slow down migration for domjobabort
            virsh.migrate_setspeed(vm_name, "1")
            file = remote_uri
            args = "--unsafe"
        command = "virsh %s %s %s %s" % (action, vm_name, file, args)
        logging.debug("Action: %s", command)
        p = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        return p

    action = params.get("jobabort_action", "dump")
    status_error = params.get("status_error", "no")
    job = params.get("jobabort_job", "yes")
    tmp_file = os.path.join(test.tmpdir, "domjobabort.tmp")
    tmp_pipe = os.path.join(test.tmpdir, "domjobabort.fifo")
    vm_ref = params.get("jobabort_vm_ref")
    remote_uri = params.get("jobabort_remote_uri")
    remote_host = params.get("migrate_dest_host")
    remote_user = params.get("migrate_dest_user", "root")
    remote_pwd = params.get("migrate_dest_pwd")
    saved_data = None

    # managedsave writes to libvirt's own save directory, not our fifo.
    if action == "managedsave":
        tmp_pipe = '/var/lib/libvirt/qemu/save/%s.save' % vm.name

    # restore needs a prior save image to restore from.
    if action == "restore":
        virsh.save(vm_name, tmp_file, ignore_status=True)

    if action == "migrate":
        if remote_host.count("EXAMPLE"):
            raise error.TestNAError("Remote host should be configured "
                                    "for migrate.")
        else:
            # Config ssh autologin for remote host
            ssh_key.setup_ssh_key(remote_host,
                                  remote_user,
                                  remote_pwd,
                                  port=22)

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name

    # Get the subprocess of VM.
    # The command's effect is to abort the currently running domain job.
    # So before do "domjobabort" action, we must create a job on the domain.
    process = None
    if job == "yes" and start_vm == "yes" and status_error == "no":
        # A fifo keeps the save/dump job blocked mid-flight so there is
        # something running for domjobabort to abort.
        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        os.mkfifo(tmp_pipe)

        process = get_subprocess(action, vm_name, tmp_pipe, remote_uri)

        saved_data = None
        if action == "restore":
            # Python 2 file() builtin; feed only part of the saved image
            # so the restore job stays in progress.
            saved_data = file(tmp_file, 'r').read(10 * 1024 * 1024)
            f = open(tmp_pipe, 'w')
            f.write(saved_data[:1024 * 1024])
        elif action == "migrate":
            f = None
        else:
            # For save/dump, drain only part of the fifo to stall the job.
            f = open(tmp_pipe, 'r')
            dummy = f.read(1024 * 1024)

    # Give enough time for starting job
    t = 0
    while t < 5:
        jobtype = vm.get_job_type()
        if "None" == jobtype:
            t += 1
            time.sleep(1)
            continue
        elif jobtype is False:
            logging.error("Get job type failed.")
            break
        else:
            logging.debug("Job started: %s", jobtype)
            break
    ret = virsh.domjobabort(vm_ref, ignore_status=True, debug=True)
    status = ret.exit_status

    # Let the blocked subprocess finish, then remove the temp artifacts.
    if process and f:
        if saved_data:
            f.write(saved_data[1024 * 1024:])
        else:
            dummy = f.read()
        f.close()

        if os.path.exists(tmp_pipe):
            os.unlink(tmp_pipe)
        if os.path.exists(tmp_file):
            os.unlink(tmp_file)

    # Recover the environment.
    if pre_vm_state == "suspend":
        vm.resume()
    if process:
        # NOTE(review): poll() is truthy only after the child exited with
        # a non-zero code, so kill() is attempted on finished processes —
        # presumably benign as OSError is swallowed; confirm intent.
        if process.poll():
            try:
                process.kill()
            except OSError:
                pass

    if action == "migrate":
        # Recover migration speed
        virsh.migrate_setspeed(vm_name, original_speed)
        utlv.MigrationTest().cleanup_dest_vm(vm, None, remote_uri)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
def run(test, params, env):
    """
    Test qemu-monitor-command blockjobs by migrating with option
    --copy-storage-all or --copy-storage-inc.

    Defines a throwaway clone of the main VM with modified cpu/memory,
    pre-creates the destination disk image on the remote host, then runs
    a storage-copy migration while driving the requested blockjob
    (complete/pause/resume) through qemu-monitor-command.
    """
    # Blockjob control over qemu-monitor-command needs libvirt >= 1.0.1.
    if not libvirt_version.version_compare(1, 0, 1):
        raise error.TestNAError("Blockjob functions - "
                                "complete,pause,resume are"
                                "not supported in current libvirt version.")

    vm = env.get_vm(params.get("main_vm"))
    cpu_size = int(params.get("cpu_size", "1"))
    memory_size = int(params.get("memory_size", "1048576"))
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to Gib
    file_size = int(file_size) / 1073741824
    # Destination image must match the source format.
    image_format = utils_test.get_image_info(file_path)["format"]

    remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
    remote_user = params.get("remote_user", "root")
    remote_passwd = params.get("migrate_dest_pwd", "PASSWORD.EXAMPLE")
    if remote_host.count("EXAMPLE"):
        raise error.TestNAError("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Define a new vm with modified cpu/memory
    new_vm_name = "%s_blockjob" % vm.name
    if vm.is_alive():
        vm.destroy()
    utlv.define_new_vm(vm.name, new_vm_name)
    try:
        set_cpu_memory(new_vm_name, cpu_size, memory_size)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    except:   # Make sure created vm is cleaned up
        virsh.remove_domain(new_vm_name)
        raise

    rdm_params = {"remote_ip": remote_host, "remote_user": remote_user,
                  "remote_pwd": remote_passwd}
    rdm = utils_test.RemoteDiskManager(rdm_params)

    try:
        # NOTE(review): this re-instantiation duplicates the one above —
        # presumably redundant; confirm before removing.
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
        vm.start()

        # Pre-create the destination disk image for --copy-storage-all.
        rdm.create_image("file", file_path, file_size, None, None,
                         img_frmt=image_format)

        logging.debug("Start migration...")
        copied_migration(vm, params, params.get("qmp_blockjob_type"),
                         primary_target)
    finally:
        # Recover created vm
        if vm.is_alive():
            vm.destroy()
        if vm.name == new_vm_name:
            vm.undefine()
        rdm.remove_path("file", file_path)
        rdm.runner.session.close()
Пример #39
0
def run(test, params, env):
    """
    Test migration under stress.

    Prepares at least two vms with a given cpu/memory configuration,
    optionally starts them and a set of load vms, runs a stress workload,
    migrates the vms between src_uri and dest_uri according to
    migration_type, and finally verifies network reachability of the
    migrated vms on the destination host.

    :param test: test object (provides bindir)
    :param params: test parameter dictionary
    :param env: test environment object
    :raise error.TestNAError: if fewer than two vms are configured or the
                              migration URIs are left at their EXAMPLE
                              placeholder values
    """
    vm_names = params.get("migration_vms").split()
    if len(vm_names) < 2:
        raise error.TestNAError("Provide enough vms for migration first.")

    src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
    if src_uri.count('///') or src_uri.count('EXAMPLE'):
        # BUGFIX: %-format the message; exception constructors do not
        # lazily format extra args the way logging calls do.
        raise error.TestNAError("The src_uri '%s' is invalid" % src_uri)

    dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")
    if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
        raise error.TestNAError("The dest_uri '%s' is invalid" % dest_uri)

    # Instances of the vms that will actually be migrated
    vms = []
    for vm_name in vm_names:
        vms.append(libvirt_vm.VM(vm_name, params, test.bindir,
                                 env.get("address_cache")))

    # vms used only to generate background load
    load_vm_names = params.get("load_vms").split()
    load_vms = []
    for vm_name in load_vm_names:
        load_vms.append(libvirt_vm.VM(vm_name, params, test.bindir,
                                      env.get("address_cache")))
    params['load_vms'] = load_vms

    cpu = int(params.get("smp", 1))
    # "mem" scaled by 1024 — presumably MiB -> KiB for domain XML; confirm
    memory = int(params.get("mem")) * 1024
    stress_type = params.get("migration_stress_type")
    vm_bytes = params.get("stress_vm_bytes")
    stress_args = params.get("stress_args")
    migration_type = params.get("migration_type")
    start_migration_vms = "yes" == params.get("start_migration_vms", "yes")
    thread_timeout = int(params.get("thread_timeout", 120))
    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    prompt = params.get("shell_prompt", r"[\#\$]")

    # Translate symbolic vm_bytes settings into concrete amounts for the
    # stress tool's command line.
    mem_total = utils_memory.memtotal()
    vm_reserved = len(vms) * memory
    if vm_bytes == "half":
        vm_bytes = (mem_total - vm_reserved) / 2
    elif vm_bytes == "shortage":
        # Deliberately oversubscribe by 512MiB to provoke memory pressure
        vm_bytes = mem_total - vm_reserved + 524288
    if vm_bytes is not None:
        params["stress_args"] = stress_args % vm_bytes

    for vm in vms:
        # Keep vm dead while editing its cpu/memory configuration
        if vm.is_alive():
            vm.destroy()
        set_cpu_memory(vm.name, cpu, memory)

    try:
        vm_ipaddr = {}
        if start_migration_vms:
            for vm in vms:
                vm.start()
                vm.wait_for_login()
                vm_ipaddr[vm.name] = vm.get_address()
                # TODO: recover vm if start failed?
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        do_stress_migration(vms, src_uri, dest_uri, stress_type,
                            migration_type, params, thread_timeout)
        # Check network of vms on destination; "cross" migrations end up
        # back on the source, so the destination check is skipped there.
        if start_migration_vms and migration_type != "cross":
            for vm in vms:
                check_dest_vm_network(vm, vm_ipaddr[vm.name], remote_host,
                                      username, password, prompt)
    finally:
        logging.debug("Cleanup vms...")
        for vm_name in vm_names:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get("address_cache"))
            # Remove any leftover domain on the destination first
            utlv.MigrationTest().cleanup_dest_vm(vm, None, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        env.clean_objects()
    def verify_migration_speed(test, params, env):
        """
        Check that migration bandwidth setting is effective by migrating
        twice with different bandwidths and comparing migration times.

        The vms are migrated once at `bandwidth`, then again at
        `second_bandwidth`; the observed time ratio must stay within
        `allowed_delta` of the ratio implied by the bandwidth change.

        :param test: test object (provides bindir)
        :param params: test parameter dictionary
        :param env: test environment object
        :raise error.TestNAError: if the migration URIs are unset
        :raise error.TestFail: if any vm's migration time deviates beyond
                               the allowed delta
        """
        vms = env.get_all_vms()
        src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
        dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")

        if src_uri.count('///') or src_uri.count('EXAMPLE'):
            # BUGFIX: %-format the message; exception constructors do not
            # lazily format extra args the way logging calls do.
            raise error.TestNAError("The src_uri '%s' is invalid" % src_uri)

        if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
            raise error.TestNAError("The dest_uri '%s' is invalid" % dest_uri)

        remote_host = params.get("migrate_dest_host")
        username = params.get("migrate_dest_user", "root")
        password = params.get("migrate_dest_pwd")
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        # Make sure all vms to be migrated are running
        for vm in vms:
            if vm.is_dead():
                vm.start()

        # vms used only to generate background load
        load_vm_names = params.get("load_vms").split()
        load_vms = []
        for vm_name in load_vm_names:
            load_vms.append(libvirt_vm.VM(vm_name, params, test.bindir,
                                          env.get("address_cache")))

        bandwidth = int(params.get("bandwidth", "4"))
        stress_type = params.get("stress_type", "load_vms_booting")
        migration_type = params.get("migration_type", "orderly")
        thread_timeout = int(params.get("thread_timeout", "60"))
        delta = float(params.get("allowed_delta", "0.1"))
        virsh_migrate_timeout = int(params.get("virsh_migrate_timeout", 60))
        # BUGFIX: the original built a tuple
        # ("--live --timeout %s", timeout) instead of an option string,
        # so do_migration received a tuple as its options argument.
        virsh_migrate_options = "--live --timeout %s" % virsh_migrate_timeout

        # First migration with the base bandwidth
        mig_first = utlv.MigrationTest()
        virsh_dargs = {"debug": True}
        for vm in vms:
            set_get_speed(vm.name, bandwidth, virsh_dargs=virsh_dargs)
            vm.wait_for_login()
        utils_test.load_stress(stress_type, vms, params)
        mig_first.do_migration(vms, src_uri, dest_uri, migration_type,
                               options=virsh_migrate_options,
                               thread_timeout=thread_timeout)
        for vm in vms:
            mig_first.cleanup_dest_vm(vm, None, dest_uri)
            # Keep it clean for second migration
            if vm.is_alive():
                vm.destroy()

        # Choose the second bandwidth; speed_times is the factor by which
        # the first migration time is expected to relate to the second.
        second_bandwidth = params.get("second_bandwidth", "times")
        if second_bandwidth == "half":
            second_bandwidth = bandwidth / 2
            speed_times = 2
        elif second_bandwidth == "times":
            second_bandwidth = bandwidth * 2
            speed_times = 0.5
        elif second_bandwidth == "same":
            second_bandwidth = bandwidth
            speed_times = 1

        # Migrate again
        for vm in vms:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
            set_get_speed(vm.name, second_bandwidth, virsh_dargs=virsh_dargs)
        utils_test.load_stress(stress_type, vms, params)
        mig_second = utlv.MigrationTest()
        mig_second.do_migration(vms, src_uri, dest_uri, migration_type,
                                options="--live",
                                thread_timeout=thread_timeout)
        for vm in vms:
            mig_second.cleanup_dest_vm(vm, None, dest_uri)

        # BUGFIX: the original tested fail_info *before* the loop that
        # populates it (a dead check). Collect all results, then check once.
        fail_info = []
        for vm in vms:
            first_time = mig_first.mig_time[vm.name]
            second_time = mig_second.mig_time[vm.name]
            logging.debug("Migration time for %s:\n"
                          "Time with Bandwidth '%s' first: %s\n"
                          "Time with Bandwidth '%s' second: %s", vm.name,
                          bandwidth, first_time, second_bandwidth, second_time)
            # Relative deviation from the expected scaling of migration time
            shift = float(abs(first_time * speed_times - second_time)) / float(second_time)
            logging.debug("Shift:%s", shift)
            if delta < shift:
                fail_info.append("Spent time for migrating %s is intolerable." % vm.name)

        # Check again for speed result
        if len(fail_info):
            raise error.TestFail(fail_info)
# Example #41 (paste-separator artifact; original lines were "Пример #41" / "0")
def run(test, params, env):
    """
    Convert a remote vm to local libvirt(KVM).
    """
    # VM info
    xen_vm_name = params.get("v2v_xen_vm")
    vmware_vm_name = params.get("v2v_vmware_vm")

    # Remote host parameters
    xen_ip = params.get("remote_xen_ip", "XEN.EXAMPLE")
    vmware_ip = params.get("remote_vmware_ip", "VMWARE.EXAMPLE")
    username = params.get("username", "root")
    xen_pwd = params.get("remote_xen_pwd", "PWD.EXAMPLE")
    vmware_pwd = params.get("remote_vmware_pwd", "PWD.EXAMPLE")
    # To decide which type test it is
    remote_hypervisor = params.get("remote_hypervisor")

    # Local pool parameters
    pool_type = params.get("pool_type", "dir")
    pool_name = params.get("pool_name", "v2v_test")
    target_path = params.get("target_path", "pool_path")
    emulated_img = params.get("emulated_image_path", "v2v_emulated.img")
    emulated_size = params.get("emulated_image_size", "10G")

    # If target_path is not an abs path, join it to data_dir.tmpdir
    if os.path.dirname(target_path) is "":
        target_path = os.path.join(data_dir.get_tmp_dir(), target_path)

    # V2V parameters
    input = params.get("input_method")
    files = params.get("config_files")
    network = params.get("network")
    bridge = params.get("bridge")

    # Result check about
    ignore_virtio = "yes" == params.get("ignore_virtio", "no")

    # Create autologin to remote host
    esx_netrc = params.get("esx_netrc") % (vmware_ip, username, vmware_pwd)
    params['netrc'] = esx_netrc
    if remote_hypervisor == "esx":
        remote_ip = vmware_ip
        remote_pwd = vmware_pwd
        vm_name = vmware_vm_name
        if remote_ip.count("EXAMPLE") or remote_pwd.count("EXAMPLE"):
            raise error.TestNAError("Please provide host or password for "
                                    "vmware test.")
        utils_v2v.build_esx_no_verify(params)
    else:
        remote_ip = xen_ip
        remote_pwd = xen_pwd
        vm_name = xen_vm_name
        if remote_ip.count("EXAMPLE") or remote_pwd.count("EXAMPLE"):
            raise error.TestNAError("Please provide host or password for "
                                    "xen test.")
        ssh_key.setup_ssh_key(xen_ip, user=username, port=22,
                              password=xen_pwd)

    # Create remote uri for remote host
    # Remote virt-v2v uri's instance
    ruri = utils_v2v.Uri(remote_hypervisor)
    remote_uri = ruri.get_uri(remote_ip)

    # Check remote vms
    rvirsh_dargs = {'uri': remote_uri, 'remote_ip': remote_ip,
                    'remote_user': username, 'remote_pwd': remote_pwd}
    rvirsh = virsh.VirshPersistent(**rvirsh_dargs)
    if not rvirsh.domain_exists(vm_name):
        rvirsh.close_session()
        raise error.TestFail("Couldn't find vm '%s' to be converted "
                             "on remote uri '%s'." % (vm_name, remote_uri))

    if remote_hypervisor != "esx":
        remote_vm = libvirt_vm.VM(vm_name, params, test.bindir,
                                  env.get("address_cache"))
        remote_vm.connect_uri = remote_uri
        # Remote storage pool's instance
        rsp = libvirt_storage.StoragePool(rvirsh)
        # Put remote vm's disk into a directory storage pool
        prepare_remote_sp(rsp, remote_vm, pool_name)

    # Prepare local libvirt storage pool
    pvt = utlv.PoolVolumeTest(test, params)

    # Local storage pool's instance
    lsp = libvirt_storage.StoragePool()
    try:
        # Create storage pool for test
        pvt.pre_pool(pool_name, pool_type, target_path, emulated_img,
                     emulated_size)
        logging.debug(lsp.pool_info(pool_name))

        # Maintain a single params for v2v to avoid duplicate parameters
        v2v_params = {"hostname": remote_ip, "username": username,
                      "password": remote_pwd, "hypervisor": remote_hypervisor,
                      "storage": pool_name, "network": network,
                      "bridge": bridge, "target": "libvirt", "vms": vm_name,
                      "netrc": esx_netrc, "input": input, "files": files}
        try:
            result = utils_v2v.v2v_cmd(v2v_params)
            logging.debug(result)
        except error.CmdError, detail:
            raise error.TestFail("Virt v2v failed:\n%s" % str(detail))

        # v2v may be successful, but devices' driver may be not virtio
        error_info = []
        # Check v2v vm on local host
        # Update parameters for local hypervisor and vm
        logging.debug("XML info:\n%s", virsh.dumpxml(vm_name))
        params['vms'] = vm_name
        params['target'] = "libvirt"
        vm_check = utils_v2v.LinuxVMCheck(test, params, env)
        try:
            if not vm_check.is_disk_virtio():
                error_info.append("Error:disk type was not converted to "
                                  "virtio.")
            if not vm_check.is_net_virtio():
                error_info.append("Error:nic type was not converted to "
                                  "virtio.")
        except (remote.LoginError, virt_vm.VMError), detail:
            error_info.append(str(detail))