Example #1
    def setup_test_default(case):
        """
        Default setup for test cases

        :param case: test case
        """
        libvirt_version.is_libvirt_feature_supported(params)
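
All of these examples share the same pattern: libvirt_version.is_libvirt_feature_supported(params) is called up front so the test is cancelled early when the installed libvirt lacks a required feature. As a rough sketch of how such a params-driven guard works (the parameter name func_supported_since_libvirt_ver follows the usual avocado-vt cfg convention, but treat it as an assumption here):

from virttest import libvirt_version

def cancel_if_unsupported(test, params):
    # Hedged sketch of a params-driven libvirt version gate; the parameter
    # name below is an assumption based on common avocado-vt cfg files.
    required = params.get('func_supported_since_libvirt_ver')
    if required:
        version = eval(required)  # e.g. "(7, 0, 0)" -> (7, 0, 0)
        if not libvirt_version.version_compare(*version):
            test.cancel("Installed libvirt is older than %s" % (version,))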
Example #2
    def check_environment(params):
        """
        Check the test environment

        :param params: Dictionary with the test parameters
        """
        libvirt_version.is_libvirt_feature_supported(params)
        utils_misc.is_qemu_function_supported(params)
Example #3
def run(test, params, env):
    """
    Run tests with disk target configurations
    """

    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    at_dt = params.get('at_dt')
    cmds_in_guest = eval(params.get('cmds_in_guest'))
    target_rotation = params.get('target_rotation')

    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        if not at_dt:
            libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug(vm_xml.VMXML.new_from_dumpxml(vm_name))
        vm_session = vm.wait_for_login()

        pkg_list = params.get("install_pkgs")
        if pkg_list:
            install_pkg(test, eval(pkg_list), vm_session)

        if at_dt:
            old_parts = utils_disk.get_parts_list(vm_session)
            disk_xml = create_second_disk(params)
            virsh.attach_device(vm_name, disk_xml, debug=True, ignore_status=False)
            pat_in_dumpxml = params.get('pattern_in_dumpxml')
            libvirt_vmxml.check_guest_xml(vm_name, pat_in_dumpxml, status_error=False)
            time.sleep(10)
            added_parts = utils_disk.get_added_parts(vm_session, old_parts)
            if not added_parts or len(added_parts) != 1:
                test.error("Only one new partition is expected in the VM, "
                           "but found {}".format(added_parts))
            cmd = cmds_in_guest[0] % added_parts[0]
            run_cmd_in_guest(test, vm_session, cmd)
            virsh.detach_device(vm_name, disk_xml, debug=True, ignore_status=False)
            cmd = cmds_in_guest[1] % added_parts[0]
            run_cmd_in_guest(test, vm_session, cmd, any_error=True)
            libvirt_vmxml.check_guest_xml(vm_name, pat_in_dumpxml, status_error=True)
        else:
            if cmds_in_guest:
                for cmd_index, cmd in enumerate(cmds_in_guest):
                    any_error = not target_rotation and cmd_index == 0
                    run_cmd_in_guest(test, vm_session, cmd, any_error=any_error)
    finally:
        backup_xml.sync()
        source_file = params.get('source_file')
        if source_file:
            libvirt.delete_local_disk('file', source_file)
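
Example #3 calls a run_cmd_in_guest helper that is not part of the listing. A minimal sketch of what it presumably does, with the signature inferred from the call sites above (so treat it as hypothetical):

def run_cmd_in_guest(test, vm_session, cmd, any_error=False):
    # Hypothetical reconstruction of the helper used in Example #3:
    # run a command in the guest and fail unless the status matches.
    status, output = vm_session.cmd_status_output(cmd)
    if any_error and status == 0:
        test.fail("Command '%s' unexpectedly succeeded: %s" % (cmd, output))
    if not any_error and status != 0:
        test.fail("Command '%s' failed in guest: %s" % (cmd, output))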
Example #4
def run(test, params, env):
    '''
    1. Check if the crypto device on the host is available for passthrough
    2. Pass through the crypto device
    3. Create the mdev
    4. Confirm the mdev was created successfully
    5. Confirm device availability in the guest
    6. Destroy the mdev
    7. Confirm the mdev was destroyed successfully

    NOTE: It can take a while after loading vfio_ap for the
          matrix device to become available due to current
          performance issues with the API if there are several
          mdev definitions already available. The test supposes
          no other mdev devices have been defined yet in order
          to avoid complexity in the test code.

    :param test: test object
    :param params: Dict with test parameters
    :param env: Dict with the test environment
    :return:
    '''

    libvirt_version.is_libvirt_feature_supported(params)
    matrix_cap = 'ap_matrix'
    device_file = None
    mask_helper = None

    info = CryptoDeviceInfoBuilder.get()
    if int(info.entries[0].hwtype) < HWTYPE:
        test.cancel("vfio-ap requires HWTYPE bigger than %s." % HWTYPE)
    uuid = str(uuid1())
    adapter = info.entries[0].card
    domain = info.entries[1].domain
    try:
        if not find_devices_by_cap(test, matrix_cap):
            load_vfio_ap()
        if find_devices_by_cap(test, matrix_cap):
            devices = [info.domains[0]]
            mask_helper = APMaskHelper.from_infos(devices)
            device_file = create_nodedev_from_xml(uuid, adapter, domain)
        else:
            test.fail("Could not get %s correctly through nodedev-API" %
                      matrix_cap)
        check_device_was_created(test, uuid, adapter, domain)
        # the test assumes there's no other mdev
        dev_name = find_devices_by_cap(test, 'mdev')[0]
        destroy_nodedev(dev_name)
        check_device_was_destroyed(test)
    finally:
        if mask_helper:
            mask_helper.return_to_host_all()
        unload_vfio_ap()
        if device_file:
            os.remove(device_file)
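
Examples #4 and #6 rely on a find_devices_by_cap helper that is not shown. A plausible sketch built on virsh's nodedev API follows; the real helper in the test suite may differ:

from virttest import virsh

def find_devices_by_cap(test, cap):
    # Sketch: list node devices exposing a capability, e.g. 'ap_matrix'
    # or 'mdev', via 'virsh nodedev-list --cap <cap>'.
    result = virsh.nodedev_list(cap=cap, debug=True)
    if result.exit_status:
        test.error("nodedev-list --cap %s failed: %s" % (cap, result.stderr))
    return result.stdout_text.strip().splitlines()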
Example #5
def run(test, params, env):
    """
    Test cases for --reset-nvram option
    """

    libvirt_version.is_libvirt_feature_supported(params)
    case = params.get('test_case', '')
    vm_name = params.get('main_vm', '')
    guest_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bk_guest_xml = guest_xml.copy()
    run_test = eval('test_%s' % case)

    try:
        run_test(guest_xml, params, test)
    finally:
        bk_guest_xml.sync(options='--nvram')
        teardown_reset_nvram(params)
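
The eval('test_%s' % case) idiom in Example #5 resolves a case-specific function by name. An equivalent lookup that avoids eval (assuming the test_* functions are defined at module level) could look like:

# Sketch: resolve 'test_<case>' without eval; assumes module-level functions.
run_test = globals().get('test_%s' % case)
if run_test is None:
    test.error("Unknown test case: %s" % case)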
Example #6
def run(test, params, env):
    '''
    1. Check if the crypto device on the host is available for passthrough
    2. Pass through the crypto device
    3. Create the mdev
    4. Confirm the mdev was created successfully
    5. Confirm device availability in the guest
    6. Destroy the mdev
    7. Confirm the mdev was destroyed successfully

    :param test: test object
    :param params: Dict with test parameters
    :param env: Dict with the test environment
    :return:
    '''

    libvirt_version.is_libvirt_feature_supported(params)
    matrix_cap = 'ap_matrix'
    device_file = None

    info = CryptoDeviceInfoBuilder.get()
    if int(info.entries[0].hwtype) < HWTYPE:
        test.cancel("vfio-ap requires HWTYPE bigger than %s." % HWTYPE)
    uuid = str(uuid1())
    adapter = info.entries[0].card
    domain = info.entries[1].domain
    try:
        if not find_devices_by_cap(test, matrix_cap):
            load_vfio_ap()
        if find_devices_by_cap(test, matrix_cap):
            devices = [info.domains[0]]
            APMaskHelper.from_infos(devices)
            device_file = create_nodedev_from_xml(uuid, adapter, domain)
        else:
            test.fail("Could not get %s correctly through nodedev-API" %
                      matrix_cap)
        check_device_was_created(test, uuid, adapter, domain)
        # the test assumes there's no other mdev
        dev_name = find_devices_by_cap(test, 'mdev')[0]
        destroy_nodedev(dev_name)
        check_device_was_destroyed(test)
    finally:
        unload_vfio_ap()
        if device_file:
            os.remove(device_file)
Example #7
    def check_environment(vm, params):
        """
        Check the test environment

        :param vm: VM object
        :param params: Dictionary with the test parameters
        """
        libvirt_version.is_libvirt_feature_supported(params)
        utils_misc.is_qemu_function_supported(params)

        guest_required_kernel = params.get('guest_required_kernel')
        if guest_required_kernel:
            if not vm.is_alive():
                vm.start()
            vm_session = vm.wait_for_login()
            vm_kerv = vm_session.cmd_output('uname -r').strip().split('-')[0]
            vm_session.close()
            if vm_kerv not in VersionInterval(guest_required_kernel):
                test.cancel("Got guest kernel version:%s, which is not in %s" %
                            (vm_kerv, guest_required_kernel))

        if params.get("start_vm", "no") == "no":
            vm.destroy()
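
The VersionInterval check in Example #7 uses mathematical interval notation for kernel version ranges. A small illustration of the membership test (the interval string below is made up for the example):

from virttest.utils_version import VersionInterval

# '[4.18.0, )' means "4.18.0 or newer"; membership is a simple 'in' test.
assert "5.14.0" in VersionInterval("[4.18.0, )")
assert "3.10.0" not in VersionInterval("[4.18.0, )")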
Example #8
def run(test, params, env):
    """
    Do test for vol-download and vol-upload

    Basic steps are
    1. Create pool with type defined in cfg
    2. Create image with writing data in it
    3. Get md5 value before operation
    4. Do vol-download/upload with options(offset, length)
    5. Check md5 value after operation
    """

    pool_type = params.get("vol_download_upload_pool_type")
    pool_name = params.get("vol_download_upload_pool_name")
    pool_target = params.get("vol_download_upload_pool_target")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("vol_download_upload_vol_name")
    file_name = params.get("vol_download_upload_file_name")
    file_path = os.path.join(data_dir.get_tmp_dir(), file_name)
    offset = params.get("vol_download_upload_offset")
    length = params.get("vol_download_upload_length")
    capacity = params.get("vol_download_upload_capacity")
    allocation = params.get("vol_download_upload_allocation")
    frmt = params.get("vol_download_upload_format")
    operation = params.get("vol_download_upload_operation")
    create_vol = ("yes" == params.get("vol_download_upload_create_vol", "yes"))
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    sparse_option_support = "yes" == params.get("sparse_option_support", "yes")
    with_clusterSize = "yes" == params.get("with_clusterSize")
    vol_clusterSize = params.get("vol_clusterSize", "64")
    vol_clusterSize_unit = params.get("vol_clusterSize_unit")
    vol_format = params.get("vol_format", "qcow2")
    libvirt_version.is_libvirt_feature_supported(params)

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.error("API acl test not supported in current"
                       " libvirt version.")
    # Destroy VM.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target, "volumetest",
                     pre_disk_vol=["50M"])
        # According to BZ#1138523, we need to inspect the right name
        # (disk partition) for the new volume
        if pool_type == "disk":
            vol_name = utlv.new_disk_vol_name(pool_name)
            if vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)
        if create_vol:
            if b_luks_encrypt:
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS format not supported in "
                                "current libvirt version")
                params['sec_volume'] = os.path.join(pool_target, vol_name)
                luks_sec_uuid = utlv.create_secret(params)
                ret = virsh.secret_set_value(luks_sec_uuid,
                                             encryption_password,
                                             encode=True)
                utlv.check_exit_status(ret)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(capacity)
                vol_arg['allocation'] = int(allocation)
                if with_clusterSize:
                    vol_arg['format'] = vol_format
                    vol_arg['clusterSize'] = int(vol_clusterSize)
                    vol_arg['clusterSize_unit'] = vol_clusterSize_unit
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                pvt.pre_vol(vol_name, frmt, capacity, allocation, pool_name)

        virsh.pool_refresh(pool_name, debug=True)
        vol_list = virsh.vol_list(pool_name, debug=True).stdout.strip()
        # iscsi volume name is different from others
        if pool_type == "iscsi":
            # Due to BZ 1843791, the volume cannot be obtained sometimes.
            if len(vol_list.splitlines()) < 3:
                test.fail("Failed to get iscsi type volume.")
            vol_name = vol_list.split('\n')[2].split()[0]

        vol_path = virsh.vol_path(vol_name, pool_name,
                                  ignore_status=False).stdout.strip()
        logging.debug("vol_path is %s", vol_path)

        # Add command options
        options = ""
        if pool_type is not None:
            options += " --pool %s" % pool_name
        if offset is not None:
            options += " --offset %s" % offset
            offset = int(offset)
        else:
            offset = 0

        if length is not None:
            options += " --length %s" % length
            length = int(length)
        else:
            length = 0
        logging.debug("%s options are %s", operation, options)

        if operation == "upload":
            # write data to file
            write_file(file_path)

            # Set length so that offset + length can be calculated in the
            # following get_pre_post_digest() and digest() calls
            if length == 0:
                length = 1048576

            def get_pre_post_digest():
                """
                Get digests of the pre region (before offset) and the post
                region (after offset + length)

                :return: pre digest and post digest
                """
                # Get digest of pre region before offset
                if offset != 0:
                    digest_pre = digest(vol_path, 0, offset)
                else:
                    digest_pre = 0
                logging.debug("pre region digest read from %s 0-%s is %s",
                              vol_path, offset, digest_pre)
                # Get digest of post region after offset+length
                digest_post = digest(vol_path, offset + length, 0)
                logging.debug("post region digest read from %s %s-0 is %s",
                              vol_path, offset + length, digest_post)

                return (digest_pre, digest_post)

            # Get pre and post digest before operation for compare
            (ori_pre_digest, ori_post_digest) = get_pre_post_digest()
            ori_digest = digest(file_path, 0, 0)
            logging.debug("ori digest read from %s is %s", file_path,
                          ori_digest)

            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume upload
            result = virsh.vol_upload(vol_name, file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                (aft_pre_digest, aft_post_digest) = get_pre_post_digest()
                aft_digest = digest(vol_path, offset, length)
                logging.debug("aft digest read from %s is %s", vol_path,
                              aft_digest)

                # Compare the pre and post part before and after
                if ori_pre_digest == aft_pre_digest and \
                   ori_post_digest == aft_post_digest:
                    logging.info("file pre and aft digest match")
                else:
                    test.fail("file pre or post digests do not"
                              "match, in %s", operation)

        if operation == "download":
            # Write data to volume
            write_file(vol_path)

            # Record the digest value before operation
            ori_digest = digest(vol_path, offset, length)
            logging.debug("original digest read from %s is %s", vol_path,
                          ori_digest)

            process.run("touch %s" % file_path, ignore_status=True, shell=True)
            if setup_libvirt_polkit:
                process.run("chmod 666 %s" % file_path, ignore_status=True,
                            shell=True)

            # Do volume download
            result = virsh.vol_download(vol_name, file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            if result.exit_status == 0:
                # Get digest after operation
                aft_digest = digest(file_path, 0, 0)
                logging.debug("new digest read from %s is %s", file_path,
                              aft_digest)

        if operation != "mix":
            if result.exit_status != 0:
                test.fail("Fail to %s volume: %s" %
                          (operation, result.stderr))
            # Compare the change part on volume and file
            if ori_digest == aft_digest:
                logging.info("file digests match, volume %s succeed", operation)
            else:
                test.fail("file digests do not match, volume %s failed"
                          % operation)

        if operation == "mix":
            target = params.get("virt_disk_device_target", "vdb")
            disk_file_path = os.path.join(pool_target, file_name)

            # Create one disk xml and attach it to VM.
            custom_disk_xml = create_disk('file', disk_file_path, 'raw', 'file',
                                          'disk', target, 'virtio')
            ret = virsh.attach_device(vm_name, custom_disk_xml.xml,
                                      flagstr="--config", debug=True)
            libvirt.check_exit_status(ret)
            if vm.is_dead():
                vm.start()

            # Write 100M data into disk.
            data_size = 100
            write_disk(test, vm, target, data_size)
            data_size_in_bytes = data_size * 1024 * 1024

            # Refresh directory pool.
            virsh.pool_refresh(pool_name, debug=True)

            # Download volume to local with sparse option.
            download_spare_file = "download-sparse.raw"
            download_file_path = os.path.join(data_dir.get_tmp_dir(), download_spare_file)
            options += " --sparse"
            result = virsh.vol_download(file_name, download_file_path, options,
                                        unprivileged_user=unpri_user,
                                        uri=uri, debug=True)
            libvirt.check_exit_status(result)

            # Check the downloaded image size.
            one_g_in_bytes = 1073741824
            download_img_info = utils_misc.get_image_info(download_file_path)
            download_disk_size = int(download_img_info['dsize'])
            if (download_disk_size < data_size_in_bytes or
               download_disk_size >= one_g_in_bytes):
                test.fail("download image size:%d is less than the generated "
                          "data size:%d or greater than or equal to 1G."
                          % (download_disk_size, data_size_in_bytes))

            # Create one upload sparse image file.
            upload_sparse_file = "upload-sparse.raw"
            upload_file_path = os.path.join(pool_target, upload_sparse_file)
            libvirt.create_local_disk('file', upload_file_path, '1', 'raw')

            # Refresh directory pool.
            virsh.pool_refresh(pool_name, debug=True)
            # Do volume upload, uploading the sparse file downloaded earlier.
            result = virsh.vol_upload(upload_sparse_file, download_file_path, options,
                                      unprivileged_user=unpri_user,
                                      uri=uri, debug=True)
            libvirt.check_exit_status(result)
            upload_img_info = utils_misc.get_image_info(upload_file_path)
            upload_disk_size = int(upload_img_info['dsize'])
            if (upload_disk_size < data_size_in_bytes or
               upload_disk_size >= one_g_in_bytes):
                test.fail("upload image size:%d is less than the generated "
                          "data size:%d or greater than or equal to 1G."
                          % (upload_disk_size, data_size_in_bytes))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        pvt.cleanup_pool(pool_name, pool_type, pool_target, "volumetest")
        for secret_uuid in set(secret_uuids):
            virsh.secret_undefine(secret_uuid)
        if os.path.isfile(file_path):
            os.remove(file_path)
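
Example #8 leans on a digest(path, offset, length) helper that is not included; from the call sites, it returns a checksum of a byte region, with length 0 meaning "read to the end". A minimal sketch under that assumption:

import hashlib

def digest(path, offset, length):
    # Hypothetical reconstruction: md5 of a file region; length == 0 is
    # treated as "read to end of file", matching the calls in Example #8.
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        f.seek(offset)
        md5.update(f.read(length) if length else f.read())
    return md5.hexdigest()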
Example #9
def run(test, params, env):
    """
    Test vm backingchain, blockcopy
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    case = params.get('case', '')

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    libvirt_version.is_libvirt_feature_supported(params)

    file_to_del = []
    tmp_dir = data_dir.get_data_dir()

    try:
        if case:
            if case == 'reuse_external':
                # Create a transient vm for test
                vm.undefine()
                virsh.create(vmxml.xml)

                all_disks = vmxml.get_disk_source(vm_name)
                if not all_disks:
                    test.error('No disk file found in the VM.')
                image_file = all_disks[0].find('source').get('file')
                disk_dev = all_disks[0].find('target').get('dev')
                logging.debug('Image file of vm: %s', image_file)

                # Get image info
                image_info = utils_misc.get_image_info(image_file)
                logging.info('Image info: %s', image_info)

                # Get Virtual size of the image file
                vsize = image_info['vsize'] / 1073741824.0
                logging.info('Virtual size of image file: %f', vsize)

                new_image_size = vsize
                image_dir = os.path.dirname(image_file)
                new_image_path = os.path.join(
                    image_dir,
                    'new_image_' + utils_misc.generate_random_string(3))
                file_to_del.append(new_image_path)

                # Create new image file
                cmd_image_create = 'qemu-img create -f qcow2 %s %fG' % (
                    new_image_path, new_image_size)
                process.run(cmd_image_create, shell=True, verbose=True)

                # Do blockcopy with --reuse-external option
                virsh.blockcopy(vm_name,
                                disk_dev,
                                new_image_path,
                                options='--verbose --wait --reuse-external',
                                debug=True,
                                ignore_status=False)
                virsh.blockjob(vm_name,
                               disk_dev,
                               options='--pivot',
                               debug=True,
                               ignore_status=False)
                logging.debug('Current vm xml: %s', vmxml)

                # Current disk source file should be new image
                cur_disks = vmxml.get_disk_source(vm_name)
                cur_sfile = cur_disks[0].find('source').get('file')
                logging.debug('Now disk source file is: %s', cur_sfile)
                if cur_sfile.strip() != new_image_path:
                    test.fail('Disk source file is not updated.')
            if case == 'custom_cluster_size':

                def update_vm_with_cluster_disk():
                    """
                    Update the vm's first disk with an image that has a
                    customized cluster size

                    :return: The source image params
                    """
                    source_img_params = params.copy()
                    source_img_params['image_name'] = params.get(
                        'source_image_name', 'source_image')
                    source_img = qemu_storage.QemuImg(source_img_params,
                                                      tmp_dir, '')
                    source_img_path, _ = source_img.create(source_img_params)
                    file_to_del.append(source_img_path)
                    source_img_params['disk_source_name'] = source_img_path
                    libvirt.set_vm_disk(vm, source_img_params)
                    return source_img_params

                source_img_params = update_vm_with_cluster_disk()
                all_disks = vmxml.get_disk_source(vm_name)
                if not all_disks:
                    test.error('No disk file found in the VM.')
                disk_dev = all_disks[0].find('target').get('dev')

                # Blockcopy the source image to the target image path
                target_img_params = source_img_params.copy()
                target_img_name = params.get('target_image_name',
                                             'target_image')
                target_img_params['image_name'] = target_img_name
                target_img_path = os.path.join(tmp_dir,
                                               target_img_name + '.qcow2')
                file_to_del.append(target_img_path)
                virsh.blockcopy(vm_name,
                                disk_dev,
                                target_img_path,
                                options='--verbose --wait --transient-job',
                                debug=True,
                                ignore_status=False)
                target_img = qemu_storage.QemuImg(target_img_params, tmp_dir,
                                                  '')
                target_img_info = json.loads(
                    target_img.info(force_share=True, output='json'))

                # Compare the source and target images' cluster size
                source_img_cluster = str(
                    source_img_params.get('image_cluster_size'))
                target_img_cluster = str(target_img_info['cluster-size'])
                if source_img_cluster != target_img_cluster:
                    test.fail("Images have different cluster size:\n"
                              "Source image cluster size: %s\n"
                              "Target image cluster size: %s" %
                              (source_img_cluster, target_img_cluster))

                # Abort the blockcopy job
                virsh.blockjob(vm_name,
                               disk_dev,
                               options='--abort',
                               debug=True,
                               ignore_status=False)

    finally:
        if case == 'reuse_external':
            # Recover vm and remove the transient vm
            virsh.destroy(vm_name, debug=True)
            virsh.define(bkxml.xml, debug=True)
        bkxml.sync()

        # Remove files to be deleted
        if file_to_del:
            for item in file_to_del:
                if os.path.exists(item):
                    os.remove(item)
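
The cluster-size comparison in Example #9 goes through qemu_storage.QemuImg. The same value can also be read directly from qemu-img info; a small standalone sketch (the image path is illustrative):

import json
from avocado.utils import process

def get_cluster_size(image_path):
    # Return the cluster size reported by 'qemu-img info' for a qcow2
    # image, or None if the field is absent (e.g. for raw images).
    out = process.run("qemu-img info --output=json %s" % image_path,
                      shell=True).stdout_text
    return json.loads(out).get("cluster-size")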
Example #10
def run(test, params, env):
    """
    Test domain lifecycle
    """
    def setup_default():
        """
        Default setup
        """
        logging.debug("Remove VM's interface devices.")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')

    def teardown_default():
        """
        Default cleanup
        """
        pass

    def setup_vdpa():
        """
        Setup vDPA environment
        """
        setup_default()
        test_env_obj = None
        if test_target == "simulator":
            test_env_obj = utils_vdpa.VDPASimulatorTest()
        else:
            pf_pci = utils_vdpa.get_vdpa_pci()
            test_env_obj = utils_vdpa.VDPAOvsTest(pf_pci)
        test_env_obj.setup()
        return test_env_obj

    def teardown_vdpa():
        """
        Cleanup vDPA environment
        """
        if test_target != "simulator":
            service.Factory.create_service("NetworkManager").restart()
        if test_obj:
            test_obj.cleanup()

    def run_test(dev_type, params, test_obj=None):
        """
        Test domain lifecycle

        1) Start the vm and check network
        2) Destroy and start the VM, and check network
        3) Save and restore, and check network
        4) Suspend and resume, and check network
        5) Reboot the VM and check the network
        """
        # Setup Iface device
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_dict = eval(params.get('iface_dict', '{}'))
        iface_dev = interface_base.create_iface(dev_type, iface_dict)
        libvirt.add_vm_device(vmxml, iface_dev)

        logging.info("Start a VM with a '%s' type interface.", dev_type)
        vm.start()
        vm.wait_for_serial_login(timeout=240).close()
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 **params)

        logging.info("Destroy and start the VM.")
        virsh.destroy(vm.name, **VIRSH_ARGS)
        virsh.start(vm.name, **VIRSH_ARGS)
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 config_vdpa=True,
                                                 **params)

        logging.info("Save the VM.")
        save_error = "yes" == params.get("save_error", "no")
        save_path = os.path.join(data_dir.get_tmp_dir(), vm.name + '.save')
        res = virsh.save(vm.name, save_path, debug=True)
        libvirt.check_exit_status(res, expect_error=save_error)
        if not save_error:
            logging.info("Restore vm.")
            virsh.restore(save_path, **VIRSH_ARGS)
            check_points.check_network_accessibility(vm,
                                                     test_obj=test_obj,
                                                     config_vdpa=False,
                                                     **params)

        logging.info("Suspend and resume the vm.")
        virsh.suspend(vm.name, **VIRSH_ARGS)
        if not libvirt.check_vm_state(vm_name, "paused"):
            test.fail("VM should be paused!")
        virsh.resume(vm.name, **VIRSH_ARGS)
        if not libvirt.check_vm_state(vm_name, "running"):
            test.fail("VM should be running!")
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 config_vdpa=False,
                                                 **params)

        logging.debug("Reboot VM and check network.")
        virsh.reboot(vm.name, **VIRSH_ARGS)
        check_points.check_network_accessibility(vm,
                                                 test_obj=test_obj,
                                                 config_vdpa=False,
                                                 **params)

    libvirt_version.is_libvirt_feature_supported(params)
    utils_misc.is_qemu_function_supported(params)

    # Variable assignment
    test_target = params.get('test_target', '')
    dev_type = params.get('dev_type', '')

    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    backup_vmxml = vmxml.copy()

    setup_test = eval("setup_%s" % dev_type) if "setup_%s" % dev_type in \
        locals() else setup_default
    teardown_test = eval("teardown_%s" % dev_type) if "teardown_%s" % \
        dev_type in locals() else teardown_default

    test_obj = None
    try:
        # Execute test
        test_obj = setup_test()
        run_test(dev_type, params, test_obj=test_obj)

    finally:
        backup_vmxml.sync()
        teardown_test()
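
Example #10 references a module-level VIRSH_ARGS constant that the listing omits. In tests of this family it is usually a dict of shared virsh keyword arguments; the exact value here is an assumption:

# Assumed definition of the VIRSH_ARGS constant used throughout Example #10.
VIRSH_ARGS = {'debug': True, 'ignore_status': False}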
Example #11
def run(test, params, env):
    """
    Test bridge support from network

    1) create a linux bridge and connect a physical interface to it
    2) define nwfilter with "vdsm-no-mac-spoofing"
    3) redefine the vm with the newly created bridge and filter
    4) check if guest can get public ip after vm start
    5) check if guest and host can ping each other
    6) check if guest and host can ping outside
    7) start another vm connected to the same bridge
    8) check if the 2 guests can ping each other
    """

    def create_bridge_network(br_name, net_name, inbound="{'average':'0'}", outbound="{'average':'0'}"):
        """
        Define and start the bridge type network
        """
        # check if network with the same name already exists
        output_all = virsh.net_list("--all").stdout.strip()
        if re.search(net_name, output_all):
            test.cancel("Network with the same name already exists!")
        test_xml = network_xml.NetworkXML(network_name="%s" % net_name)
        test_xml.forward = {"mode": "bridge"}
        test_xml.bridge = {"name": br_name}
        test_xml.bandwidth_inbound = eval(inbound)
        test_xml.bandwidth_outbound = eval(outbound)
        logging.debug("The network's xml is %s", test_xml)
        test_xml.create()

    def define_nwfilter(filter_name):
        """
        Define nwfilter vdsm-no-mac-spoofing with content like:
        <filter name='vdsm-no-mac-spoofing' chain='root'>
            <filterref filter='no-mac-spoofing'/>
            <filterref filter='no-arp-mac-spoofing'/>
        </filter>

        :param filter_name: the name of nwfilter
        :return: filter created or raise exception
        """
        filter_uuid = params.get("filter_uuid", "11111111-b071-6127-b4ec-111111111111")
        filter_params = {"filter_name": "vdsm-no-mac-spoofing",
                         "filter_chain": "root",
                         "filter_uuid": filter_uuid,
                         "filterref_name_1": "no-mac-spoofing",
                         "filterref_name_2": "no-arp-mac-spoofing"}
        filter_xml = libvirt.create_nwfilter_xml(filter_params).xml
        # Run command
        result = virsh.nwfilter_define(filter_xml, ignore_status=True, debug=True)
        if result.exit_status:
            test.fail("Failed to define nwfilter with %s" % filter_xml)

    def ping(src_ip, dest_ip, ping_count, timeout, session=None):
        """
        Wrap of ping

        :param src_ip: source address
        :param dest_ip: destination address
        :param ping_count: count of icmp packet
        :param timeout: timeout for the ping command
        :param session: local execution or session to execute the ping command
        :return: ping succeed or raise exception
        """
        status, output = utils_net.ping(dest=dest_ip, count=ping_count,
                                        interface=src_ip, timeout=timeout,
                                        session=session, force_ipv4=True)
        if status:
            test.fail("Fail to ping %s from %s" % (dest_ip, src_ip))

    def check_net_functions(guest_ip, ping_count, ping_timeout, guest_session, host_ip, remote_url, endpoint_ip):
        # make sure host network works well
        # host ping remote url
        ping(host_ip, remote_url, ping_count, ping_timeout)
        # host ping guest
        ping(host_ip, guest_ip, ping_count, ping_timeout)
        # guest ping host
        ping(guest_ip, host_ip, ping_count, ping_timeout, session=guest_session)
        # guest ping remote url
        ping(guest_ip, remote_url, ping_count, ping_timeout, session=guest_session)
        # guest ping endpoint
        ping(guest_ip, endpoint_ip, ping_count, ping_timeout, session=guest_session)

    # Get test params
    bridge_name = params.get("bridge_name", "test_br0")
    filter_name = params.get("filter_name", "vdsm-no-mac-spoofing")
    ping_count = params.get("ping_count", "5")
    ping_timeout = float(params.get("ping_timeout", "10"))
    iface_name = utils_net.get_net_if(state="UP")[0]
    bridge_script = NETWORK_SCRIPT + bridge_name
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(data_dir.get_tmp_dir(), "iface-%s.bk" % iface_name)
    attach_interface = "yes" == params.get("attach_interface", "no")
    iface_model = params.get("iface_model", "virtio")
    iface_source = eval(params.get("iface_source", "{'bridge':'test_br0'}"))
    iface_type = params.get("iface_type", 'bridge')
    iface_target = params.get("iface_target", "br_target")
    iface_alias = params.get("iface_alias", None)
    hotplug = "yes" == params.get("hotplug", "no")
    iface_driver = params.get("iface_driver", None)
    reconnect_tap = "yes" == params.get("reconnect_tap", "no")
    restart_net = "yes" == params.get("restart_net", "no")
    start_vm2 = "yes" == params.get("start_vm2", "no")
    create_network = "yes" == params.get("create_network", "no")
    update_device = "yes" == params.get("update_with_diff_type", "no")
    test_qos = "yes" == params.get("test_qos", "no")
    test_net_qos = "yes" == params.get("test_net_qos", "no")
    iface_inbound = params.get("iface_bandwidth_inbound", "{'average':'0'}")
    iface_outbound = params.get("iface_bandwidth_outbound", "{'average':'0'}")
    net_inbound = params.get("net_bandwidth_inbound", "{'average':'0'}")
    net_outbound = params.get("net_bandwidth_outbound", "{'average':'0'}")

    libvirt_version.is_libvirt_feature_supported(params)
    vms = params.get("vms").split()
    vm1_name = vms[0]
    vm1 = env.get_vm(vm1_name)

    if start_vm2:
        if len(vms) <= 1:
            test.cancel("Need two VMs to test")
        else:
            vm2_name = vms[1]
        vm2 = env.get_vm(vm2_name)
        vm2_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm2_name)

    # Back up the interface script
    if os.path.exists(iface_script):
        process.run("cp %s %s" % (iface_script, iface_script_bk),
                    shell=True, verbose=True)
    # Back up vm xml
    vm1_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm1_name)

    # Stop NetworkManager service
    NM_service = service.Factory.create_service("NetworkManager")
    NM_status = NM_service.status()
    if not NM_status:
        NM_service.start()
    mac = utils_net.generate_mac_address_simple()

    try:
        if libvirt.check_iface(bridge_name, "exists", "--all"):
            test.cancel("The bridge %s already exist" % bridge_name)
        s, o = utils_net.create_linux_bridge_tmux(bridge_name, iface_name)
        if s:
            test.fail("Failed to create linux bridge on the host. Status: %s Stdout: %s" % (s, o))
        define_nwfilter(filter_name)
        if create_network:
            create_bridge_network(bridge_name, iface_source["network"], net_inbound, net_outbound)
        if hotplug:
            err_msgs = ("No more available PCI slots",
                        "No more available PCI addresses")
            # delete the original interface on the vm before hot-plug
            if vm1.is_alive():
                vm1.destroy()
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm1_name)
            iface_xml = vmxml.get_devices('interface')[0]
            logging.debug("Delete the original interface")
            vmxml.del_device(iface_xml)
            vmxml.sync()
            vm1.start()
            # do hot-plug
            if attach_interface:
                logging.info("Try to hot-plug interface")
                options = ("%s %s --model %s --mac %s" %
                           (iface_type, iface_source['bridge'],
                            iface_model, mac))
                if test_qos:
                    inbound_value = ','.join(eval(iface_inbound).values())
                    outbound_value = ','.join(eval(iface_outbound).values())
                    options = ("%s %s --model %s --mac %s --inbound %s --outbound %s" %
                               (iface_type, iface_source['bridge'], iface_model, mac, inbound_value, outbound_value))
                ret = virsh.attach_interface(vm1_name, options,
                                             ignore_status=True)
            else:
                logging.info("Try to hot-plug device")
                target = str({'dev': iface_target})
                iface_alias = str({'name': iface_alias})
                vm_iface_source = str(iface_source)
                iface_params = {"type": iface_type, "source": vm_iface_source, "filter": filter_name, "mac": mac,
                                'alias': iface_alias, 'target': target, 'model': iface_model,
                                'driver': iface_driver}
                attach_xml = interface.Interface(iface_params['type'])
                attach_xml.xml = libvirt.modify_vm_iface(vm1_name, 'get_xml', iface_params)
                ret = virsh.attach_device(vm1_name, attach_xml.xml, ignore_status=True, debug=True)
            if ret.exit_status:
                if any([msg in ret.stderr for msg in err_msgs]):
                    test.error("No more pci slots, can't attach more devices")
                else:
                    test.fail("Failed to attach-interface: %s" % ret.stderr.strip())
            else:
                logging.debug("Hot-plug interface or device pass")
                if update_device:
                    # As the interface type will change to actual type "bridge" in live xml, we need to ensure
                    # the update with original "network" type will not fail.
                    # Try to delete the nwfilter with original type in iface_params
                    update_xml = interface.Interface(iface_type)
                    iface_params_update = {"del_filter": "yes", "type": "network", "source": vm_iface_source}
                    update_xml.xml = libvirt.modify_vm_iface(vm1_name, 'get_xml', iface_params_update)
                    ret = virsh.update_device(vm1_name, update_xml.xml, ignore_status=True, debug=True)
                    libvirt.check_exit_status(ret)

        else:
            vm_iface_source = str(iface_source)
            vm1_iface_params = {"type": iface_type, "source": vm_iface_source, "filter": filter_name,
                                "mac": mac, 'driver': iface_driver, "iface_model": iface_model,
                                "inbound": iface_inbound, "outbound": iface_outbound}
            libvirt.modify_vm_iface(vm1_name, "update_iface", vm1_iface_params)

            if vm1.is_alive():
                vm1.destroy()

            vm1.start()
        # apply ip address as it may not be initialized
        session1 = session2 = None
        session1 = vm1.wait_for_serial_login()
        utils_net.restart_guest_network(session1)
        output = session1.cmd_output("ifconfig || ip a")
        logging.debug("guest1 ip info %s" % output)

        # Check guest's network function
        host_ip = utils_net.get_ip_address_by_interface(bridge_name)
        remote_url = params.get("remote_ip", "www.google.com")

        try:
            vm1_ip = utils_net.get_guest_ip_addr(session1, mac)
        except Exception as errs:
            test.fail("vm1 can't get IP with the new create bridge: %s" % errs)
        if test_qos:
            if test_net_qos:
                logging.debug("Test network inbound:")
                res1 = utils_net.check_class_rules(bridge_name, "1:2", ast.literal_eval(net_inbound))
                logging.debug("Test network outbound:")
                res2 = utils_net.check_filter_rules(bridge_name, ast.literal_eval(net_outbound))
            else:
                iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm1_name)
                tap_name = libvirt.get_ifname_host(vm1_name, iface_mac)
                logging.debug("Test inbound:")
                res1 = utils_net.check_class_rules(tap_name, "1:1", ast.literal_eval(iface_inbound))
                logging.debug("Test outbound:")
                res2 = utils_net.check_filter_rules(tap_name, ast.literal_eval(iface_outbound))
            if not res1 or not res2:
                test.fail("Qos test fail!")
        if hotplug:
            # reboot vm1, then check network function to ensure the interface is still there and works fine
            logging.info("reboot the vm")
            virsh.reboot(vm1)
            if session1 is None:
                session1 = vm1.wait_for_serial_login()
            ping(vm1_ip, remote_url, ping_count, ping_timeout, session=session1)
            # restart libvirtd service then check the interface still works fine
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            vm1.cleanup_serial_console()
            vm1.create_serial_console()
            session1 = vm1.wait_for_serial_login()
            ping(vm1_ip, remote_url, ping_count, ping_timeout, session=session1)
            logging.info("after reboot and restart libvirtd, the network works fine")
            if iface_driver:
                try:
                    driver_dict = eval(iface_driver)
                    if session1 is None:
                        session1 = vm1.wait_for_serial_login()
                    guest_iface_info = session1.cmd_output("ip l").strip()
                    guest_iface_name = re.findall(r"^\d+: (\S+?)[@:].*state UP.*$", guest_iface_info, re.MULTILINE)[0]
                    comb_size = driver_dict.get('queues')
                    rx_size = driver_dict.get('rx_queue_size')
                    session1.cmd_status("ethtool -L %s combined %s" % (guest_iface_name, comb_size))
                    ret, outp = session1.cmd_status_output("ethtool -l %s" % guest_iface_name)
                    logging.debug("ethtool cmd output:%s" % outp)
                    if not ret:
                        pre_comb = re.search(r"Pre-set maximums:[\s\S]*?Combined:.*?(\d+)", outp).group(1)
                        cur_comb = re.search(r"Current hardware settings:[\s\S]*?Combined:.*?(\d+)", outp).group(1)
                        if int(pre_comb) != int(comb_size) or int(cur_comb) != int(comb_size):
                            test.fail("Fail to check the combined size: setting: %s,"
                                      "Pre-set: %s, Current-set: %s"
                                      % (comb_size, pre_comb, cur_comb))
                        else:
                            logging.info("Getting correct Pre-set and Current set value")
                    else:
                        test.error("ethtool list fail: %s" % outp)
                    # as tx_queue size is only supported for vhost-user interface, only check rx_queue size
                    ret1, outp1 = session1.cmd_status_output("ethtool -g %s" % guest_iface_name)
                    logging.debug("guest queue size setting is %s" % outp1)
                    if not ret1:
                        pre_set = re.search(r"Pre-set maximums:\s*RX:\s*(\d+)", outp1).group(1)
                        cur_set = re.search(r"Current hardware settings:\s*RX:\s*(\d+)", outp1).group(1)
                        if int(pre_set) != int(rx_size) or int(cur_set) != int(rx_size):
                            test.fail("Fail to check the rx_queue_size!")
                except Exception as errs:
                    test.fail("fail to get driver info")
            # hot-unplug interface/device
            if attach_interface:
                ret = virsh.detach_interface(vm1_name, "bridge",
                                             ignore_status=True)
            else:
                ret = virsh.detach_device(vm1_name, attach_xml.xml,
                                          ignore_status=True,
                                          debug=True)
            if ret.exit_status:
                test.fail("Hot-unplug interface/device fail")
            else:
                logging.info("hot-unplug interface/device succeed")

        else:
            if start_vm2:
                # Start vm2 connect to the same bridge
                mac2 = utils_net.generate_mac_address_simple()
                vm2_iface_params = {"type": "bridge", "source": vm_iface_source, "filter": filter_name, "mac": mac2}
                libvirt.modify_vm_iface(vm2_name, "update_iface", vm2_iface_params)
                if vm2.is_alive():
                    vm2.destroy()
                vm2.start()

                # Check if vm1 and vm2 can ping each other
                try:
                    utils_net.update_mac_ip_address(vm2, timeout=120)
                    vm2_ip = vm2.get_address()
                except Exception as errs:
                    test.fail("vm2 can't get IP with the new create bridge: %s" % errs)
                session2 = vm2.wait_for_login()
                # make sure guest has got ip address
                utils_net.restart_guest_network(session2)
                output2 = session2.cmd_output("ifconfig || ip a")
                logging.debug("guest ip info %s" % output2)
                # check 2 guests' network functions
                check_net_functions(vm1_ip, ping_count, ping_timeout, session1,
                                    host_ip, remote_url, vm2_ip)
                check_net_functions(vm2_ip, ping_count, ping_timeout, session2,
                                    host_ip, remote_url, vm1_ip)
        if reconnect_tap:
            iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm1_name)
            tap_name = libvirt.get_ifname_host(vm1_name, iface_mac)
            # For network with shared host bridge, destroy the network will not
            # impact the connection
            if create_network and restart_net:
                virsh.net_destroy(iface_source["network"])
                out1 = libvirt_network.check_tap_connected(tap_name, True,
                                                           bridge_name)
                virsh.net_start(iface_source["network"])
                out2 = libvirt_network.check_tap_connected(tap_name, True,
                                                           bridge_name)
                if not out1 or not out2:
                    test.fail("Network destroy and restart should not impact "
                              "tap connection from bridge network!")
            else:
                # Delete and re-create bridge, check the tap is not connected
                utils_net.delete_linux_bridge_tmux(bridge_name, iface_name)
                utils_net.create_linux_bridge_tmux(bridge_name, iface_name)
                out3 = libvirt_network.check_tap_connected(tap_name, False,
                                                           bridge_name)
                # Check restart libvirtd will recover the connection
                libvirtd = utils_libvirtd.Libvirtd()
                libvirtd.restart()
                out4 = utils_misc.wait_for(
                    lambda: libvirt_network.check_tap_connected(tap_name, True,
                                                                bridge_name), 20)
                if not out3 or not out4:
                    test.fail("Delete and create linux bridge and check tap "
                              "connection is not as expected!")
    finally:
        logging.debug("Start to restore")
        vm1_xml_bak.sync()
        if start_vm2:
            vm2_xml_bak.sync()
        virsh.nwfilter_undefine(filter_name, ignore_status=True)
        if os.path.exists(iface_script_bk):
            process.run("mv %s %s" % (iface_script_bk, iface_script),
                        shell=True, verbose=True)
        if os.path.exists(bridge_script):
            process.run("rm -rf %s" % bridge_script, shell=True, verbose=True)
        br_path = "/sys/class/net/%s" % bridge_name
        if os.path.exists(br_path):
            utils_net.delete_linux_bridge_tmux(bridge_name, iface_name)
        # reload network configuration
        NM_service.restart()
        # recover NetworkManager
        if NM_status is True:
            NM_service.start()
        if 'network' in iface_source and iface_source["network"] in virsh.net_state_dict():
            virsh.net_destroy(iface_source["network"], ignore_status=False)
Example #12
def run(test, params, env):
    """
    Test migration with special network settings
    1) migrate guest with bridge type interface connected to ovs bridge
    2) migrate guest with direct type interface when a macvtap device name
        exists on dest host

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def check_vm_network_accessed(ping_dest, session=None):
        """
        Check that the VM can be accessed through the network, before or
        after migration happens

        :param ping_dest: The destination to be pinged
        :param session: The session object to the host
        :raise: test.fail when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        status, output = utils_net.ping(ping_dest,
                                        count=10,
                                        timeout=20,
                                        output_func=logging.debug,
                                        session=session)
        if status != 0:
            test.fail("Ping failed, status: %s, output: %s" % (status, output))

    def vm_sync(vmxml, vm_name=None, virsh_instance=virsh):
        """
        A wrapper to sync vm xml on localhost and remote host

        :param vmxml: domain VMXML instance
        :param vm_name: The name of VM
        :param virsh_instance: virsh instance object
        """
        if vm_name and virsh_instance != virsh:
            remote.scp_to_remote(server_ip, '22', server_user,
                                 server_pwd,
                                 vmxml.xml, vmxml.xml)
            if virsh_instance.domain_exists(vm_name):
                if virsh_instance.is_alive(vm_name):
                    virsh_instance.destroy(vm_name, ignore_status=True)
                virsh_instance.undefine(vm_name, ignore_status=True)
            virsh_instance.define(vmxml.xml, debug=True)
        else:
            vmxml.sync()

    def update_iface_xml(vm_name, iface_dict, virsh_instance=virsh):
        """
        Update interfaces for guest

        :param vm_name: The name of VM
        :param iface_dict: The interface configurations params
        :param virsh_instance: virsh instance object
        """
        logging.debug("update iface xml")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm_name, virsh_instance=virsh_instance)
        vmxml.remove_all_device_by_type('interface')
        vm_sync(vmxml, vm_name, virsh_instance=virsh_instance)
        iface = interface.Interface('network')
        iface.xml = libvirt.modify_vm_iface(vm_name, "get_xml", iface_dict,
                                            virsh_instance=virsh_instance)
        vmxml.add_device(iface)
        vmxml.xmltreefile.write()
        vm_sync(vmxml, vm_name, virsh_instance=virsh_instance)
        logging.debug("VM XML after updating interface: %s" % vmxml)

    def update_net_dict(net_dict, runner=utils_net.local_runner):
        """
        Update network dict

        :param net_dict: The network dict to be updated
        :param runner: Command runner
        :return: Updated network dict
        """
        if net_dict.get("net_name", "") == "direct-macvtap":
            logging.info("Updating network iface name")
            iface_name = utils_net.get_net_if(runner=runner, state="UP")[0]
            net_dict.update({"forward_iface": iface_name})
        else:
            # TODO: support other types
            logging.info("No need to update net_dict. We only support to "
                         "update direct-macvtap type for now.")
        logging.debug("net_dict is %s" % net_dict)
        return net_dict

    def get_remote_direct_mode_vm_mac(vm_name, uri):
        """
        Get mac of remote direct mode VM

        :param vm_name: The name of VM
        :param uri: The uri on destination
        :return: mac
        :raise: test.fail when the result of virsh domiflist is incorrect
        """
        vm_mac = None
        res = virsh.domiflist(
            vm_name, uri=uri, ignore_status=False).stdout_text.strip().split("\n")
        if len(res) < 2:
            test.fail("Unable to get remote VM's mac: %s" % res)
        else:
            vm_mac = res[-1].split()[-1]
        return vm_mac

    def create_fake_tap(remote_session):
        """
        Create a fake macvtap on destination host.

        :param remote_session: The session to the destination host.
        :return: The new tap device
        """
        tap_cmd = "ls /dev/tap* |awk -F 'tap' '{print $NF}'"
        tap_out = remote_session.cmd_output(tap_cmd).strip()
        if not tap_out:
            test.fail("Unable to get tap index using %s."
                      % tap_cmd)
        # Use the highest index in case several taps already exist
        tap_idx = max(int(i) for i in tap_out.split())
        fake_tap_dest = 'tap' + str(tap_idx + 1)
        logging.debug("creating a fake tap %s...", fake_tap_dest)
        cmd = "touch /dev/%s" % fake_tap_dest
        remote_session.cmd(cmd)
        return fake_tap_dest

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    libvirt_version.is_libvirt_feature_supported(params)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    virsh_options = params.get("virsh_options", "")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options", "--live --p2p --verbose")
    restart_dhclient = params.get("restart_dhclient", "dhclient -r; dhclient")
    ping_dest = params.get("ping_dest", "www.baidu.com")
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")

    target_vm_name = params.get("target_vm_name")
    direct_mode = "yes" == params.get("direct_mode", "no")
    check_macvtap_exists = "yes" == params.get("check_macvtap_exists", "no")
    create_fake_tap_dest = "yes" == params.get("create_fake_tap_dest", "no")
    macvtap_cmd = params.get("macvtap_cmd")
    modify_target_vm = "yes" == params.get("modify_target_vm", "no")
    ovs_bridge_name = params.get("ovs_bridge_name")
    network_dict = eval(params.get("network_dict", '{}'))
    iface_dict = eval(params.get("iface_dict", '{}'))
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}
    cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                 'server_pwd': server_pwd}

    virsh_session_remote = None
    libvirtd_conf = None
    mig_result = None
    target_org_xml = None
    target_vm_session = None
    target_vm = None
    exp_macvtap = []
    fake_tap_dest = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    postcopy_options = params.get("postcopy_options")
    action_during_mig = None
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
        action_during_mig = virsh.migrate_postcopy

    # Back up the guest XML so it can be restored after the test.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs)

        if target_vm_name:
            target_vm = libvirt_vm.VM(target_vm_name, params, vm.root_dir,
                                      vm.address_cache)
            target_vm.connect_uri = dest_uri
            if not virsh_session_remote.domain_exists(target_vm_name):
                test.error("VM %s should be installed on %s."
                           % (target_vm_name, server_ip))
            # Backup guest's xml on remote
            target_org_xml = vm_xml.VMXML.new_from_inactive_dumpxml(
                target_vm_name, virsh_instance=virsh_session_remote)
            # Scp original xml to remote for restoration
            remote.scp_to_remote(server_ip, '22', server_user,
                                 server_pwd,
                                 target_org_xml.xml, target_org_xml.xml)
            logging.debug("target xml is %s" % target_org_xml)

        if ovs_bridge_name:
            status, stdout = utils_net.create_ovs_bridge(ovs_bridge_name)
            if status:
                test.fail("Failed to create ovs bridge on local. Status: %s"
                          "Stdout: %s" % (status, stdout))
            status, stdout = utils_net.create_ovs_bridge(
                ovs_bridge_name, session=remote_session)
            if status:
                test.fail("Failed to create ovs bridge on remote. Status: %s"
                          "Stdout: %s" % (status, stdout))
        if network_dict:
            update_net_dict(network_dict, runner=remote_session.cmd)
            libvirt_network.create_or_del_network(
                network_dict, remote_args=remote_virsh_dargs)
            logging.info("dest: network created")
            update_net_dict(network_dict)
            libvirt_network.create_or_del_network(network_dict)
            logging.info("localhost: network created")

        if target_vm_name:
            if modify_target_vm and iface_dict:
                logging.info("Updating remote VM's interface")
                update_iface_xml(target_vm_name, iface_dict,
                                 virsh_instance=virsh_session_remote)
            target_vm.start()
            target_vm_session = target_vm.wait_for_serial_login(timeout=240)
            check_vm_network_accessed(ping_dest, session=target_vm_session)
            if check_macvtap_exists and macvtap_cmd:
                # Get macvtap device's index on remote after target_vm started
                idx = remote_session.cmd_output(macvtap_cmd).strip()
                if not idx:
                    test.fail("Unable to get macvtap index using %s."
                              % macvtap_cmd)
                # Generate the expected macvtap devices' index list
                exp_macvtap = ['macvtap'+idx, 'macvtap'+str(int(idx)+1)]
                if create_fake_tap_dest:
                    fake_tap_dest = create_fake_tap(remote_session)

        remote_session.close()
        # Change domain network xml
        if iface_dict:
            if "mac" not in iface_dict:
                mac = utils_net.generate_mac_address_simple()
                iface_dict.update({'mac': mac})
            else:
                mac = iface_dict["mac"]

            update_iface_xml(vm_name, iface_dict)
        else:
            # Fall back to the MAC of the guest's first interface (from virsh
            # dumpxml) so the IP lookup below does not hit an undefined name
            mac = vm.get_virsh_mac_address()

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            try:
                vm.start()
            except virt_vm.VMStartError as err:
                test.fail("Failed to start VM: %s" % err)

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)
        if not utils_package.package_install('dhcp-client', session=vm_session):
            test.error("Failed to install dhcp-client on guest.")
        utils_net.restart_guest_network(vm_session)
        vm_ip = utils_net.get_guest_ip_addr(vm_session, mac)
        logging.debug("VM IP Addr: %s", vm_ip)

        if direct_mode:
            check_vm_network_accessed(ping_dest, session=vm_session)
        else:
            check_vm_network_accessed(vm_ip)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True, virsh_opt=virsh_options,
                                    func=action_during_mig,
                                    extra_opts=extra,
                                    **extra_args)

        mig_result = migration_test.ret

        # Check network accessibility after migration
        if int(mig_result.exit_status) == 0:
            vm.connect_uri = dest_uri
            if vm.serial_console is not None:
                vm.cleanup_serial_console()
            vm.create_serial_console()
            vm_session_after_mig = vm.wait_for_serial_login(timeout=240)
            vm_session_after_mig.cmd(restart_dhclient)
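            # Renew the DHCP lease ("dhclient -r; dhclient" by default) so the
            # guest re-acquires an address behind the destination host's tap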
            check_vm_network_accessed(ping_dest, session=vm_session_after_mig)

            if check_macvtap_exists and macvtap_cmd:
                remote_session = remote.remote_login("ssh", server_ip, "22",
                                                     server_user, server_pwd,
                                                     r'[$#%]')
                # Check macvtap devices' index after migration
                idx = remote_session.cmd_output(macvtap_cmd)
                act_macvtap = ['macvtap'+i for i in idx.strip().split("\n")]
                if act_macvtap != exp_macvtap:
                    test.fail("macvtap devices after migration are incorrect!"
                              " Actual: %s, Expected: %s. "
                              % (act_macvtap, exp_macvtap))
        else:
            if fake_tap_dest:
                res = remote.run_remote_cmd("ls /dev/%s" % fake_tap_dest,
                                            params, runner_on_target)
                libvirt.check_exit_status(res)

        if target_vm_session:
            check_vm_network_accessed(ping_dest, session=target_vm_session)
        # Execute migration from remote
        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)

            cmd = "virsh migrate %s %s %s" % (vm_name, options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s" % (cmd, cmd_result))
            logging.debug("VM is migrated back.")

            vm.connect_uri = bk_uri
            if vm.serial_console is not None:
                vm.cleanup_serial_console()
            vm.create_serial_console()
            vm_session_after_mig_bak = vm.wait_for_serial_login(timeout=240)
            vm_session_after_mig_bak.cmd(restart_dhclient)
            check_vm_network_accessed(ping_dest, vm_session_after_mig_bak)
    finally:
        logging.debug("Recover test environment")
        vm.connect_uri = bk_uri
        migration_test.cleanup_vm(vm, dest_uri)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        remote_session = remote.remote_login("ssh", server_ip, "22",
                                             server_user, server_pwd,
                                             r'[$#%]')
        if target_vm and target_vm.is_alive():
            target_vm.destroy(gracefully=False)

        if target_org_xml and target_vm_name:
            logging.info("Recovery XML configration for %s.", target_vm_name)
            virsh_session_remote = virsh.VirshPersistent(**remote_virsh_dargs)
            vm_sync(target_org_xml, vm_name=target_vm_name,
                    virsh_instance=virsh_session_remote)
            virsh_session_remote.close_session()

        if fake_tap_dest:
            remote_session.cmd_output_safe("rm -rf /dev/%s" % fake_tap_dest)

        if network_dict:
            libvirt_network.create_or_del_network(
                network_dict, is_del=True, remote_args=remote_virsh_dargs)
            libvirt_network.create_or_del_network(network_dict, is_del=True)
        if ovs_bridge_name:
            utils_net.delete_ovs_bridge(ovs_bridge_name)
            utils_net.delete_ovs_bridge(ovs_bridge_name, session=remote_session)

        remote_session.close()
        if target_vm_session:
            target_vm_session.close()

        if virsh_session_remote:
            virsh_session_remote.close_session()

        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params,
                                             cleanup=True)
        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        if source_file:
            libvirt.delete_local_disk("file", path=source_file)
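For reference, a minimal self-contained sketch of the /dev/tap index arithmetic that create_fake_tap() above relies on; the helper name and values are illustrative:

def next_fake_tap(existing_taps):
    """Return the next fake tap name, given the existing /dev/tap* names."""
    indexes = [int(name[len('tap'):]) for name in existing_taps]
    return 'tap%d' % (max(indexes) + 1)


assert next_fake_tap(['tap0', 'tap3']) == 'tap4'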
Пример #13
0
def run(test, params, env):
    """
    Test autogenerated tap device name.

    1.Prepare test environment, restart libvirtd to flush the counter.
    2.start the domain.
    3.check the tap device name, then destroy the vm, restart libvirtd if
      needed, then start the vm again;
    4.hotplug an interface and check the tap device name;
    5.detach one interface, restart libvirtd if needed, then hotplug again to
      check the tap device name;
    6.hotplug interface of another type, check tap device name;
    7.recover the env.
    """
    def prepare_vmxml(vm, vm_name, direct=False):
        """
        Ensure there is only 1 requested interface in the vmxml

        param vm: the test vm
        param vm_name: the vm'name
        param direct: True or False, if True, prepare vm xml with a direct type
                      interface(the tap device will be named as macvtap*);
                      if False , prepare vm xml with a network type interface
                      connected to default network(the tap device will be named
                      as vnet* automatically instead)
        :return: None
        """
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        iface_dict = prepare_iface_dict(direct)
        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)

    def prepare_iface_dict(direct=False):
        """
        Prepare the iface_dict for the function 'libvirt.modify_vm_iface()' to use

        :param direct: True or False
        :return: a dictionary
        """
        if direct:
            iface_name = utils_net.get_net_if(state="UP")[0]
            source = "{'dev': '%s', 'mode': 'bridge'}" % iface_name
            iface_dict = {
                "model": "virtio",
                'type': 'direct',
                'source': source,
                'del_addr': 'yes',
                'del_alias': 'yes',
                'del_target': 'yes'
            }
        else:
            iface_dict = {
                "model": "virtio",
                'type': 'network',
                'source': "{'network': 'default'}",
                'del_addr': 'yes',
                'del_alias': 'yes',
                'del_target': 'yes'
            }
        return iface_dict

    def check_target_name(index, direct=False):
        """
        Check the auto generated tap device name on the host

        param index: an integer range in {-1,}, the max index occupied, if there
                    is no occupied, initial index is -1
        param direct: True or False.
                      True: the expected auto-generated tap device name should
                            be 'macvtap${index+1}'
                      False: the expected auto-generated tap device name should
                            be "vnet${index+1}"
        :return: None
        """
        cmd_output = process.run("ls /sys/class/net",
                                 shell=True,
                                 ignore_status=True).stdout_text
        logging.debug("Current network interfaces on the host includes: %s",
                      cmd_output)
        index_ = int(index)
        if direct:
            expected_name = 'macvtap' + str(index_ + 1)
        else:
            expected_name = "vnet" + str(index_ + 1)
        if expected_name not in cmd_output:
            test.fail("Can not get the expected tap: %s" % expected_name)
        else:
            logging.debug("Got the expected tap: %s", expected_name)

    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    test_macvtap = "yes" == params.get("test_macvtap", "no")
    flush_with_occupation = "yes" == params.get("flush_with_occupation", "no")
    flush_after_detach = "yes" == params.get("flush_after_detach", "no")

    # If vnet* or macvtap* devices exist even though the test vm is
    # destroyed, the env is not clean; cancel the test
    cmd_output = process.run("ls /sys/class/net",
                             shell=True,
                             ignore_status=True).stdout_text
    if ('vnet' in cmd_output and not test_macvtap) or \
            ('macvtap' in cmd_output and test_macvtap):
        test.cancel("The env is not clean, there is existing tap device!")
    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        prepare_vmxml(vm, vm_name, test_macvtap)
        libvirt_network.ensure_default_network()
        libvirtd.restart()
        # if no vm is running, restarting libvirtd flushes the counter
        # back to the initial value: -1
        counter = -1
        vm.start()
        # check the auto generated tap device name after a fresh initialization
        logging.debug("1. Check the tap device name after initialized:")
        check_target_name(counter, test_macvtap)
        # if the vm and its tap device were created successfully, the
        # current counter increases by 1
        counter = counter + 1
        # destroy and start the vm again
        vm.destroy()
        time.sleep(2)
        # flush when vm down, if flushed, the counter is initialized to -1
        if flush_with_occupation:
            libvirtd.restart()
            counter = -1
        time.sleep(2)
        vm.start()
        logging.debug(
            "2. Check tap name after destroying and starting the vm again "
            "(libvirtd restarted: %s):", flush_with_occupation)
        check_target_name(counter, test_macvtap)
        # new tap created after vm start, counter increase by 1
        counter = counter + 1
        # add another interface with the same interface type
        if_dict = prepare_iface_dict(test_macvtap)
        mac_addr = utils_net.generate_mac_address_simple()
        if_dict['mac'] = mac_addr
        iface_add_xml = libvirt.modify_vm_iface(vm_name, 'get_xml', if_dict)
        virsh.attach_device(vm_name,
                            iface_add_xml,
                            debug=True,
                            ignore_status=False)
        time.sleep(2)
        logging.debug("3. Check tap name after hotplug an interface:")
        check_target_name(counter, test_macvtap)
        # one interface with same iface type attached, counter increase by 1
        counter = counter + 1
        # Make sure the guest boots up. Otherwise detach_device won't complete
        vm.wait_for_serial_login(timeout=180).close()
        # detach the new attached interface
        virsh.detach_device(vm_name,
                            iface_add_xml,
                            wait_for_event=True,
                            debug=True,
                            ignore_status=False)
        if flush_after_detach:
            libvirtd.restart()
            # the latest occupied name is recycled after restart
            # the counter is not initialized to -1 as the vm is running and
            # first tap name is occupied
            counter = counter - 1
        virsh.attach_device(vm_name,
                            iface_add_xml,
                            debug=True,
                            ignore_status=False)
        logging.debug(
            "4. Check tap name after detach and reattach "
            "(flushed: %s):", flush_after_detach)
        check_target_name(counter, test_macvtap)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
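A minimal runnable sketch of the naming rule this test verifies (the counter bookkeeping above); the helper name and values are illustrative:

def expected_tap_name(counter, direct=False):
    """Next auto-generated tap name, given the highest occupied index."""
    prefix = 'macvtap' if direct else 'vnet'
    return prefix + str(counter + 1)


assert expected_tap_name(-1) == 'vnet0'            # fresh daemon, no taps yet
assert expected_tap_name(0, direct=True) == 'macvtap1'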
Пример #14
0
def run(test, params, env):
    """
    Run the test

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    libvirt_version.is_libvirt_feature_supported(params)

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    extra = params.get("virsh_migrate_extra")
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    options = params.get("virsh_migrate_options",
                         "--live --p2p --persistent --verbose")
    virsh_options = params.get("virsh_options", "")
    stress_package = params.get("stress_package")
    action_during_mig = params.get("action_during_mig")
    migrate_speed = params.get("migrate_speed")
    migrate_speed_again = params.get("migrate_speed_again")
    migrate_again = "yes" == params.get("migrate_again", "no")
    vm_state_after_abort = params.get("vm_state_after_abort")
    return_port = "yes" == params.get("return_port", "no")
    params['server_pwd'] = params.get("migrate_dest_pwd")
    params['server_ip'] = params.get("migrate_dest_host")
    params['server_user'] = params.get("remote_user", "root")
    is_storage_migration = '--copy-storage-all' in (extra or '')
    setup_tls = "yes" == params.get("setup_tls", "no")
    qemu_conf_dest = params.get("qemu_conf_dest", "{}")
    migrate_tls_force_default = "yes" == params.get("migrate_tls_force_default", "no")
    poweroff_src_vm = "yes" == params.get("poweroff_src_vm", "no")
    check_port = "yes" == params.get("check_port", "no")
    server_ip = params.get("migrate_dest_host")
    server_user = params.get("remote_user", "root")
    server_pwd = params.get("migrate_dest_pwd")
    server_params = {'server_ip': server_ip,
                     'server_user': server_user,
                     'server_pwd': server_pwd}
    qemu_conf_list = eval(params.get("qemu_conf_list", "[]"))
    qemu_conf_path = params.get("qemu_conf_path")
    min_port = params.get("min_port")

    vm_session = None
    qemu_conf_remote = None
    (remove_key_local, remove_key_remote) = (None, None)

    # Back up the guest XML so it can be restored after the test.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    local_both_conf_obj = None
    remote_file_list = []
    conn_obj_list = []

    try:
        # Setup default value for migrate_tls_force
        if migrate_tls_force_default:
            value_list = ["migrate_tls_force"]
            # Setup migrate_tls_force default value on remote
            server_params['file_path'] = "/etc/libvirt/qemu.conf"
            remove_key_remote = libvirt_config.remove_key_in_conf(value_list,
                                                                  "qemu",
                                                                  remote_params=server_params)
            # Setup migrate_tls_force default value on local
            remove_key_local = libvirt_config.remove_key_in_conf(value_list,
                                                                 "qemu")

        if check_port:
            server_params['file_path'] = qemu_conf_path
            remove_key_remote = libvirt_config.remove_key_in_conf(qemu_conf_list,
                                                                  "qemu",
                                                                  remote_params=server_params)

        # Update only remote qemu conf
        if qemu_conf_dest:
            qemu_conf_remote = libvirt_remote.update_remote_file(
                server_params, qemu_conf_dest, "/etc/libvirt/qemu.conf")
        # Update local or both sides configuration files
        local_both_conf_obj = update_local_or_both_conf_file(params)
        # Setup TLS
        if setup_tls:
            conn_obj_list.append(migration_base.setup_conn_obj('tls',
                                                               params,
                                                               test))
        # Update guest disk xml
        if not is_storage_migration:
            libvirt.set_vm_disk(vm, params)
        else:
            remote_file_list.append(libvirt_disk.create_remote_disk_by_same_metadata(vm,
                                                                                     params))
        if check_port:
            # Create a remote runner
            runner_on_target = remote_old.RemoteRunner(host=server_ip,
                                                       username=server_user,
                                                       password=server_pwd)
            cmd = "nc -l -p %s &" % min_port
            remote_old.run_remote_cmd(cmd, params, runner_on_target, ignore_status=False)
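            # With nc holding min_port open on the destination, libvirt is
            # expected to skip it and bind the migration socket to
            # min_port + 1, which is verified after migration below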

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        vm_session = vm.wait_for_login()
        if action_during_mig:
            if poweroff_src_vm:
                params.update({'vm_session': vm_session})
            action_during_mig = migration_base.parse_funcs(action_during_mig,
                                                           test, params)

        if stress_package:
            migration_test.run_stress_in_vm(vm, params)
        mode = ('both' if postcopy_options and '--postcopy' in postcopy_options
                else 'precopy')
        if migrate_speed:
            migration_test.control_migrate_speed(vm_name,
                                                 int(migrate_speed),
                                                 mode)
        # Execute migration process
        migration_base.do_migration(vm, migration_test, None, dest_uri,
                                    options, virsh_options, extra,
                                    action_during_mig,
                                    extra_args)

        func_returns = dict(migration_test.func_ret)
        migration_test.func_ret.clear()
        logging.debug("Migration returns function results:%s", func_returns)
        if return_port:
            port_used = get_used_port(func_returns)
        if check_port:
            port_used = get_used_port(func_returns)
            if int(port_used) != int(min_port) + 1:
                test.fail("Wrong port for migration.")

        if vm_state_after_abort:
            check_vm_state_after_abort(vm_name, vm_state_after_abort,
                                       bk_uri, dest_uri, test)

        if migrate_again:
            if not vm.is_alive():
                vm.start()
            vm_session = vm.wait_for_login()
            action_during_mig = migration_base.parse_funcs(params.get('action_during_mig_again'),
                                                           test, params)
            extra_args['status_error'] = params.get("migrate_again_status_error", "no")

            if params.get("virsh_migrate_extra_mig_again"):
                extra = params.get("virsh_migrate_extra_mig_again")

            if params.get('scp_list_client_again'):
                params['scp_list_client'] = params.get('scp_list_client_again')
                # Recreate tlsconnection object using new parameter values
                conn_obj_list.append(migration_base.setup_conn_obj('tls',
                                                                   params,
                                                                   test))

            if migrate_speed_again:
                migration_test.control_migrate_speed(vm_name,
                                                     int(migrate_speed_again),
                                                     mode)

            migration_base.do_migration(vm, migration_test, None, dest_uri,
                                        options, virsh_options,
                                        extra, action_during_mig,
                                        extra_args)
            if return_port:
                func_returns = dict(migration_test.func_ret)
                logging.debug("Migration returns function "
                              "results:%s", func_returns)
                port_second = get_used_port(func_returns)
                if port_used != port_second:
                    test.fail("Expect same port '{}' is used as previous one, "
                              "but found new one '{}'".format(port_used,
                                                              port_second))
                else:
                    logging.debug("Same port '%s' was used as "
                                  "expected", port_second)
        if int(migration_test.ret.exit_status) == 0:
            migration_test.post_migration_check([vm], params, uri=dest_uri)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        if vm_session:
            vm_session.close()
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)
        # Restore remote qemu conf and restart libvirtd
        if qemu_conf_remote:
            logging.debug("Recover remote qemu configurations")
            del qemu_conf_remote
        # Restore local or both sides conf and restart libvirtd

        recover_config_file(local_both_conf_obj, params)
        if remove_key_remote:
            del remove_key_remote
        if remove_key_local:
            libvirt.customize_libvirt_config(None,
                                             config_object=remove_key_local,
                                             config_type='qemu')
        # Clean up connection object, like TLS
        migration_base.cleanup_conn_obj(conn_obj_list, test)

        for one_file in remote_file_list:
            if one_file:
                remote_old.run_remote_cmd("rm -rf %s" % one_file, params)

        orig_config_xml.sync()
Пример #15
0
def run(test, params, env):
    """
    Test Hotplug/unplug interface device(s)
    """
    def setup_default():
        """
        Default setup
        """
        logging.debug("Remove VM's interface devices.")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vm_attrs = eval(params.get('vm_attrs', '{}'))
        if vm_attrs:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            vmxml.setup_attrs(**vm_attrs)
            vmxml.sync()

    def teardown_default():
        """
        Default cleanup
        """
        pass

    def setup_vdpa():
        """
        Setup vDPA environment
        """
        setup_default()
        test_env_obj = None
        if test_target == "simulator":
            test_env_obj = utils_vdpa.VDPASimulatorTest()
            test_env_obj.setup()
        else:
            vdpa_mgmt_tool_extra = params.get("vdpa_mgmt_tool_extra", "")
            pf_pci = utils_vdpa.get_vdpa_pci()
            test_env_obj = utils_vdpa.VDPAOvsTest(
                pf_pci, mgmt_tool_extra=vdpa_mgmt_tool_extra)
            test_env_obj.setup()
            params['mac_addr'] = test_env_obj.vdpa_mac.get(
                params.get("vdpa_dev", "vdpa0"))

        return test_env_obj

    def teardown_vdpa():
        """
        Cleanup vDPA environment
        """
        if test_target != "simulator":
            service.Factory.create_service("NetworkManager").restart()
        if test_obj:
            test_obj.cleanup()

    def test_vdpa():
        """
        Hotplug/unplug vDPA type interface

        1) Start the vm, hotplug the interface
        2) Login to the vm and check the network function
        3) Hot-unplug the interface
        """
        vm.start()
        vm_session = vm.wait_for_serial_login(timeout=240)

        br_name = None
        if test_target == "mellanox":
            br_name = test_obj.br_name
        for _i in range(int(params.get('repeat_times', '1'))):
            interface_base.attach_iface_device(vm_name, dev_type, params)
            vdpa_base.check_vdpa_conn(vm_session, test_target, br_name)
            check_points.check_vm_iface_queues(vm_session, params)
            interface_base.detach_iface_device(vm_name, dev_type)

    libvirt_version.is_libvirt_feature_supported(params)
    supported_qemu_ver = eval(
        params.get('func_supported_since_qemu_kvm_ver', '()'))
    if supported_qemu_ver:
        if not utils_misc.compare_qemu_version(*supported_qemu_ver, False):
            test.cancel("Current qemu version doesn't support this test!")

    # Variable assignment
    test_target = params.get('test_target', '')
    dev_type = params.get('dev_type', '')

    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    backup_vmxml = vmxml.copy()

    run_test = eval("test_%s" % dev_type)
    setup_test = eval("setup_%s" % dev_type) if "setup_%s" % dev_type in \
        locals() else setup_default
    teardown_test = eval("teardown_%s" % dev_type) if "teardown_%s" % \
        dev_type in locals() else teardown_default
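    # e.g. dev_type 'vdpa' dispatches to test_vdpa/setup_vdpa/teardown_vdpa;
    # a dev_type without dedicated helpers falls back to setup_default and
    # teardown_default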

    test_obj = None
    try:
        # Execute test
        test_obj = setup_test()
        run_test()

    finally:
        backup_vmxml.sync()
        teardown_test()
Пример #16
0
def run(test, params, env):
    """
    Test for basic controller device function.

    1) Define the VM w/o specified controller device and check result meets
       expectation.
    2) Start the guest and check if start result meets expectation
    3) Test the function of started controller device
    4) Shutdown the VM and clean up environment
    """

    def setup_os_xml():
        """
        Prepare os part of VM XML.

        """
        osxml = vm_xml.os
        orig_machine = osxml.machine
        # avocado-vt only use virt machine type on aarch64
        if platform.machine() == 'aarch64':
            osxml.machine = 'virt'
            return

        if os_machine:
            osxml.machine = os_machine
            vm_xml.os = osxml
        else:
            cur_machine = orig_machine

    def setup_controller_xml(index, addr_target=None):
        """
        Prepare controller devices of VM XML.

        :param index: The index of controller
        :param addr_target: The controller address

        """
        ctrl = Controller(type_name=cntlr_type)
        if model:
            ctrl.model = model
        if pcihole:
            ctrl.pcihole64 = pcihole
        if vectors:
            ctrl.vectors = vectors
        if index:
            ctrl.index = index
        if chassisNr:
            ctrl.target = {'chassisNr': chassisNr}
        if model_name:
            ctrl.model_name = {'name': model_name}

        if addr_target:
            match = re.match(r"(?P<bus>[0-9]*):(?P<slot>[0-9a-f]*).(?P<function>[0-9])", addr_target)
            if match:
                addr_dict = match.groupdict()
                addr_dict['bus'] = hex(int(addr_dict['bus'], 16))
                addr_dict['slot'] = hex(int(addr_dict['slot'], 16))
                addr_dict['function'] = hex(int(addr_dict['function'], 16))
                addr_dict['domain'] = '0x0000'
                ctrl.address = ctrl.new_controller_address(attrs=addr_dict)

        logging.debug("Controller XML is:%s", ctrl)
        vm_xml.add_device(ctrl)

        if cmpnn_cntlr_model is not None:
            for num in range(int(cmpnn_cntlr_num)):
                ctrl = Controller(type_name=cntlr_type)
                ctrl.model = cmpnn_cntlr_model + str(num+1)
                ctrl.index = index
                logging.debug("Controller XML is:%s", ctrl)
                vm_xml.add_device(ctrl)

    def define_and_check(guest_xml):
        """
        Define the guest and check the result.

        :param guest_xml: The guest VMXML instance
        """
        fail_patts = []
        if expect_err_msg:
            fail_patts.append(expect_err_msg)
        guest_xml.undefine()
        res = vm_xml.virsh.define(guest_xml.xml)
        logging.debug("Expect failures: %s", fail_patts)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def start_and_check():
        """
        Predict the error message when starting and try to start the guest.
        """
        fail_patts = []
        if expect_err_msg:
            fail_patts.append(expect_err_msg)
        res = virsh.start(vm_name)
        logging.debug("Expect failures: %s", fail_patts)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def prepare_qemu_pattern(elem):
        """
        Collect the patterns to be searched in qemu command line.

        :param elem: a Controller object

        :return: A list including search patterns
        """
        search_qemu_cmd = []

        bus = int(elem.address.attrs.get('bus'), 0)
        slot = int(elem.address.attrs.get('slot'), 0)
        func = int(elem.address.attrs.get('function'), 0)
        addr_str = '%02d:%02d.%1d' % (bus, slot, func)
        name = elem.alias.get('name')
        if elem.model != 'dmi-to-pci-bridge':
            chassisNR = elem.target.get('chassisNr')
            value = "pci-bridge,chassis_nr=%s" % chassisNR
            value = "%s,id=%s,bus=pci.%d,addr=%#x" % (value, name, bus, slot)
        else:
            value = "%s" % elem.model_name['name']
            value = "%s,id=%s,bus=pcie.%d,addr=%#x" % (value, name, bus, slot)

        tup = ('-device', value)
        search_qemu_cmd.append(tup)
        return search_qemu_cmd

    def get_patt_inx_ctl(cur_vm_xml, qemu_list, inx):
        """
        Get search pattern in qemu line for some kind of cases

        :param cur_vm_xml: Guest xml
        :param qemu_list: List for storing qemu search patterns
        :param inx: Controller index used

        :return: a tuple for (search_result, qemu_list)

        """
        (search_result, qemu_search) = check_cntrl(cur_vm_xml,
                                                   cntlr_type,
                                                   model,
                                                   inx, None, True)
        if qemu_search:
            qemu_list.extend(qemu_search)
        return (search_result, qemu_list)

    def get_patt_non_zero_bus(cur_vm_xml):
        """
        Get search pattern for multiple controllers with non-zero bus.

        :param cur_vm_xml: The guest VMXML instance
        :return: List, The search pattern list
        """
        actual_set = set()
        qemu_list = []
        for elem in cur_vm_xml.devices.by_device_tag('controller'):
            if (elem.type == cntlr_type and elem.model == model):
                actual_set.add(int(elem.index))
                qemu_list = prepare_qemu_pattern(elem)
        expect_set = set(range(1, int(pci_bus_number) + 1))

        logging.debug("expect: %s, actual: %s", expect_set, actual_set)
        if actual_set != expect_set:
            test.fail("The actual index set (%s) does "
                      "not match the expected index set "
                      "(%s)." % (actual_set, expect_set))
        return qemu_list

    def get_search_patt_qemu_line():
        """
        Check if the guest XML has the expected content.

        :return: -device pci-bridge,chassis_nr=1,id=pci.1,bus=pci.0,addr=0x3
        """
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        qemu_list = []
        # Check the pci-root controller has index = 0
        if no_pci_controller == "yes":
            (_, qemu_list) = get_patt_inx_ctl(cur_vm_xml,
                                              qemu_list, '0')
            return qemu_list

        # Check index numbers of pci-bridge controllers should be equal
        # to the pci_bus_number
        if int(pci_bus_number) > 0:
            return get_patt_non_zero_bus(cur_vm_xml)
        # All controllers should exist if there is a gap between two PCI
        # controller indexes
        if index and index_second and int(index) > 0 and int(index_second) > 0:
            for idx in range(int(index_second), int(index) + 1):
                (_, qemu_list) = get_patt_inx_ctl(cur_vm_xml,
                                                  qemu_list, str(idx))
            return qemu_list

        # All controllers should exist with index among [1..index]
        if index and int(index) > 0 and not index_second:
            for idx in range(1, int(index) + 1):
                (search_result, qemu_list) = get_patt_inx_ctl(cur_vm_xml,
                                                              qemu_list,
                                                              str(idx))
                if not search_result:
                    test.fail("Can not find %s controller "
                              "with index %s." % (model, str(idx)))
            return qemu_list

    def get_controller_addr(cntlr_type=None, model=None, index=None, cntlr_bus=None):
        """
        Get the address of testing controller from VM XML as a string with
        format
        a. "bus:slot.function" for pci address type
        b. "cssid:ssid.devno" for ccw address type

        :param cntlr_type: controller type, e.g. pci
        :param model: controller model, e.g. pcie-root-port
        :param index: controller index, e.g. '0'
        :param cntlr_bus: controller bus type, e.g. pci, ccw
        :return: a tuple including an address string, bus, slot,
                        function, multifunction
        """
        if model in ['pci-root', 'pcie-root']:
            return (None, None, None, None, None)

        addr_str = None
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)

        for elem in cur_vm_xml.devices.by_device_tag('controller'):
            if (
                    (cntlr_type is None or elem.type == cntlr_type) and
                    (model is None or elem.model == model) and
                    (index is None or elem.index == index)):
                addr_elem = elem.address
                if addr_elem is None:
                    test.error("Can not find 'Address' "
                               "element for the controller")
                p4 = None
                if 'ccw' == cntlr_bus:
                    p1 = int(addr_elem.attrs.get('cssid'), 0)
                    p2 = int(addr_elem.attrs.get('ssid'), 0)
                    p3 = int(addr_elem.attrs.get('devno'), 0)
                else:
                    p1 = int(addr_elem.attrs.get('bus'), 0)
                    p2 = int(addr_elem.attrs.get('slot'), 0)
                    p3 = int(addr_elem.attrs.get('function'), 0)
                    p4 = addr_elem.attrs.get('multifunction')
                addr_str = '%02d:%02x.%1d' % (p1, p2, p3)
                logging.debug("Controller address is %s", addr_str)
                return (addr_str, p1, p2, p3, p4)

        return (None, None, None, None, None)

    def check_controller_addr(cntlr_bus=None):
        """
        Check test controller address against expectation.

        :param cntlr_bus: controller bus type, e.g. pci, ccw
        """
        (addr_str, _, _, _, _) = get_controller_addr(cntlr_type, model, index, cntlr_bus)
        if model in ['pci-root', 'pcie-root']:
            if addr_str is None:
                return
            else:
                test.fail('Expected the controller to have no address, '
                          'but got "%s"' % addr_str)

        if index and int(index) != 0:
            if '00:00' in addr_str:
                test.fail("Invalid PCI address 0000:00:00, "
                          "at least one of domain, bus, "
                          "or slot must be > 0")

        exp_addr_patt = r'00:[0-9]{2}.[0-9]'
        if model in ['ehci']:
            exp_addr_patt = r'0[1-9]:[0-9]{2}.[0-9]'
        if addr_str:
            exp_addr_patt = addr_str
        if 'ccw' == cntlr_bus:
            exp_addr_patt = r'254:\d+\.\d+'

        if not re.match(exp_addr_patt, addr_str):
            test.fail('Expected controller address to match "%s", '
                      'but got "%s"' % (exp_addr_patt, addr_str))

    def check_qemu_cmdline(search_pattern=None):
        """
        Check domain qemu command line against expectation.

        :param search_pattern: search list with tuple objects
        """
        with open('/proc/%s/cmdline' % vm.get_pid()) as proc_file:
            cmdline = proc_file.read()
        options = cmdline.split('\x00')
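        # /proc/<pid>/cmdline is NUL-separated, e.g.
        # 'qemu-kvm\x00-device\x00pci-bridge,chassis_nr=1,...', so the split
        # yields one list entry per argument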
        logging.debug(options)
        # Search the command line options for the given patterns
        if search_pattern and isinstance(search_pattern, list):
            for pattern in search_pattern:
                key = pattern[0]
                value = pattern[1]
                logging.debug("key=%s, value=%s", key, value)
                found = False
                check_value = False
                for opt in options:
                    if check_value:
                        if re.findall(value, opt):
                            logging.debug("Found the expected (%s %s) in qemu "
                                          "command line" % (key, value))
                            found = True
                            break
                        check_value = False
                    if key == opt:
                        check_value = True
                if not found:
                    test.fail("Can not find '%s %s' in qemu "
                              "command line" % (key, value))

        # Get pcihole options from qemu command line
        pcihole_opt = ''
        for idx, opt in enumerate(options):
            if 'pci-hole64-size' in opt:
                pcihole_opt = opt

        # Get expected pcihole options from params
        exp_pcihole_opt = ''
        if (cntlr_type == 'pci' and model in ['pci-root', 'pcie-root'] and
           pcihole):
            if 'pc' in cur_machine:
                exp_pcihole_opt = 'i440FX-pcihost'
            elif 'q35' in cur_machine:
                exp_pcihole_opt = 'q35-pcihost'
            exp_pcihole_opt += '.pci-hole64-size=%sK' % pcihole

        # Check options against expectation
        if pcihole_opt != exp_pcihole_opt:
            test.fail('Expected qemu command pcihole option "%s", '
                      'but got "%s"' % (exp_pcihole_opt, pcihole_opt))

        # Check usb options against expectation
        if cntlr_type == "usb":
            pattern = ""
            if cmpnn_cntlr_num is not None:
                for num in range(int(cmpnn_cntlr_num)):
                    name = (cmpnn_cntlr_model+str(num+1)).split('-')
                    pattern = pattern + r"-device.%s-usb-%s.*" % (name[0], name[1])
            elif model == "ehci":
                pattern = r"-device.usb-ehci"
            elif model == "qemu-xhci":
                pattern = r"-device.qemu-xhci"

            logging.debug("pattern is %s", pattern)

            if pattern and not re.search(pattern, cmdline):
                test.fail("Expect get usb model info in qemu cmdline, but failed!")

    def check_guest(cntlr_type, cntlr_model, cntlr_index=None, cntlr_bus=""):
        """
        Check status within the guest against expectation.

        :param cntlr_type: //controller@type, e.g. ide
        :param cntlr_model: //controller@model, e.g. virtio-scsi
        :param cntlr_index: //controller@index, e.g. '0'
        :param cntlr_bus: //controller/address@type, e.g. pci
        :raise avocado.core.exceptions.TestFail: Fails the test if checks fail
        :raise avocado.core.exceptions.TestError: Fails if test couldn't be fully executed
        :return: None
        """
        if model == 'pci-root' or model == 'pcie-root':
            return

        (addr_str, _, _, _, _) = get_controller_addr(cntlr_type=cntlr_type,
                                                     model=cntlr_model,
                                                     index=cntlr_index,
                                                     cntlr_bus=cntlr_bus)

        if 'ccw' == cntlr_bus:
            check_ccw_bus_type(addr_str)
        else:
            check_pci_bus_type(addr_str, cntlr_index, cntlr_model, cntlr_type)

    def check_ccw_bus_type(addr_str):
        """
        Uses lszdev to check for device info in guest.

        :param addr_str: Device address from libvirt
        :raise avocado.core.exceptions.TestFail: Fails the test on unexpected values
        :raise avocado.core.exceptions.TestError: Fails if can't query dev info in guest
        :return: None
        """
        session = vm.wait_for_login(serial=True)
        cmd = 'lszdev generic-ccw --columns ID'
        status, output = session.cmd_status_output(cmd)
        logging.debug("lszdev output is: %s", output)
        if status:
            test.error("Failed to get guest device info, check logs.")
        devno = int(addr_str.split('.')[-1])
        devno_str = hex(devno).replace('0x', '').zfill(4)
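        # e.g. addr_str '254:00.3' -> devno 3 -> '0003', matching the
        # 0.0.0003 style IDs that lszdev prints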
        if devno_str not in output:
            test.fail("Can't find device with number %s in guest. Searched for %s in %s"
                      % (devno, devno_str, output))

    def check_pci_bus_type(addr_str, cntlr_index, cntlr_model, cntlr_type):
        """
        Uses lspci to check for device info in guest.

        :param addr_str: Device address from libvirt
        :param cntlr_index: controller index
        :param cntlr_model: controller model
        :param cntlr_type: controller type
        :raise avocado.core.exceptions.TestError: Fails if device info not found
        :raise avocado.core.exceptions.TestFail: Fails on unexpected test values
        :return: None
        """
        pci_name = 'PCI bridge:'
        verbose_option = ""
        if cntlr_type == 'virtio-serial':
            verbose_option = '-vvv'
        if (addr_str is None and cntlr_model != 'pci-root' and cntlr_model != 'pcie-root'):
            test.error("Can't find target controller in XML")
        if cntlr_index:
            logging.debug("%s, %s, %s", cntlr_type, cntlr_model, cntlr_index)
        session = vm.wait_for_login(serial=True)
        status, output = session.cmd_status_output('lspci %s -s %s'
                                                   % (verbose_option, addr_str))
        logging.debug("lspci output is: %s", output)
        if (cntlr_type == 'virtio-serial' and
                (vectors and int(vectors) == 0)):
            if 'MSI' in output:
                test.fail("Expected MSI to be disabled with zero vectors, "
                          "but got %s" % output)
        if (cntlr_type == 'virtio-serial' and
                (vectors is None or int(vectors) != 0)):
            if 'MSI' not in output:
                test.fail("Expected MSI to be enabled with non-zero vectors, "
                          "but got %s" % output)
        if (cntlr_type == 'pci'):
            if pci_name not in output:
                test.fail("Can't find target pci device"
                          " '%s' on guest " % addr_str)

    def check_guest_by_pattern(patterns):
        """
        Search the command output with specified patterns

        :param patterns: patterns to search in guest. Type: str or list
        """
        logging.debug("Search pattern:{}".format(patterns))
        session = vm.wait_for_login(serial=True)
        libvirt.check_cmd_output('lspci', eval(patterns), session=session)
        session.close()

    def check_cntrl(vm_xml, cntlr_type, cntlr_model, cntlr_index,
                    check_dict, qemu_pattern):
        """
        Check the controller or get the controller's search patterns.
        Currently check_dict and qemu_pattern are not
        supported to be used at same time.

        :param vm_xml, the guest VMXML instance
        :param cntlr_type, the controller type
        :param cntlr_model, the controller's model
        :param cntlr_index, the controller's index
        :param check_dict, the dict for checking in the controller
        :param qemu_pattern: True if it needs to be checked with qemu
                              command line. False if not.
        :return Tuple (Controller, List) if qemu_pattern
                       Controller: the controller found.
                       List: a list including qemu search patterns
        :return None if check_dict
        :raise test.fail if the model name is not expected
        :raise test.error if the controller is not found
        """
        qemu_list = None
        for elem in vm_xml.devices.by_device_tag('controller'):
            if (cntlr_type == elem.type and cntlr_model == elem.model):
                if cntlr_index and cntlr_index != elem.index:
                    continue
                if qemu_pattern:
                    if cntlr_model not in ['pci-root', 'pcie-root']:
                        qemu_list = prepare_qemu_pattern(elem)
                    return (elem, qemu_list)
                if check_dict:
                    logging.debug("Checking list {}".format(check_dict))
                    if ('modelname' in check_dict and
                            elem.model_name['name'] != check_dict['modelname']):
                        test.fail("Can't find the expected model name {} "
                                  "with (type:{}, model:{}, index:{}), "
                                  "found {}".format(check_dict['modelname'],
                                                    cntlr_type,
                                                    cntlr_model,
                                                    cntlr_index,
                                                    elem.model_name['name']))
                    if ('busNr' in check_dict and
                            elem.target['busNr'] != check_dict['busNr']):
                        test.fail("Can't find the expected busNr {} "
                                  "with (type:{}, model:{}, index:{}), "
                                  "found {}".format(check_dict['busNr'],
                                                    cntlr_type,
                                                    cntlr_model,
                                                    cntlr_index,
                                                    elem.target['busNr']))
                    else:
                        logging.debug("Check controller successfully")
                        return
        test.error("Can't find the specified controller with "
                   "(type:{}, model:{}, index:{})".format(cntlr_type,
                                                          cntlr_model,
                                                          cntlr_index))

    def detach_device(vm_name):
        """
        Detach a device from the given guest

        :param vm_name: The guest name
        :return: None
        """
        attach_dev_type = params.get("attach_dev_type", 'disk')
        detach_option = params.get("detach_option")
        if attach_dev_type == 'interface':
            ret = virsh.detach_interface(vm_name, detach_option,
                                         **virsh_dargs)
        else:
            logging.debug("No need to detach any device.")

    def attach_device(vm_name):
        """
        Attach one or more devices to the guest a given number of times

        :param vm_name: The guest name
        :return: None
        """
        attach_count = params.get("attach_count", '1')
        attach_dev_type = params.get("attach_dev_type", 'disk')
        attach_option = params.get("attach_option")
        if attach_option.count('--address '):
            index_str = "%02x" % int(auto_indexes_dict['pcie-root-port'][0])
            attach_option = attach_option % index_str
        for count in range(0, int(attach_count)):
            if attach_dev_type == 'disk':
                file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                libvirt.create_local_disk('file', file_path, size='1')
                ret = virsh.attach_disk(vm_name,
                                        file_path,
                                        params.get('dev_target', 'vdb'),
                                        extra=attach_option,
                                        **virsh_dargs)
            elif attach_dev_type == 'interface':
                ret = virsh.attach_interface(vm_name,
                                             attach_option,
                                             **virsh_dargs)
            else:
                logging.debug("No need to attach any device.")
                break

    def check_detach_attach_result(vm_name, cmd, pattern, expect_output, option='--hmp'):
        """
        Check the attach/detach result by qemu_monitor_command.

        :param vm_name: guest name
        :param cmd: the command for qemu_monitor_command
        :param pattern: regular expression searched for in the output of
                        qemu_monitor_command when expect_output is None
        :param expect_output: the exact output expected from
                              qemu_monitor_command; when given (e.g. ''),
                              the output is compared for equality instead
        :param option: option for qemu_monitor_command
        :raise: test.fail if the pattern is not matched
        :return: the result of the equality check when expect_output is
                 given, otherwise None
        """
        ret = virsh.qemu_monitor_command(vm_name, cmd, option)
        libvirt.check_result(ret)
        if expect_output is not None:
            # Compare the monitor output with the exact expected string,
            # e.g. '' after a successful detach.
            return expect_output == ret.stdout.strip()
        if pattern and not re.findall(pattern, ret.stdout.strip()):
            test.fail("Can't find the pattern '{}' in qemu monitor "
                      "command output '{}'".format(pattern,
                                                   ret.stdout.strip()))
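    # Usage sketch (hedged; the HMP command and the pattern are assumed
    # values, not this test's parameters):
    #
    #   # after hot-plug, expect the device to show up in 'info block':
    #   check_detach_attach_result(vm_name, 'info block', r'virtio\d+', None)
    #   # after hot-unplug, expect the monitor to print nothing:
    #   check_detach_attach_result(vm_name, 'info block', None, '')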

    def check_guest_by_cmd(cmds, expect_error=False):
        """
        Execute the command within guest and check status

        :param cmds: Str or List, The command executed in guest
        :param expect_error: True if the command is expected to fail
        :return: None
        :raise test.fail if command status is not as expected
        """
        def _check_cmd_result(cmd):
            logging.debug("Command in guest gets result: %s", output)
            if status and not expect_error:
                test.fail("Command '{}' fails in guest with status "
                          "'{}'".format(cmd, status))
            elif status and expect_error:
                logging.debug("Command '{}' fails in guest as "
                              "expected".format(cmd))
            elif not status and not expect_error:
                logging.debug("Check guest by command successfully")
            else:
                test.fail("Check guest by command successfully, "
                          "but expect failure")

        logging.debug("Execute command '{}' in guest".format(cmds))
        session = vm.wait_for_login(serial=True)
        (status, output) = (None, None)
        if isinstance(cmds, str):
            status, output = session.cmd_status_output(cmds)
            _check_cmd_result(cmds)
        elif isinstance(cmds, list):
            for cmd in cmds:
                if isinstance(cmd, str):
                    status, output = session.cmd_status_output(cmd)
                    _check_cmd_result(cmd)
                elif isinstance(cmd, dict):
                    for cmd_key in cmd.keys():
                        status, output = session.cmd_status_output(cmd_key)
                        if output.strip() != cmd[cmd_key]:
                            test.fail("Command '{}' does not get "
                                      "expect result {}, but found "
                                      "{}".format(cmd_key,
                                                  cmd[cmd_key],
                                                  output.strip()))
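    # The 'cmds' argument accepts three shapes (a sketch with hypothetical
    # guest commands and values):
    #
    #   check_guest_by_cmd('lspci')                       # single command
    #   check_guest_by_cmd(['lspci', 'lsblk'])            # list of commands
    #   check_guest_by_cmd([{'cat /sys/class/net/eth0/mtu': '1500'}])
    #   # a dict entry also compares the command's stripped output with
    #   # the expected value ('1500' is an assumed example).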

    def get_device_bus(vm_xml, device_type):
        """
        Get the bus that the devices are attached to.

        :param vm_xml: Guest xml
        :param device_type: The type of device, like disk, interface
        :return: a list of the buses the devices are attached to
        """
        devices = vm_xml.get_devices(device_type=device_type)
        bus_list = []
        for device in devices:
            logging.debug("device:{}".format(device))
            bus = device.address.attrs['bus']
            logging.debug("This device's bus:{}".format(bus))
            bus_list.append(bus)
        return bus_list

    def add_device_xml(vm_xml, device_type, device_cfg_dict):
        """
        Add a device xml to the existing vm xml

        :param vm_xml: the existing vm xml object
        :param device_type: type of device to be added
        :param device_cfg_dict: the configuration of the device
        :return: None
        """
        vm_xml.remove_all_device_by_type(device_type)
        dev_obj = vm_xml.get_device_class(device_type)()

        dev_cfg = eval(device_cfg_dict)
        if device_type == 'sound':
            dev_obj.model_type = dev_cfg.get("model")
        elif device_type == 'rng':
            dev_obj.rng_model = dev_cfg.get("model")
            rng_backend = dev_obj.Backend()
            rng_backend.backend_model = "random"
            dev_obj.backend = rng_backend
        elif device_type == 'memballoon':
            dev_obj.model = dev_cfg.get("model")
        if 'bus' in dev_cfg:
            addr_dict = {'bus': dev_cfg.get("bus"),
                         'type': dev_cfg.get("type", "pci"),
                         'slot': dev_cfg.get("slot", "0x00")}
            if device_type == 'rng':
                dev_obj.address = dev_obj\
                    .new_rng_address(**{"attrs": addr_dict})
            else:
                dev_obj.address = addr_dict
        vm_xml.add_device(dev_obj)
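    # device_cfg_dict is a string that evals to a dict; a minimal sketch
    # (the model and address values are illustrative only):
    #
    #   add_device_xml(vm_xml, 'rng',
    #                  "{'model': 'virtio', 'bus': '0x04', 'slot': '0x00'}")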

    def check_multifunction():
        """
        Check if multifunction is found in vm xml for specified controller

        :raise: test.fail if multifunction is not as expected
        """
        (_, _, _, _, multi_func) = get_controller_addr(cntlr_type, model, '0')
        if not multi_func or multi_func != 'on':
            test.fail("Can't find multifunction=on in certain "
                      "controller(type:{}, model:{}, "
                      "index:{})".format(cntlr_type, model, 0))

    os_machine = params.get('machine_type', None)
    libvirt.check_machine_type_arch(os_machine)
    cntlr_type = params.get('controller_type', None)
    model = params.get('controller_model', None)
    index = params.get('controller_index', None)
    vectors = params.get('controller_vectors', None)
    pcihole = params.get('controller_pcihole64', None)
    chassisNr = params.get('chassisNr', None)
    addr_str = params.get('controller_address', None)
    cmpnn_cntlr_model = params.get('companion_controller_model', None)
    cmpnn_cntlr_num = params.get('companion_controller_num', None)
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    no_pci_controller = params.get("no_pci_controller", "no")
    pci_bus_number = params.get("pci_bus_number", "0")
    remove_address = "yes" == params.get("remove_address", "yes")
    remove_contr = "yes" == params.get("remove_contr", "yes")
    setup_controller = params.get("setup_controller", "yes")
    index_second = params.get("controller_index_second", None)
    cntlr_bus = params.get('controller_bus')
    cur_machine = os_machine
    check_qemu = "yes" == params.get("check_qemu", "no")
    check_within_guest = "yes" == params.get("check_within_guest", "no")
    run_vm = "yes" == params.get("run_vm", "no")
    second_level_controller_num = params.get("second_level_controller_num", "0")
    check_contr_addr = "yes" == params.get("check_contr_addr", "yes")
    qemu_patterns = params.get("qemu_patterns")
    status_error = "yes" == params.get("status_error", "no")
    model_name = params.get("model_name", None)
    expect_err_msg = params.get("err_msg", None)
    new_pcie_root_port_model = params.get("new_model")
    old_pcie_root_port_model = params.get("old_model")
    add_contrl_list = params.get("add_contrl_list")
    auto_bus = "yes" == params.get("auto_bus", "no")
    check_cntrls_list = params.get("check_cntrls_list")
    sound_dict = params.get("sound_dict")
    balloon_dict = params.get("balloon_dict")
    rng_dict = params.get("rng_dict")
    guest_patterns = params.get("guest_patterns")
    attach_option = params.get("attach_option")
    detach_option = params.get("detach_option")
    attach_dev_type = params.get("attach_dev_type", 'disk')
    remove_nic = "yes" == params.get("remove_nic", 'no')
    qemu_monitor_cmd = params.get("qemu_monitor_cmd")
    cmd_in_guest = params.get("cmd_in_guest")
    check_dev_bus = "yes" == params.get("check_dev_bus", "no")
    cpu_numa_cells = params.get("cpu_numa_cells")
    virsh_dargs = {'ignore_status': False, 'debug': True}
    auto_indexes_dict = {}
    auto_index = params.get('auto_index', 'no') == 'yes'
    auto_slot = params.get('auto_slot', 'no') == 'yes'

    libvirt_version.is_libvirt_feature_supported(params)

    if index and index_second:
        if int(index) > int(index_second):
            test.error("Invalid parameters: controller_index (%s) must not "
                       "exceed controller_index_second (%s)"
                       % (index, index_second))

    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    try:
        if remove_contr:
            vm_xml.remove_all_device_by_type('controller')
        if remove_address:
            remove_devices(vm_xml, 'address')
        remove_devices(vm_xml, 'usb')
        if remove_nic:
            remove_devices(vm_xml, 'interface')
        # Get the max controller index in current vm xml
        the_model = 'pci-root' if 'ppc' in platform.machine() else 'pcie-root-port'
        if add_contrl_list:
            ret_indexes = libvirt_pcicontr.get_max_contr_indexes(vm_xml, 'pci', the_model)
            if ret_indexes and len(ret_indexes) > 0:
                if auto_bus:
                    new_index = "0x%02x" % (int(ret_indexes[0]) + 1)
                    add_contrl_list = re.sub(r"'bus': '%s'", "'bus': '%s'" % new_index, add_contrl_list, count=5)
                    logging.debug("bus is set automatically with %s", new_index)
                if auto_slot:
                    available_slot = libvirt_pcicontr.get_free_pci_slot(vm_xml)
                    if not available_slot:
                        test.error("No pci slot is available any more. Please check your vm xml.")
                    add_contrl_list = re.sub(r"'slot': '%s'", "'slot': '%s'" % available_slot, add_contrl_list, count=5)
                    logging.debug("slot is set automatically with %s", available_slot)
                if auto_index:
                    new_index = int(ret_indexes[0]) + 1
                    add_contrl_list = re.sub(r"'index': '%s'", "'index': '%s'" % new_index, add_contrl_list, count=5)
                    logging.debug("index is set automatically with %s", new_index)
        logging.debug("Now add_contrl_list=%s", add_contrl_list)

        if setup_controller == "yes":
            if add_contrl_list:
                contrls = eval(add_contrl_list)
                for one_contrl in contrls:
                    contr_dict = {}
                    cntl_target = ''
                    if 'model' in one_contrl:
                        contr_dict.update({'controller_model': one_contrl['model']})
                    if 'busNr' in one_contrl:
                        cntl_target = "{'busNr': %s}" % one_contrl['busNr']
                    if 'chassisNr' in one_contrl:
                        cntl_target += "{'chassisNr': '%s'}" % one_contrl['chassisNr']
                    if 'alias' in one_contrl:
                        contr_dict.update({'contr_alias': one_contrl['alias']})
                    if 'type' in one_contrl:
                        contr_dict.update({'controller_type': one_contrl['type']})
                    else:
                        contr_dict.update({'controller_type': 'pci'})
                    if 'node' in one_contrl:
                        contr_dict.update({'controller_node': one_contrl['node']})
                    if 'index' in one_contrl:
                        contr_dict.update({'controller_index': one_contrl['index']})
                    contr_dict.update({'controller_target': cntl_target})
                    addr = None
                    if 'bus' in one_contrl:
                        addr = {'bus': one_contrl['bus']}
                        if 'slot' in one_contrl:
                            addr.update({'slot': one_contrl['slot']})
                            if 'func' in one_contrl:
                                addr.update({'function': one_contrl['func']})
                    if addr:
                        contr_dict.update({'controller_addr': str(addr)})
                    logging.debug(contr_dict)
                    controller_add = libvirt.create_controller_xml(contr_dict)
                    vm_xml.add_device(controller_add)
                    logging.debug("Add a controller: %s" % controller_add)
            else:
                if index_second:
                    setup_controller_xml(index_second)
                setup_controller_xml(index, addr_str)
                if second_level_controller_num:
                    for indx in range(2, int(second_level_controller_num) + 2):
                        addr_second = "0%s:0%s.0" % (index, str(indx))
                        setup_controller_xml(str(indx), addr_second)

        setup_os_xml()
        if int(pci_bus_number) > 0:
            address_params = {'bus': "%0#4x" % int(pci_bus_number), 'slot': "%0#4x" % int(pci_bus_number)}
            libvirt.set_disk_attr(vm_xml, 'vda', 'address', address_params)
        if cpu_numa_cells:
            if not vm_xml.cpu:
                vmxml_cpu = VMCPUXML()
                vmxml_cpu.xml = "<cpu mode='host-model'><numa/></cpu>"
            else:
                vmxml_cpu = vm_xml.cpu
                logging.debug("Existing cpu configuration in guest xml:\n%s", vmxml_cpu)
                vmxml_cpu.mode = 'host-model'
                if platform.machine() == 'aarch64':
                    vmxml_cpu.mode = 'host-passthrough'
                vmxml_cpu.remove_elem_by_xpath('/model')
                vmxml_cpu.remove_elem_by_xpath('/numa')
            vmxml_cpu.numa_cell = VMCPUXML.dicts_to_cells(eval(cpu_numa_cells))
            vm_xml.cpu = vmxml_cpu
            vm_xml.vcpu = int(params.get('vcpu_count', 4))
        if sound_dict:
            add_device_xml(vm_xml, 'sound', sound_dict)
        if rng_dict:
            add_device_xml(vm_xml, 'rng', rng_dict)
        if balloon_dict:
            add_device_xml(vm_xml, 'memballoon', balloon_dict)

        logging.debug("Test VM XML before define is %s" % vm_xml)
        if not define_and_check(vm_xml):
            logging.debug("Can't define the VM, exiting.")
            return
        vm_xml = VMXML.new_from_dumpxml(vm_name)
        logging.debug("Test VM XML after define is %s" % vm_xml)
        if auto_index:
            contrls = eval(add_contrl_list)
            for one_contrl in contrls:
                ret_indexes = libvirt_pcicontr.get_max_contr_indexes(vm_xml,
                                                                     one_contrl.get('type', 'pci'),
                                                                     one_contrl.get('model'))
                auto_indexes_dict.update({one_contrl['model']: ret_indexes})
        if check_contr_addr:
            check_controller_addr(cntlr_bus)
        if new_pcie_root_port_model and old_pcie_root_port_model:
            if utils_misc.compare_qemu_version(2, 9, 0, False):
                expect_model = new_pcie_root_port_model
            else:
                expect_model = old_pcie_root_port_model
            logging.debug("Expect the model for 'pcie-root-port': "
                          "%s" % expect_model)
            check_dict = {'modelname': expect_model}
            check_cntrl(vm_xml, 'pci', 'pcie-root-port',
                        '2', check_dict, False)
        if check_cntrls_list:
            for check_one in eval(check_cntrls_list):
                logging.debug("The controller to be checked: {}".format(check_one))
                check_cntrl(vm_xml, check_one.get('type', 'pci'), check_one.get('model'),
                            check_one.get('index'), check_one, False)
        if run_vm:
            try:
                if not start_and_check():
                    logging.debug("Can't start the VM, exiting.")
                    return
            except virt_vm.VMStartError as detail:
                test.fail(detail)

        # Need coldplug/hotplug
        if attach_option:
            attach_device(vm_name)
            vm_xml = VMXML.new_from_dumpxml(vm_name)
            logging.debug("Guest xml after attaching device:{}".format(vm_xml))
            # Check device's bus if needed
            if check_dev_bus:
                buses = get_device_bus(vm_xml, attach_dev_type)
                if len(buses) == 0:
                    test.fail("No bus was found")
                if buses[0] != params.get("expect_bus"):
                    test.fail("The expected bus for device is {}, "
                              "but found {}".format(params.get("expect_bus"),
                                                    buses[0]))
        if qemu_monitor_cmd:
            check_detach_attach_result(vm_name,
                                       qemu_monitor_cmd,
                                       params.get("qemu_monitor_pattern"),
                                       None)
        # Check guest xml
        if attach_dev_type == 'interface' and 'e1000e' in attach_option:
            cntls = vm_xml.get_controllers(controller_type='pci', model='pcie-root-port')
            cntl_index_list = []
            for cntl in cntls:
                cntl_index_list.append(cntl.get('index'))
            logging.debug("All pcie-root-port controllers' "
                          "index: {}".format(cntl_index_list))
            bus_list = get_device_bus(vm_xml, "interface")
            for bus in bus_list:
                if str(int(bus, 16)) not in cntl_index_list:
                    test.fail("The attached NIC with bus '{}' is not attached "
                              "to any pcie-root-port by default".format(bus))
        if check_qemu:
            if qemu_patterns:
                if auto_index:
                    index_str = "%x" % int(auto_indexes_dict['pcie-root-port'][0])
                    qemu_patterns = qemu_patterns % index_str
                    logging.debug("qemu_patterns=%s", qemu_patterns)
                if qemu_patterns.count('multifunction=on'):
                    check_multifunction()
                search_qemu_cmd = eval(qemu_patterns)
                logging.debug(search_qemu_cmd)
            else:
                search_qemu_cmd = get_search_patt_qemu_line()
            check_qemu_cmdline(search_pattern=search_qemu_cmd)
            vm.wait_for_login().close()

        if check_within_guest:
            try:
                if int(pci_bus_number) > 0:
                    for contr_idx in range(1, int(pci_bus_number) + 1):
                        check_guest(cntlr_type, model, str(contr_idx))
                    return
                if index:
                    check_max_index = int(index) + int(second_level_controller_num)
                    for contr_idx in range(1, int(check_max_index) + 1):
                        check_guest(cntlr_type, model, str(contr_idx))
                elif guest_patterns:
                    check_guest_by_pattern(guest_patterns)
                elif cmd_in_guest:
                    check_guest_by_cmd(eval(cmd_in_guest))
                else:
                    check_guest(cntlr_type, model, cntlr_bus=cntlr_bus)
                    if model == 'pcie-root':
                        # Need to check the other auto-added controllers
                        check_guest(cntlr_type, 'dmi-to-pci-bridge', '1')
                        check_guest(cntlr_type, 'pci-bridge', '2')
            except remote.LoginTimeoutError as e:
                logging.debug(e)
                if not status_error:
                    raise
        # Need hotunplug
        if detach_option:
            detach_device(vm_name)
            if qemu_monitor_cmd:
                check_detach_attach_result(vm_name,
                                           qemu_monitor_cmd,
                                           params.get("qemu_monitor_pattern"),
                                           "")
            if cmd_in_guest:
                check_guest_by_cmd(eval(cmd_in_guest), expect_error=True)

    finally:
        vm_xml_backup.sync()
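The qemu command-line checks above ultimately grep the emulator's process
arguments. A minimal standalone sketch of the same idea outside the
avocado-vt framework (the domain name "avocado-vt-vm1" is illustrative):

import re
from pathlib import Path

def qemu_cmdline_has(vm_name, pattern):
    """Return True if a qemu process serving vm_name matches pattern."""
    for cmdline in Path("/proc").glob("[0-9]*/cmdline"):
        try:
            # /proc/<pid>/cmdline separates arguments with NUL bytes.
            args = cmdline.read_bytes().replace(b"\0", b" ").decode(errors="replace")
        except OSError:
            continue  # process exited while scanning
        if "qemu" in args and vm_name in args and re.search(pattern, args):
            return True
    return False

# e.g. qemu_cmdline_has("avocado-vt-vm1", r"pcie-root-port")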
Example #17
def run(test, params, env):
    """
    Test when the PCI configuration file is in read-only mode
    """
    def test_vf_hotplug():
        """
        Hot-plug VF to VM

        """
        logging.info("Preparing a running guest...")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vm.start()
        vm_session = vm.wait_for_serial_login(timeout=180)

        logging.info("Attaching VF to the guest...")
        mac_addr = utils_net.generate_mac_address_simple()
        iface_dict = eval(
            params.get('iface_dict', '{"hostdev_addr": "%s"}') %
            utils_sriov.pci_to_addr(vf_pci))
        iface = interface.Interface("hostdev")
        iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", iface_dict)
        virsh.attach_device(vm_name,
                            iface.xml,
                            debug=True,
                            ignore_status=False)

        logging.info("Checking VF in the guest...")
        vm_iface_types = [
            iface.get_type_name() for iface in vm_xml.VMXML.new_from_dumpxml(
                vm_name).devices.by_device_tag("interface")
        ]
        if 'hostdev' not in vm_iface_types:
            test.fail('Unable to get hostdev interface!')
        if cmd_in_vm:
            if not utils_misc.wait_for(
                    lambda: not vm_session.cmd_status(cmd_in_vm), 30, 10):
                test.fail("Can not get the Virtual Function info on vm!")
        vm_session.close()

    libvirt_version.is_libvirt_feature_supported(params)

    test_case = params.get("test_case", "")
    run_test = eval("test_%s" % test_case)
    cmd_in_vm = params.get("cmd_in_vm")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    pf_pci = utils_sriov.get_pf_pci()
    if not pf_pci:
        test.cancel("NO available pf found.")
    default_vf = sriov_base.setup_vf(pf_pci, params)
    vf_pci = utils_sriov.get_vf_pci_id(pf_pci)
    dev_name = utils_sriov.get_device_name(vf_pci)

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd('virtqemud')
    try:
        virsh.nodedev_detach(dev_name, debug=True, ignore_status=False)
        logging.info("Re-mounting sysfs with ro mode...")
        utils_misc.mount('/sys', '', None, 'remount,ro')
        libvirtd.restart()
        run_test()
    finally:
        logging.info("Recover test enviroment.")
        utils_misc.mount('/sys', '', None, 'remount,rw')
        sriov_base.recover_vf(pf_pci, params, default_vf)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        orig_config_xml.sync()
        virsh.nodedev_reattach(dev_name, debug=True)
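The core trick of this test is flipping the PCI config files under /sys to
read-only. A minimal sketch of that setup/teardown step on its own, assuming
root privileges (the mount(8) options are standard):

import subprocess

def remount_sys(read_only):
    """Remount /sys read-only (True) or read-write (False)."""
    opt = "remount,ro" if read_only else "remount,rw"
    subprocess.run(["mount", "-o", opt, "/sys"], check=True)

# setup:    remount_sys(True)   # then restart virtqemud and hot-plug the VF
# teardown: remount_sys(False)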
Example #18
def run(test, params, env):
    """
    Sriov net failover related test.
    """
    def setup_hotplug_hostdev_iface_with_teaming():
        logging.info("Create hostdev network.")
        net_hostdev_fwd = params.get("net_hostdev_fwd",
                                     '{"mode": "hostdev", "managed": "yes"}')
        net_hostdev_dict = {
            "net_name": net_hostdev_name,
            "net_forward": net_hostdev_fwd,
            "net_forward_pf": '{"dev": "%s"}' % pf_name
        }
        libvirt_network.create_or_del_network(net_hostdev_dict)

        logging.info("Clear up VM interface.")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        iface = interface.Interface("network")
        iface.xml = create_bridge_iface_xml(vm, mac_addr, params)
        virsh.attach_device(vm_name,
                            iface.xml,
                            flagstr='--persistent',
                            debug=True,
                            ignore_status=False)
        vm.start()
        vm.wait_for_serial_login(timeout=180).close()

    def teardown_hotplug_hostdev_iface_with_teaming():
        logging.info("Delete hostdev network.")
        net_hostdev_dict = {"net_name": net_hostdev_name}
        libvirt_network.create_or_del_network(net_hostdev_dict, is_del=True)

    def test_hotplug_hostdev_iface_with_teaming():
        logging.info("Attach a hostdev interface.")
        hostdev_iface_xml = create_hostdev_iface_xml(vm, mac_addr, params)
        virsh.attach_device(vm_name,
                            hostdev_iface_xml,
                            debug=True,
                            ignore_status=False)
        check_ifaces(vm_name, expected_ifaces={"bridge", "hostdev"})

        vm_session = vm.wait_for_serial_login(timeout=240)
        ping_ip = get_ping_dest(vm_session, mac_addr)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)

        logging.info("Detach the hostdev interface.")
        hostdev_iface = interface.Interface("network")
        for ifc in vm_xml.VMXML.new_from_dumpxml(
                vm_name).devices.by_device_tag("interface"):
            if ifc.type_name == "hostdev":
                ifc.del_address()
                hostdev_iface = ifc
        virsh.detach_device(vm_name,
                            hostdev_iface.xml,
                            wait_remove_event=True,
                            debug=True,
                            ignore_status=False)
        check_ifaces(vm_name, expected_ifaces={"hostdev"}, status_error=True)

        check_vm_network_accessed(vm_session,
                                  2,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=False)

        libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)
        logging.info("Re-attach the hostdev interface.")
        virsh.attach_device(vm_name,
                            hostdev_iface.xml,
                            debug=True,
                            ignore_status=False)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)

    def setup_hotplug_hostdev_device_with_teaming():
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vm.start()
        vm.wait_for_serial_login(timeout=240).close()

    def test_hotplug_hostdev_device_with_teaming():
        default_vf_mac = utils_sriov.get_vf_mac(pf_name)
        utils_sriov.set_vf_mac(pf_name, mac_addr)
        logging.info("Attach the bridge interface.")
        brg_iface_xml = create_bridge_iface_xml(vm, mac_addr, params)
        virsh.attach_device(vm_name,
                            brg_iface_xml,
                            debug=True,
                            ignore_status=False)
        # Wait for 10s before attaching the hostdev device
        time.sleep(10)
        logging.info("Attach the hostdev device.")
        hostdev_dev = libvirt.create_hostdev_xml(vf_pci,
                                                 teaming=hostdev_teaming_dict)
        virsh.attach_device(vm_name,
                            hostdev_dev.xml,
                            debug=True,
                            ignore_status=False)
        vm_session = vm.wait_for_serial_login(timeout=240)
        ping_ip = get_ping_dest(vm_session, mac_addr)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)
        logging.info("Detach the hostdev device.")
        virsh.detach_device(vm_name,
                            hostdev_dev.xml,
                            wait_remove_event=True,
                            debug=True,
                            ignore_status=False)
        logging.debug("Recover vf's mac to %s.", default_vf_mac)
        utils_sriov.set_vf_mac(pf_name, default_vf_mac)

        check_hostdev = vm_xml.VMXML.new_from_dumpxml(vm_name)\
            .devices.by_device_tag('hostdev')
        if check_hostdev:
            test.fail("The hostdev device exists after detaching %s." %
                      check_hostdev)
        libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)
        check_vm_network_accessed(vm_session,
                                  2,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=False)

    def setup_save_restore_hostdev_device_with_teaming():
        logging.info("Start a VM with bridge iface and hostdev device.")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface = interface.Interface("network")
        iface.xml = create_bridge_iface_xml(vm, mac_addr, params)
        vmxml.add_device(iface)

        hostdev_dev = libvirt.create_hostdev_xml(vf_pci,
                                                 teaming=hostdev_teaming_dict)
        vmxml.add_device(hostdev_dev)
        vmxml.sync()
        vm.start()
        utils_sriov.set_vf_mac(pf_name, mac_addr)
        vm.wait_for_serial_login(timeout=240).close()

    def test_save_restore_hostdev_device_with_teaming():
        logging.info("Save/restore VM.")
        save_file = os.path.join(data_dir.get_tmp_dir(), "save_file")
        virsh.save(vm_name,
                   save_file,
                   debug=True,
                   ignore_status=False,
                   timeout=10)
        if not libvirt.check_vm_state(vm_name, "shut off"):
            test.fail("The guest should be down after executing 'virsh save'.")
        virsh.restore(save_file, debug=True, ignore_status=False)
        if not libvirt.check_vm_state(vm_name, "running"):
            test.fail(
                "The guest should be running after executing 'virsh restore'.")
        vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login()
        ping_ip = get_ping_dest(vm_session, mac_addr)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)
        logging.info("Detach the hostdev device.")
        hostdev_dev = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).devices.\
            by_device_tag("hostdev")
        virsh.detach_device(vm_name,
                            hostdev_dev.xml,
                            wait_remove_event=True,
                            debug=True,
                            ignore_status=False)
        check_hostdev = vm_xml.VMXML.new_from_dumpxml(vm_name)\
            .devices.by_device_tag('hostdev')
        if check_hostdev:
            test.fail("The hostdev device exists after detaching %s." %
                      check_hostdev)

        check_vm_network_accessed(vm_session,
                                  2,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=False)
        logging.info("Attach the hostdev device.")
        virsh.attach_device(vm_name,
                            hostdev_dev.xml,
                            debug=True,
                            ignore_status=False)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)

    def setup_save_restore_hostdev_iface_with_teaming():
        logging.info("Create hostdev network.")
        net_hostdev_fwd = params.get("net_hostdev_fwd",
                                     '{"mode": "hostdev", "managed": "yes"}')
        net_hostdev_dict = {
            "net_name": net_hostdev_name,
            "net_forward": net_hostdev_fwd,
            "net_forward_pf": '{"dev": "%s"}' % pf_name
        }
        libvirt_network.create_or_del_network(net_hostdev_dict)
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface = interface.Interface("network")
        iface.xml = create_bridge_iface_xml(vm, mac_addr, params)
        vmxml.add_device(iface)
        iface.xml = create_hostdev_iface_xml(vm, mac_addr, params)
        vmxml.add_device(iface)
        vmxml.sync()
        logging.debug("VMXML after updating ifaces: %s.",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))
        vm.start()
        vm_session = vm.wait_for_serial_login(timeout=240)
        ping_ip = get_ping_dest(vm_session, mac_addr)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)

    def teardown_save_restore_hostdev_iface_with_teaming():
        teardown_hotplug_hostdev_iface_with_teaming()

    def test_save_restore_hostdev_iface_with_teaming():
        logging.info("Save/restore VM.")
        save_file = os.path.join(data_dir.get_tmp_dir(), "save_file")
        virsh.save(vm_name,
                   save_file,
                   debug=True,
                   ignore_status=False,
                   timeout=10)
        if not libvirt.check_vm_state(vm_name, "shut off"):
            test.fail("The guest should be down after executing 'virsh save'.")
        virsh.restore(save_file, debug=True, ignore_status=False)
        if not libvirt.check_vm_state(vm_name, "running"):
            test.fail(
                "The guest should be running after executing 'virsh restore'.")

        vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)
        ping_ip = get_ping_dest(vm_session, mac_addr, False)
        logging.debug(ping_ip)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)

    def check_vm_iface_num(session, exp_num=3):
        """
        Check the number of interfaces

        :param session: The session to the guest
        :param exp_num: The expected number
        :return: True when interfaces' number is equal to exp_num
        """
        p_iface = utils_net.get_remote_host_net_ifs(session)[0]
        logging.debug("Ifaces in VM: %s", p_iface)

        return len(p_iface) == exp_num

    def check_vm_network_accessed(vm_session,
                                  expected_iface_no=3,
                                  ping_dest="8.8.8.8",
                                  timeout=30,
                                  tcpdump_iface=None,
                                  tcpdump_status_error=False):
        """
        Test VM's network by checking ifaces' number and the accessibility

        :param vm_session: The session object to the guest
        :param expected_iface_no: The expected number of ifaces
        :param ping_dest: The destination to ping
        :param timeout: The timeout of the check
        :param tcpdump_iface: The interface for tcpdump to listen on
        :param tcpdump_status_error: True if the tcpdump output should NOT
            include the string "ICMP echo request", False if it should
        :raise: test.fail when the number of ifaces is incorrect or ping fails
        """
        if not utils_misc.wait_for(
                lambda: check_vm_iface_num(vm_session, expected_iface_no),
                first=3,
                timeout=timeout):
            test.fail("%d interfaces should be found on the vm." %
                      expected_iface_no)
        if tcpdump_iface:
            cmd = "tcpdump  -i %s icmp" % tcpdump_iface
            tcpdump_session = aexpect.ShellSession('bash')
            tcpdump_session.sendline(cmd)

        if not utils_misc.wait_for(
                lambda: not utils_test.ping(ping_dest,
                                            count=3,
                                            timeout=5,
                                            output_func=logging.debug,
                                            session=vm_session)[0],
                first=5,
                timeout=timeout):
            test.fail("Failed to ping %s." % ping_dest)
        if tcpdump_iface:
            output = tcpdump_session.get_stripped_output()
            logging.debug("tcpdump's output: %s.", output)
            pat_str = "ICMP echo request"
            if re.search(pat_str, output):
                if tcpdump_status_error:
                    test.fail(
                        "Get incorrect tcpdump output: {}, it should not "
                        "include '{}'.".format(output, pat_str))
            else:
                if not tcpdump_status_error:
                    test.fail("Get incorrect tcpdump output: {}, it should "
                              "include '{}'.".format(output, pat_str))

    def get_ping_dest(vm_session, mac_addr, restart_network=True):
        """
        Get an ip address to ping

        :param vm_session: The session object to the guest
        :param mac_addr: mac address of given interface
        :param restart_network: Whether to restart the guest's network
        :return: ip address
        """
        if restart_network:
            utils_misc.cmd_status_output("dhclient -r; sleep 5; dhclient",
                                         shell=True,
                                         session=vm_session)
        vm_iface_info = utils_net.get_linux_iface_info(
            mac_addr, vm_session)['addr_info'][0]['local']
        return re.sub(r'\d+$', '1', vm_iface_info)
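    # Worked example (illustrative values): if the iface that owns mac_addr
    # got 192.168.122.15 via DHCP, re.sub(r'\d+$', '1', ...) yields
    # 192.168.122.1, i.e. the presumed gateway is used as the ping target.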

    def create_bridge_iface_xml(vm, mac_addr, params):
        """
        Create xml of bridge interface

        :param vm: The vm object
        :param mac_addr: The mac address
        :param params: Dictionary with the test parameters
        :return: The interface xml
        """
        net_bridge_name = params.get("net_bridge_name", "host-bridge")
        iface_bridge_dict = {
            "type": "network",
            "source": "{'network': '%s'}" % net_bridge_name,
            "mac": mac_addr,
            "model": "virtio",
            "teaming": '{"type":"persistent"}',
            "alias": '{"name": "ua-backup0"}'
        }
        return libvirt.modify_vm_iface(vm.name, "get_xml", iface_bridge_dict)

    def create_hostdev_iface_xml(vm, mac_addr, params):
        """
        Create xml of hostdev interface

        :param vm: The vm object
        :param mac_addr: The mac address
        :param params: Dictionary with the test parameters
        :return: The interface xml
        """
        net_hostdev_name = params.get("net_hostdev_name", "hostdev-net")
        hostdev_iface_dict = {
            "type": "network",
            "source": "{'network': '%s'}" % net_hostdev_name,
            "mac": mac_addr,
            "teaming": '{"type":"transient", "persistent": "ua-backup0"}'
        }
        return libvirt.modify_vm_iface(vm.name, "get_xml", hostdev_iface_dict,
                                       4)
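    # Note on the teaming pairing above: the bridge iface is the persistent
    # member (alias 'ua-backup0'), while this hostdev iface is the transient
    # member that points back at 'ua-backup0'; the transient VF is what gets
    # detached and re-attached around save/restore or migration while the
    # bridge side keeps the guest's connectivity.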

    def check_ifaces(vm_name,
                     expected_ifaces={"bridge", "hostdev"},
                     status_error=False):
        """
        Check VM's interfaces

        :param vm_name: The name of VM
        :param expected_ifaces: The expected interfaces
        :param status_error: Whether the ifaces should be same with the expected_ifaces
        :raise: test.fail if the interface(s) is(are) not as expected
        """
        if not expected_ifaces:
            return
        else:
            expected_ifaces = set(expected_ifaces)
        vm_ifaces = [
            iface for iface in vm_xml.VMXML.new_from_dumpxml(
                vm_name).devices.by_device_tag("interface")
        ]
        ifaces_net = {iface.get_type_name() for iface in vm_ifaces}
        if expected_ifaces.issubset(ifaces_net) == status_error:
            test.fail(
                "Unable to get expected interface. The interface %s "
                "should%s be %s." %
                (ifaces_net, ' not' if status_error else '', expected_ifaces))
        else:
            logging.debug("{}Found iface(s) as expected: {}.".format(
                'Not ' if status_error else '', expected_ifaces))

    test_case = params.get("test_case", "")
    run_test = eval("test_%s" % test_case)
    setup_test = eval("setup_%s" % test_case) if "setup_%s" % test_case in \
        locals() else "setup_%s" % test_case
    teardown_test = eval("teardown_%s" % test_case) if "teardown_%s" % \
        test_case in locals() else "teardown_%s" % test_case
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])

    driver = params.get("driver", "ixgbe")
    bridge_name = params.get("bridge_name", "br0")
    net_bridge_name = params.get("net_bridge_name", "host-bridge")
    net_bridge_fwd = params.get("net_bridge_fwd", '{"mode": "bridge"}')
    net_hostdev_name = params.get("net_hostdev_name", "hostdev-net")
    bridge_name = params.get("bridge_name", "br0")
    hostdev_teaming_dict = params.get("hostdev_device_teaming_dict", '{}')

    default_vf = 0
    try:
        vf_no = int(params.get("vf_no", "4"))
    except ValueError as e:
        test.error(e)

    libvirt_version.is_libvirt_feature_supported(params)

    mac_addr = utils_net.generate_mac_address_simple()
    pf_pci = utils_sriov.get_pf_pci()
    if not pf_pci:
        test.cancel("NO available pf found.")
    pf_name = utils_sriov.get_pf_info_by_pci(pf_pci).get('iface')
    brg_dict = {'pf_name': pf_name, 'bridge_name': bridge_name}
    bridge_dict = {
        "net_name": net_bridge_name,
        "net_forward": net_bridge_fwd,
        "net_bridge": '{"name": "%s"}' % bridge_name
    }
    pf_pci_path = utils_misc.get_pci_path(pf_pci)
    cmd = "cat %s/sriov_numvfs" % (pf_pci_path)
    # sriov_numvfs reports the current VF count; keep it as an int so the
    # comparison with vf_no in the cleanup path works.
    default_vf = int(process.run(cmd, shell=True,
                                 verbose=True).stdout_text.strip())

    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        if not utils_sriov.set_vf(pf_pci_path, vf_no):
            test.error("Failed to set vf.")
        utils_sriov.add_or_del_connection(brg_dict, is_del=False)
        libvirt_network.create_or_del_network(bridge_dict)

        vf_pci = utils_sriov.get_vf_pci_id(pf_pci)
        exec_function(setup_test)
        run_test()

    finally:
        logging.info("Recover test enviroment.")
        utils_sriov.add_or_del_connection(brg_dict, is_del=True)
        libvirt_network.create_or_del_network(bridge_dict, is_del=True)
        if 'pf_pci_path' in locals() and default_vf != vf_no:
            utils_sriov.set_vf(pf_pci_path, default_vf)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        try:
            orig_config_xml.sync()
        except Exception:
            # FIXME: Workaround for 'save'/'managedsave' hanging issue
            utils_libvirtd.Libvirtd().restart()
            orig_config_xml.sync()

        exec_function(teardown_test)
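The VF provisioning in this test goes through the standard SR-IOV sysfs knob.
A rough standalone sketch of the idea behind utils_sriov.set_vf (the PF path
below is illustrative; requires root):

from pathlib import Path

def set_numvfs(pf_pci_path, count):
    """Write the requested VF count to the PF's sriov_numvfs knob."""
    knob = Path(pf_pci_path) / "sriov_numvfs"
    # The kernel rejects changing a non-zero count directly, so reset first.
    if int(knob.read_text()) != 0:
        knob.write_text("0")
    knob.write_text(str(count))

# set_numvfs("/sys/bus/pci/devices/0000:3b:00.0", 4)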
Example #19
def run(test, params, env):
    """
    Test blockcopy with different options.

    1) Prepare a running guest.
    2) Create snap.
    3) Do blockcopy.
    4) Check status by 'qemu-img info'.
    """
    def setup_blockcopy_extended_l2():
        """
        Prepare running domain with extended_l2=on type image.
        """
        # prepare image
        image_path = test_obj.tmp_dir + '/new_image'

        libvirt.create_local_disk("file",
                                  path=image_path,
                                  size='500M',
                                  disk_format=disk_format,
                                  extra=disk_extras)
        check_obj.check_image_info(image_path,
                                   check_item='extended l2',
                                   expected_value='true')
        test_obj.new_image_path = image_path
        # start get old parts
        session = vm.wait_for_login()
        test_obj.old_parts = utils_disk.get_parts_list(session)
        session.close()
        # attach new disk
        if encryption_disk:
            secret_disk_dict = eval(params.get("secret_disk_dict", '{}'))
            test_obj.prepare_secret_disk(image_path, secret_disk_dict)
            if not vm.is_alive():
                vm.start()
        else:
            virsh.attach_disk(vm.name,
                              source=image_path,
                              target=device,
                              extra=attach_disk_extra,
                              debug=True,
                              ignore_status=False)
        test_obj.new_dev = device
        # clean copy file
        if os.path.exists(tmp_copy_path):
            process.run('rm -f %s' % tmp_copy_path)
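        # The image created above is expected to carry the qcow2 extended_l2
        # sub-cluster feature; roughly the CLI equivalent (a sketch, assuming
        # disk_extras contains '-o extended_l2=on'):
        #
        #   qemu-img create -f qcow2 -o extended_l2=on /path/new_image 500M
        #   qemu-img info /path/new_image   # reports "extended l2: true"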

    def test_blockcopy_extended_l2():
        """
        Do blockcopy after creating snapshot with extended_l2 in disk image
        """
        # create snap chain and check snap path extended_l2 status
        test_obj.prepare_snapshot(snap_num=1)
        check_obj.check_image_info(test_obj.snap_path_list[0],
                                   check_item='extended l2',
                                   expected_value='true')
        # Do blockcopy
        virsh.blockcopy(vm_name,
                        device,
                        tmp_copy_path,
                        options=blockcopy_options,
                        ignore_status=False,
                        debug=True)
        # Check domain exist blockcopy file and extended_l2 status
        if len(vmxml.get_disk_source(vm_name)) < 2:
            test.fail('Domain disk num is less than 2, '
                      'the attach may have failed')
        else:
            image_file = vmxml.get_disk_source(vm_name)[1].find('source').get(
                'file')
            if image_file != tmp_copy_path:
                test.fail(
                    'Blockcopy path is not in the domain disks: blockcopy '
                    'image path is %s, actual image path is %s'
                    % (tmp_copy_path, image_file))
            check_obj.check_image_info(tmp_copy_path,
                                       check_item='extended l2',
                                       expected_value='true')
        # Check domain write file
        session = vm.wait_for_login()
        new_parts = utils_disk.get_parts_list(session)
        added_parts = list(set(new_parts).difference(set(test_obj.old_parts)))
        utils_disk.linux_disk_check(session, added_parts[0])
        session.close()

    def teardown_blockcopy_extended_l2():
        """
        Clean env.
        """
        if encryption_disk:
            libvirt_secret.clean_up_secrets()
        test_obj.backingchain_common_teardown()
        # detach disk
        virsh.detach_disk(vm_name,
                          target=device,
                          wait_for_event=True,
                          debug=True)
        process.run('rm -f %s' % test_obj.new_image_path)

    def setup_blockcopy_synchronous_writes():
        """
        Start domain and clean exist copy file
        """
        if not vm.is_alive():
            vm.start()
        if os.path.exists(tmp_copy_path):
            process.run('rm -f %s' % tmp_copy_path)

    def test_blockcopy_synchronous_writes():
        """
        Test blockcopy with --synchronous-writes option.
        """
        ret = virsh.blockcopy(vm_name,
                              device,
                              tmp_copy_path,
                              options=blockcopy_options,
                              ignore_status=False,
                              debug=True)
        if not ret.stdout_text.count("Now in mirroring phase"):
            test.fail("Not in mirroring phase")
        test_obj.new_image_path = tmp_copy_path
        # Check exist mirror tag after blockcopy.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        disk_list = vmxml.get_disk_all()[device]
        # Element truthiness is unreliable here; check for absence explicitly.
        if disk_list.find('mirror') is None:
            test.fail('No mirror tag in current domain xml :%s' % vmxml)
        else:
            # Check file in mirror should be new copy file
            mirror_file = disk_list.find('mirror').get('file')
            if mirror_file != tmp_copy_path:
                test.fail('Current mirror tag file:%s is not same as:%s' %
                          (mirror_file, tmp_copy_path))
            # Check file in mirror >source > file should be new copy file
            mirror_source_file = disk_list.find('mirror').\
                find('source').get('file')
            if mirror_source_file != tmp_copy_path:
                test.fail('Current source tag file:%s is not same as:%s' %
                          (mirror_source_file, tmp_copy_path))

        # Check domain write file
        session = vm.wait_for_login()
        utils_disk.dd_data_to_vm_disk(session, device)
        session.close()
        # Abort job and check disk source changed.
        virsh.blockjob(vm_name,
                       device,
                       options=' --pivot',
                       debug=True,
                       ignore_status=False)
        current_source = libvirt_disk.get_first_disk_source(vm)
        if current_source != tmp_copy_path:
            test.fail("Current source: %s is not same as original blockcopy"
                      " path:%s" % (current_source, tmp_copy_path))
        # Check domain write file after
        session = vm.wait_for_login()
        utils_disk.dd_data_to_vm_disk(session, device)
        session.close()
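    # The flow above mirrors this CLI sequence (a sketch; the actual options
    # come from this test's blockcopy_options parameter):
    #
    #   virsh blockcopy <vm> <dev> <copy.img> --wait --verbose \
    #       --synchronous-writes           # "Now in mirroring phase"
    #   virsh blockjob <vm> <dev> --pivot  # switch the disk to the copy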

    def teardown_blockcopy_synchronous_writes():
        """
        Clean env
        """
        if os.path.exists(test_obj.new_image_path):
            process.run('rm -f %s' % test_obj.new_image_path)

    libvirt_version.is_libvirt_feature_supported(params)

    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    case_name = params.get('case_name', '')
    device = params.get('target_disk')
    disk_extras = params.get('extras_options')
    blockcopy_options = params.get('blockcopy_option')
    attach_disk_extra = params.get("attach_disk_options")
    encryption_disk = params.get('enable_encrypt_disk', 'no') == "yes"
    disk_format = params.get('disk_format', 'qcow2')
    # Create object
    test_obj = blockcommand_base.BlockCommand(test, vm, params)
    check_obj = check_functions.Checkfunction(test, vm, params)
    # Get vm xml
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    test_obj.original_disk_source = libvirt_disk.get_first_disk_source(vm)
    tmp_copy_path = os.path.join(
        os.path.dirname(libvirt_disk.get_first_disk_source(vm)),
        "%s_blockcopy.img" % vm_name)
    # MAIN TEST CODE ###
    run_test = eval("test_%s" % case_name)
    setup_test = eval("setup_%s" % case_name)
    teardown_test = eval("teardown_%s" % case_name)

    try:
        # Execute test
        setup_test()
        run_test()

    finally:
        teardown_test()
        bkxml.sync()
Example #20
def run(test, params, env):
    """
    Test misc tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert
    2) check vendor_id
    3) check maximum vcpus with topology settings

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def update_cpu_xml():
        """
        Update cpu xml for test
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Create cpu xml for test
        if vmxml.xmltreefile.find('cpu'):
            cpu_xml = vmxml.cpu
        else:
            cpu_xml = vm_xml.VMCPUXML()

        if customize_cpu_features:
            for idx in range(len(cpu_xml.get_feature_list()) - 1, -1, -1):
                cpu_xml.remove_feature(idx)
            domcapa_xml = domcapability_xml.DomCapabilityXML()
            features = domcapa_xml.get_additional_feature_list(
                'host-model', ignore_features=None)
            for feature in features:
                for feature_name, feature_policy in feature.items():
                    # For host-passthrough mode, adding "invtsc" requires
                    # more settings, so it will be ignored.
                    if feature_name != "invtsc":
                        cpu_xml.add_feature(feature_name, feature_policy)

        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if cpu_vendor_id:
            cpu_xml.vendor_id = cpu_vendor_id

        # Update vm's cpu
        vmxml.cpu = cpu_xml
        vmxml.sync()

        if vcpu_max:
            if with_topology:
                vm_xml.VMXML.set_vm_vcpus(vm_name,
                                          int(vcpu_max),
                                          cores=int(vcpu_max),
                                          sockets=1,
                                          threads=1,
                                          add_topology=with_topology,
                                          topology_correction=with_topology)
            else:
                vm_xml.VMXML.set_vm_vcpus(vm_name, int(vcpu_max))

    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list." %
                      snapshot_name)
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)
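        # Equivalent CLI sequence (a sketch):
        #   virsh snapshot-create-as <vm> <vm>-snap
        #   virsh snapshot-list <vm>
        #   virsh snapshot-dumpxml <vm> <vm>-snap   # must match expected_str
        #   virsh snapshot-revert <vm> --current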

    def check_feature_list(vm, original_dict):
        """
        Compare new cpu feature list and original cpu

        :param vm: VM object
        :param original_dict: cpu feature dict,
            e.g. {"name1": "policy1", "name2": "policy2"}
        """
        new_cpu_xml = vm_xml.VMXML.new_from_dumpxml(vm.name).cpu
        new_feature_dict = new_cpu_xml.get_dict_type_feature()
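        # get_dict_type_feature() returns a {name: policy} mapping, e.g.
        # {"vmx": "require", "aes": "disable"} (feature names illustrative),
        # so the equality check below is independent of the order of
        # <feature/> elements in the XML.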
        if new_feature_dict != original_dict:
            test.fail('CPU feature lists differ: original is %s,'
                      ' new is %s' % (original_dict, new_feature_dict))

    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    cpu_mode = params.get('cpu_mode')
    vcpu_max = params.get('vcpu_max')
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")

    test_operations = params.get("test_operations")
    check_vendor_id = "yes" == params.get("check_vendor_id", "no")
    virsh_edit_cmd = params.get("virsh_edit_cmd")
    with_topology = "yes" == params.get("with_topology", "no")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")

    cpu_vendor_id = None
    expected_qemuline = None
    cmd_in_guest = params.get("cmd_in_guest")
    customize_cpu_features = "yes" == params.get("customize_cpu_features",
                                                 "no")
    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    managed_save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name

    try:
        if check_vendor_id:
            output = virsh.capabilities(debug=True)
            host_vendor = re.findall(r'<vendor>(\w+)<', output)[0]

            cpu_vendor_id = 'GenuineIntel'
            if host_vendor != "Intel":
                cpu_vendor_id = 'AuthenticAMD'
            logging.debug("Set cpu vendor_id to %s on this host.",
                          cpu_vendor_id)

            expected_qemuline = "vendor=" + cpu_vendor_id
            cmd_in_guest = ("cat /proc/cpuinfo | grep vendor_id | grep {}".
                            format(cpu_vendor_id))

        # Update xml for test
        update_cpu_xml()

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)
        cpu_xml = vmxml.cpu
        feature_dict = cpu_xml.get_dict_type_feature()

        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        if virsh_edit_cmd:
            status = libvirt.exec_virsh_edit(vm_name,
                                             virsh_edit_cmd.split(","))
            if status == status_error:
                test.fail("Virsh edit got unexpected result.")

        # Check if vm could start successfully
        if not status_error:
            result = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(result)

            if expected_str_after_startup:
                libvirt.check_dumpxml(vm, expected_str_after_startup)

            if expected_qemuline:
                libvirt.check_qemu_cmd_line(expected_qemuline)

            if cmd_in_guest:
                vm_session = vm.wait_for_login()
                status, output = vm_session.cmd_status_output(cmd_in_guest)
                if status:
                    vm_session.close()
                    test.fail("Failed to run '{}' in vm with "
                              "messages:\n{}".format(cmd_in_guest, output))
                vm_session.close()
                if cpu_mode == 'maximum':
                    check_vm_cpu_model(output.strip(), cmd_in_guest, test)

            # Add case: Check cpu xml after domain Managedsaved and restored
            if test_operations:
                for item in test_operations.split(','):
                    if item == "managedsave_restore":
                        # (1) Managedsave the domain
                        virsh.managedsave(vm_name,
                                          ignore_status=False,
                                          debug=True)
                        check_feature_list(vm, feature_dict)
                        # (2) Restore the domain
                        virsh.restore(managed_save_file,
                                      ignore_status=False,
                                      debug=True)
                        # (3) Check cpu mode and feature list
                        libvirt.check_dumpxml(vm, cpu_mode)
                        check_feature_list(vm, feature_dict)

    finally:
        logging.debug("Recover test environment")
        if os.path.exists(managed_save_file):
            virsh.managedsave_remove(vm_name, debug=True)
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
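
For reference, a hedged sketch of what the topology branch of update_cpu_xml() above is expected to produce in the domain XML (assuming vcpu_max=4; element names are standard libvirt domain XML):

# <vcpu placement='static'>4</vcpu>
# <cpu ...>
#   <topology sockets='1' cores='4' threads='1'/>
# </cpu>
#
# sockets * cores * threads must cover the maximum vcpu count, which is what
# topology_correction=True enforces.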
Example #21
def run(test, params, env):
    """
    Test command: virsh vol-resize

    Resize the capacity of the given volume (default bytes).
    1. Define and start a given type pool.
    2. Create a volume in the pool.
    3. Do vol-resize.
    4. Check the volume info.
    5. Delete the volume and pool.

    TODO:
    Add volume shrink test after libvirt upstream supports it.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    vol_capacity = params.get("vol_capacity")
    vol_new_capacity = params.get("vol_new_capacity")
    resize_option = params.get("resize_option", "")
    check_vol_size = "yes" == params.get("check_vol_size", "yes")
    status_error = "yes" == params.get("status_error", "no")
    b_luks_encrypt = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    with_clusterSize = "yes" == params.get("with_clusterSize")
    libvirt_version.is_libvirt_feature_supported(params)

    if not libvirt_version.version_compare(1, 0, 0):
        if "--allocate" in resize_option:
            test.cancel("'--allocate' flag is not supported in"
                        " current libvirt version.")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    libv_pvt = libvirt.PoolVolumeTest(test, params)
    try:
        libv_pool = libvirt_storage.StoragePool()
        # Raise an error if a pool with the given name already exists
        if libv_pool.pool_exists(pool_name):
            test.error("Pool '%s' already exists" % pool_name)
        else:
            # Create a new pool
            libv_pvt.pre_pool(pool_name,
                              pool_type,
                              pool_target,
                              emulated_image,
                              image_size=emulated_image_size)
            pool_info = libv_pool.pool_info(pool_name)
            for key in pool_info:
                logging.debug("Pool info: %s = %s", key, pool_info[key])
            # Deal with vol_new_capacity; '--capacity' only accepts an integer
            if vol_new_capacity == "pool_available":
                pool_avai = pool_info["Available"].split()
                vol_new_capacity = pool_avai[0].split('.')[0] + pool_avai[1]
            if vol_new_capacity == "pool_capacity":
                pool_capa = pool_info["Capacity"].split()
                vol_new_capacity = pool_capa[0].split('.')[0] + pool_capa[1]
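            # Example: an "Available" value of "14.50 GiB" splits into
            # ['14.50', 'GiB'] and is recombined as "14GiB", because
            # vol-resize only accepts an integer capacity plus a unit suffix.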

        # Create a volume
        if b_luks_encrypt:
            luks_sec_uuid = create_luks_secret(
                os.path.join(pool_target, vol_name), test)
            secret_uuids.append(luks_sec_uuid)
            set_secret_value(encryption_password, luks_sec_uuid)
            create_luks_vol(vol_name, luks_sec_uuid, params, test)
        else:
            libv_pvt.pre_vol(vol_name=vol_name,
                             vol_format=vol_format,
                             capacity=vol_capacity,
                             allocation=None,
                             pool_name=pool_name)
        libv_vol = libvirt_storage.PoolVolume(pool_name)
        check_vol_info(libv_vol, vol_name, test)

        # The volume size after resize may not be exactly what we expect,
        # for example:
        # 1) vol_new_capacity = 1b with the --delta option: the volume size
        #    will not change
        # 2) vol_new_capacity = 1KB with the --delta option: the volume size
        #    will increase by 1024 bytes, not 1000
        # So the volume size check after resize can be disabled
        if check_vol_size:
            vol_path = libv_vol.list_volumes()[vol_name]
            expect_info = get_expect_info(vol_new_capacity, vol_path, test,
                                          resize_option)
            logging.debug("Expect volume info: %s", expect_info)
        else:
            expect_info = {}

        # Run vol-resize
        result = virsh.vol_resize(vol_name,
                                  vol_new_capacity,
                                  pool_name,
                                  resize_option,
                                  uri=uri,
                                  unprivileged_user=unpri_user,
                                  debug=True)
        if not status_error:
            if result.exit_status != 0:
                test.fail(result.stdout.strip() + result.stderr.strip())
            else:
                if check_vol_info(libv_vol, vol_name, test, expect_info):
                    logging.debug("Volume %s resize check passed.", vol_name)
                else:
                    test.fail("Volume %s resize check failed." % vol_name)
        elif result.exit_status == 0:
            test.fail("Expected resize to fail, but it succeeded.")
    finally:
        # Clean up
        try:
            libv_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                  emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
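
For reference, the virsh invocations this test drives look roughly like the following (pool and volume names are illustrative):

# virsh vol-resize vol-test 2G --pool pool-test                  # grow to 2G
# virsh vol-resize vol-test 1G --pool pool-test --delta          # grow by 1G
# virsh vol-resize vol-test 1G --pool pool-test --delta --shrink # shrink by 1G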
Example #22
def run(test, params, env):
    """
    Test misc tests of virtual cpu features

    1) check dumpxml after snapshot-create/revert
    2) check vendor_id
    3) check maximum vcpus with topology settings

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def update_cpu_xml():
        """
        Update cpu xml for test
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Create cpu xml for test
        if vmxml.xmltreefile.find('cpu'):
            cpu_xml = vmxml.cpu
        else:
            cpu_xml = vm_xml.VMCPUXML()
        if cpu_mode:
            cpu_xml.mode = cpu_mode
        if cpu_vendor_id:
            cpu_xml.vendor_id = cpu_vendor_id

        # Update vm's cpu
        vmxml.cpu = cpu_xml
        vmxml.sync()

        if vcpu_max:
            if with_topology:
                vm_xml.VMXML.set_vm_vcpus(vm_name,
                                          int(vcpu_max),
                                          cores=int(vcpu_max),
                                          sockets=1,
                                          threads=1,
                                          add_topology=with_topology,
                                          topology_correction=with_topology)
            else:
                vm_xml.VMXML.set_vm_vcpus(vm_name, int(vcpu_max))

    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list,
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list." %
                      snapshot_name)
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    cpu_mode = params.get('cpu_mode')
    vcpu_max = params.get('vcpu_max')
    expected_str_before_startup = params.get("expected_str_before_startup")
    expected_str_after_startup = params.get("expected_str_after_startup")

    test_operations = params.get("test_operations")
    check_vendor_id = "yes" == params.get("check_vendor_id", "no")
    virsh_edit_cmd = params.get("virsh_edit_cmd")
    with_topology = "yes" == params.get("with_topology", "no")

    status_error = "yes" == params.get("status_error", "no")
    err_msg = params.get("err_msg")

    cpu_vendor_id = None
    expected_qemuline = None
    cmd_in_guest = params.get("cmd_in_guest")

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        if check_vendor_id:
            output = virsh.capabilities(debug=True)
            host_vendor = re.findall(r'<vendor>(\w+)<', output)[0]

            cpu_vendor_id = 'GenuineIntel'
            if host_vendor != "Intel":
                cpu_vendor_id = 'AuthenticAMD'
            logging.debug("Set cpu vendor_id to %s on this host.",
                          cpu_vendor_id)

            expected_qemuline = "vendor=" + cpu_vendor_id
            cmd_in_guest = ("cat /proc/cpuinfo | grep vendor_id | grep {}".
                            format(cpu_vendor_id))

        # Update xml for test
        update_cpu_xml()

        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("Pre-test xml is %s", vmxml.xmltreefile)

        if expected_str_before_startup:
            libvirt.check_dumpxml(vm, expected_str_before_startup)

        if test_operations:
            for action in test_operations.split(","):
                if action == "do_snapshot":
                    do_snapshot(vm_name, expected_str_before_startup)

        if virsh_edit_cmd:
            status = libvirt.exec_virsh_edit(vm_name,
                                             virsh_edit_cmd.split(","))
            if status == status_error:
                test.fail("Virsh edit got unexpected result.")

        # Check if vm could start successfully
        if not status_error:
            result = virsh.start(vm_name, debug=True)
            libvirt.check_exit_status(result)

            if expected_str_after_startup:
                libvirt.check_dumpxml(vm, expected_str_after_startup)

            if expected_qemuline:
                libvirt.check_qemu_cmd_line(expected_qemuline)

            if cmd_in_guest:
                vm_session = vm.wait_for_login()
                status, output = vm_session.cmd_status_output(cmd_in_guest)
                if status:
                    vm_session.close()
                    test.fail("Failed to run '{}' in vm with "
                              "messages:\n{}".format(cmd_in_guest, output))
                vm_session.close()
                if cpu_mode == 'maximum':
                    check_vm_cpu_model(output.strip(), cmd_in_guest, test)
    finally:
        logging.debug("Recover test environment")
        if vm.is_alive():
            vm.destroy()

        libvirt.clean_up_snapshots(vm_name, domxml=bkxml)
        bkxml.sync()
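
The vendor_id check above greps the guest's /proc/cpuinfo; the matched line looks like:

# vendor_id : GenuineIntel    (on Intel hosts)
# vendor_id : AuthenticAMD    (on AMD hosts)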
Example #23
def run(test, params, env):
    """
    Test interface xml options.

    1.Prepare test environment,destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.wait_for_login()

    def create_iface_xml(mac=None):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        iface.source = iface_source
        iface.model = iface_model if iface_model else "virtio"
        if iface_target:
            iface.target = {'dev': iface_target}
        if mac:
            iface.mac_address = mac
        if iface_rom:
            iface.rom = eval(iface_rom)
        logging.debug("Create new interface xml: %s", iface)
        return iface

    def get_all_mac_in_vm():
        """
        get the mac address list of all the interfaces from a running vm os

        return: a list of the mac addresses
        """
        mac_list = []
        interface_list = vm.get_interfaces()
        for iface_ in interface_list:
            mac_ = vm.get_interface_mac(iface_)
            mac_list.append(mac_)
        return mac_list

    # Interface specific attributes.
    iface_num = params.get("iface_num", '1')
    iface_type = params.get("iface_type", "network")
    iface_source = eval(params.get("iface_source", "{'network':'default'}"))
    iface_model = params.get("iface_model")
    iface_target = params.get("iface_target")
    iface_mac = params.get("iface_mac")
    iface_rom = params.get("iface_rom")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_iface = "yes" == params.get("attach_iface", "no")
    attach_option = params.get("attach_option", "")
    detach_device = "yes" == params.get("detach_device")
    stress_test = "yes" == params.get("stress_test")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    start_vm = "yes" == params.get("start_vm", "yes")
    options_test = "yes" == params.get("options_test", "no")
    username = params.get("username")
    password = params.get("password")
    poll_timeout = int(params.get("poll_timeout", 10))
    err_msgs1 = params.get("err_msgs1")
    err_msgs2 = params.get("err_msgs2")
    err_msg_rom = params.get("err_msg_rom")
    del_pci = "yes" == params.get("del_pci", "no")
    del_mac = "yes" == params.get("del_mac", "no")
    del_alias = "yes" == params.get("del_alias", "no")
    set_pci = "yes" == params.get("set_pci", "no")
    set_mac = "yes" == params.get("set_mac", "no")
    set_alias = "yes" == params.get("set_alias", "no")
    status_error = "yes" == params.get("status_error", "no")
    pci_addr = params.get("pci")
    check_mac = "yes" == params.get("check_mac", "no")
    vnet_mac = params.get("vnet_mac", None)
    customer_alias = "yes" == params.get("customer_alias", "no")
    detach_error = params.get("detach_error", None)

    libvirt_version.is_libvirt_feature_supported(params)
    # stress_test requires a detach operation
    stress_test_detach_device = False
    stress_test_detach_interface = False
    if stress_test:
        if attach_device:
            stress_test_detach_device = True
        if attach_iface:
            stress_test_detach_interface = True

    # The following detach-device step also uses the attach option
    detach_option = attach_option

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    #iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()

    # Check virsh command option
    check_cmds = []
    sep_options = attach_option.split()
    logging.debug("sep_options: %s" % sep_options)
    for sep_option in sep_options:
        if attach_device and sep_option:
            check_cmds.append(('attach-device', sep_option))
        if attach_iface and sep_option:
            check_cmds.append(('attach-interface', sep_option))
        if (detach_device or stress_test_detach_device) and sep_option:
            check_cmds.append(('detach-device', sep_option))
        if stress_test_detach_interface and sep_option:
            check_cmds.append(('detach-interface', sep_option))

    for cmd, option in check_cmds:
        libvirt.virsh_cmd_has_option(cmd, option)

    try:
        try:
            # Attach an interface when vm is running
            iface_list = []
            err_msgs = ("No more available PCI slots",
                        "No more available PCI addresses")
            if not start_vm:
                virsh.destroy(vm_name)
            for i in range(int(iface_num)):
                if attach_device:
                    logging.info("Try to attach device loop %s" % i)
                    if iface_mac:
                        mac = iface_mac
                        iface_xml_obj = create_iface_xml(mac)
                    elif check_mac:
                        iface_xml_obj = create_iface_xml()
                    else:
                        mac = utils_net.generate_mac_address_simple()
                        iface_xml_obj = create_iface_xml(mac)
                    if customer_alias:
                        random_id = process.run(
                            "uuidgen", ignore_status=True,
                            shell=True).stdout_text.strip()
                        alias_str = "ua-" + random_id
                        iface_xml_obj.alias = {"name": alias_str}
                        logging.debug("Update number %s interface xml: %s", i,
                                      iface_xml_obj)
                    iface_xml_obj.xmltreefile.write()
                    if check_mac:
                        mac_bef = get_all_mac_in_vm()
                    ret = virsh.attach_device(vm_name,
                                              iface_xml_obj.xml,
                                              flagstr=attach_option,
                                              ignore_status=True,
                                              debug=True)
                elif attach_iface:
                    logging.info("Try to attach interface loop %s" % i)
                    if iface_mac:
                        mac = iface_mac
                    else:
                        mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (iface_type, iface_source['network'],
                                iface_model, mac, attach_option))
                    ret = virsh.attach_interface(vm_name,
                                                 options,
                                                 ignore_status=True)
                if ret.exit_status:
                    if any([msg in ret.stderr for msg in err_msgs]):
                        logging.debug(
                            "No more pci slots, can't attach more devices")
                        break
                    elif ret.stderr.count("doesn't support option %s" %
                                          attach_option):
                        test.cancel(ret.stderr)
                    elif err_msgs1 in ret.stderr:
                        logging.debug(
                            "option %s is not supported when the domain's "
                            "running state is %s"
                            % (attach_option, vm.is_alive()))
                        if start_vm or ("--live" not in sep_options
                                        and attach_option):
                            test.fail(
                                "Got 'not supported', which is unexpected")
                    elif err_msgs2 in ret.stderr:
                        logging.debug("options %s are mutually exclusive" %
                                      attach_option)
                        if not ("--current" in sep_options
                                and len(sep_options) > 1):
                            test.fail(
                                "Got 'mutually exclusive', which is "
                                "unexpected")
                    elif err_msg_rom and err_msg_rom in ret.stderr:
                        logging.debug("Attach failed with expect err msg: %s" %
                                      err_msg_rom)
                    else:
                        test.fail("Failed to attach-interface: %s" %
                                  ret.stderr.strip())
                elif stress_test:
                    if attach_device:
                        # Detach the device immediately for stress test
                        ret = virsh.detach_device(vm_name,
                                                  iface_xml_obj.xml,
                                                  flagstr=detach_option,
                                                  ignore_status=True)
                    elif attach_iface:
                        # Detach the device immediately for stress test
                        options = ("--type %s --mac %s %s" %
                                   (iface_type, mac, detach_option))
                        ret = virsh.detach_interface(vm_name,
                                                     options,
                                                     ignore_status=True)
                    libvirt.check_exit_status(ret)
                else:
                    if attach_device:
                        if check_mac:
                            mac_aft = get_all_mac_in_vm()
                            add_mac = list(
                                set(mac_aft).difference(set(mac_bef)))
                            try:
                                mac = add_mac[0]
                                logging.debug(
                                    "The mac address of the attached interface is %s"
                                    % mac)
                            except IndexError:
                                test.fail(
                                    "Cannot find the newly added interface "
                                    "in the guest OS!")
                        iface_list.append({
                            'mac': mac,
                            'iface_xml': iface_xml_obj
                        })
                    elif attach_iface:
                        iface_list.append({'mac': mac})
            # Restart libvirtd service
            if restart_libvirtd:
                libvirtd.restart()
                # After restart libvirtd, the old console was invalidated,
                # so we need create a new serial console
                vm.cleanup_serial_console()
                vm.create_serial_console()
            # In the options test, check whether the interface is attached
            # in the current state when attach returns success

            def check_iface_exist():
                try:
                    session = vm.wait_for_serial_login(username=username,
                                                       password=password)
                    if utils_net.get_linux_ifname(session, iface['mac']):
                        return True
                    else:
                        logging.debug("can not find interface in vm")
                except Exception:
                    return False

            if options_test:
                for iface in iface_list:
                    if 'mac' in iface:
                        # Check interface in dumpxml output
                        if_attr = vm_xml.VMXML.get_iface_by_mac(
                            vm_name, iface['mac'])
                        if vm.is_alive() and attach_option == "--config":
                            if if_attr:
                                test.fail("interface should not exists "
                                          "in current live vm while "
                                          "attached by --config")
                        else:
                            if if_attr:
                                logging.debug("interface %s found current "
                                              "state in xml" % if_attr['mac'])
                            else:
                                test.fail("no interface found in "
                                          "current state in xml")

                        if if_attr:
                            if if_attr['type'] != iface_type or \
                                    if_attr['source'] != \
                                    iface_source['network']:
                                test.fail("Interface attribute doesn't "
                                          "match attachment options")
                        # check interface in vm only when vm is active
                        if vm.is_alive():
                            logging.debug("check interface in current state "
                                          "in vm")

                            if not utils_misc.wait_for(check_iface_exist,
                                                       timeout=20):
                                if not attach_option == "--config":
                                    test.fail("Can't see interface "
                                              "in current state in vm")
                                else:
                                    logging.debug("interface not found in vm,"
                                                  " as expected with --config")
                            else:
                                logging.debug("find interface in "
                                              "current state in vm")
                            # In the options test, if the attach was performed
                            # while the vm was running, destroy and restart it
                            # to check again
                            vm.destroy()

            # Start the domain if needed
            if vm.is_dead():
                vm.start()
            session = vm.wait_for_serial_login(username=username,
                                               password=password)

            # check if interface is attached
            for iface in iface_list:
                if 'mac' in iface:
                    logging.debug("check interface in xml")
                    # Check interface in dumpxml output
                    if_attr = vm_xml.VMXML.get_iface_by_mac(
                        vm_name, iface['mac'])
                    logging.debug(if_attr)
                    if if_attr:
                        logging.debug("interface {} is found in xml".format(
                            if_attr['mac']))
                        if (if_attr['type'] != iface_type or
                                if_attr['source'] != iface_source['network']):
                            test.fail("Interface attribute doesn't "
                                      "match attachment options")
                        if options_test and start_vm and attach_option \
                                in ("--current", "--live", ""):
                            test.fail("interface should not exists when "
                                      "restart vm in options_test")
                    else:
                        logging.debug("no interface found in xml")
                        if options_test and start_vm and attach_option in \
                                ("--current", "--live", ""):
                            logging.debug("interface not exists next state "
                                          "in xml with %s" % attach_option)
                        else:
                            test.fail("Can't see interface in dumpxml")

                    # Check interface on guest
                    if not utils_misc.wait_for(check_iface_exist, timeout=20):
                        logging.debug("can't see interface in next state in vm")
                        if start_vm and attach_option in \
                                ("--current", "--live", ""):
                            logging.debug("it is expected")
                        else:
                            test.fail("should find the interface, but it is "
                                      "not seen in next state in vm")
                    if vnet_mac:
                        # get the name of the backend tap device
                        iface_params = vm_xml.VMXML.get_iface_by_mac(
                            vm_name, mac)
                        target_name = iface_params['target']['dev']
                        # check the tap device mac on host
                        tap_info = process.run("ip l show %s" % target_name,
                                               shell=True,
                                               ignore_status=True).stdout_text
                        logging.debug("vnet_mac should be %s" % vnet_mac)
                        logging.debug(
                            "check on host for the details of tap device %s: %s"
                            % (target_name, tap_info))
                        if vnet_mac not in tap_info:
                            test.fail(
                                "The mac address of the tap device does not "
                                "match!")
            # Detach hot/cold-plugged interface at last
            if detach_device:
                logging.debug("detach interface here:")
                if attach_device:
                    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    iface_xml_ls = vmxml.get_devices("interface")
                    iface_xml_ls_det = iface_xml_ls[1:]
                    for iface_xml_det in iface_xml_ls_det:
                        if del_mac:
                            iface_xml_det.del_mac_address()
                        if del_pci:
                            iface_xml_det.del_address()
                        if del_alias:
                            iface_xml_det.del_alias()
                        if set_mac:
                            mac = utils_net.generate_mac_address_simple()
                            iface_xml_det.set_mac_address(mac)
                        if set_pci:
                            pci_dict = ast.literal_eval(pci_addr)
                            addr = iface_xml_det.new_iface_address(
                                **{"attrs": pci_dict})
                            iface_xml_det.set_address(addr)
                        if set_alias:
                            random_id = process.run(
                                "uuidgen", ignore_status=True,
                                shell=True).stdout_text.strip()
                            alias_str = "ua-" + random_id
                            iface_xml_det.set_alias({"name": alias_str})
                        ori_pid_libvirtd = process.getoutput("pidof libvirtd")
                        logging.debug(
                            "The xml of the interface to detach is %s",
                            iface_xml_det)
                        ret = virsh.detach_device(vm_name,
                                                  iface_xml_det.xml,
                                                  flagstr="",
                                                  ignore_status=True)
                        libvirt.check_exit_status(ret, status_error)
                        if detach_device and status_error and detach_error:
                            if not ret.stderr.count(detach_error):
                                test.error(
                                    "Detach failed as expected, but the error "
                                    "msg %s does not match!" % ret.stderr)
                        aft_pid_libvirtd = process.getoutput("pidof libvirtd")
                        # Note: is_running() must be called on the Libvirtd
                        # instance; referencing the unbound method is always
                        # truthy and would mask a crashed daemon.
                        if not libvirtd.is_running() or ori_pid_libvirtd != aft_pid_libvirtd:
                            test.fail(
                                "Libvirtd crashed after detaching a "
                                "non-existent interface")
                else:
                    for iface in iface_list:
                        options = ("%s --mac %s" % (iface_type, iface['mac']))
                        ret = virsh.detach_interface(vm_name,
                                                     options,
                                                     ignore_status=True)
                        libvirt.check_exit_status(ret)

                # Check if interface was detached
                if not status_error:
                    for iface in iface_list:
                        if 'mac' in iface:
                            polltime = time.time() + poll_timeout
                            while True:
                                # Check interface in dumpxml output
                                if not vm_xml.VMXML.get_iface_by_mac(
                                        vm_name, iface['mac']):
                                    break
                                else:
                                    time.sleep(2)
                                    if time.time() > polltime:
                                        test.fail("Interface still "
                                                  "exists after detachment")
            session.close()
        except virt_vm.VMStartError as e:
            logging.info(str(e))
            test.fail('VM failed to start:\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
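
For reference, the attach/detach option strings built in this test map to virsh commands of roughly this shape (domain name and MAC are illustrative):

# virsh attach-interface <domain> network default --model virtio \
#     --mac 52:54:00:aa:bb:cc [--live | --config | --current | --persistent]
# virsh detach-interface <domain> network --mac 52:54:00:aa:bb:cc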
Example #24
def run(test, params, env):
    """
    Test the libvirt API to report deprecation status of machine-types and
    devices.
    """
    libvirt_version.is_libvirt_feature_supported(params)
    vm_name = params.get("main_vm")
    deprecated_domain = params.get("deprecated_domain", "no") == "yes"
    check = params.get("check", "no") == "yes"
    vm = env.get_vm(vm_name)

    if vm.is_alive():
        vm.destroy()
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    deprecated_vm = backup_xml.copy()

    try:
        vm.start()
        vm.wait_for_login().close()
        if check:
            qmp_cmd = params.get("qmp_cmd")
            domain_tree = params.get("domain_tree")
            virsh_function = params.get("virsh_function")
            # Get a list of deprecated CPU architectures/machine types by
            # executing QMP command
            qmp_list = get_deprecated_name_list_qmp(vm_name, qmp_cmd)
            # and (dom)capabilities
            domain_list = get_deprecated_domain_capabilities_list(
                eval(virsh_function), domain_tree)
            check_deprecated_output(test, qmp_list, domain_list)

        if deprecated_domain:
            deprecated_list = prepare_deprecated_vm_xml_and_provide_deprecated_list(
                params, deprecated_vm)
            if not deprecated_list:
                test.cancel("There is no deprecated cpu or machine type in "
                            "current qemu version, skipping the test.")
            # No "Messages" in the output since the default VM is still running.
            check_dominfo(test, vm_name, deprecated_list, empty=True)
            vm.destroy()
            # Update VM with a deprecated items and check dominfo
            deprecated_vm.sync()
            logging.debug("vm xml is %s", deprecated_vm)
            vm.start()
            vm.wait_for_login().close()
            check_dominfo(test, vm_name, deprecated_list)
            # Reboot the VM and check a dominfo again
            vm.reboot()
            check_dominfo(test, vm_name, deprecated_list)
            # Restart libvirtd and check a dominfo again
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            check_dominfo(test, vm_name, deprecated_list)
            # Save a VM to file and check a dominfo - No "Messages"
            deprecated_vm_file = "deprecated_vm"
            vm.save_to_file(deprecated_vm_file)
            check_dominfo(test, vm_name, deprecated_list, empty=True)
            # Restore a VM from file and check dominfo
            vm.restore_from_file(deprecated_vm_file)
            check_dominfo(test, vm_name, deprecated_list)
            # Destroy VM and check dominfo - No "Messages"
            vm.destroy()
            check_dominfo(test, vm_name, deprecated_list, empty=True)
            # Start the VM and shut it down internally - No "Messages" in
            # dominfo output
            vm.start()
            session = vm.wait_for_login()
            utils_misc.cmd_status_output("shutdown now", session=session)
            utils_misc.wait_for(lambda: vm.state() == 'shut off', 60)
            check_dominfo(test, vm_name, deprecated_list, empty=True)

    except (exceptions.TestFail, exceptions.TestCancel):
        raise
    except Exception as e:
        test.error('Unexpected error: {}'.format(e))
    finally:
        if vm.is_alive():
            vm.destroy()
        backup_xml.sync()
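
A hedged sketch of the QMP side of the deprecation check (the actual command comes from the qmp_cmd parameter, so treat this one as an assumption):

# virsh qemu-monitor-command <domain> '{"execute": "query-machines"}' --pretty
#
# Deprecated machine types carry "deprecated": true in the reply; the same
# flag exists in query-cpu-definitions for deprecated CPU models.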
Example #25
def run(test, params, env):
    """
    Test <transient/> disks.

    1.Prepare test environment, destroy VMs.
    2.Perform 'qemu-img create' operation.
    3.Edit disks xml and start the domains.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    def check_transient_disk_keyword(vm_names):
        """
        Check VM disk with TRANSIENT keyword.

        :param vm_names: VM names list.
        """
        logging.info("Checking disk with transient keyword...")

        output0 = ""
        output1 = ""
        for i in list(range(2)):
            ret = virsh.dumpxml(vm_names[i], ignore_status=False)

            cmd = ("echo \"%s\" | grep '<source file=.*TRANSIENT.*/>'" %
                   ret.stdout_text)
            # ignore_status=True so a failed grep returns a non-zero status
            # instead of raising, letting test.fail report the failure
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Check transient disk on %s failed" % vm_names[i])
            if i == 0:
                output0 = astring.to_text(
                    process.system_output(cmd, ignore_status=False,
                                          shell=True))
            else:
                output1 = astring.to_text(
                    process.system_output(cmd, ignore_status=False,
                                          shell=True))
        if output0 == output1:
            test.fail("Two vms have same source transident disk %s" % output0)

    def check_share_transient_disk(vms_list):
        """
        Check share base image of <transient/> disks.

        :param vms_list: VM object list.
        """
        logging.info("Checking share base image of transient disk...")

        try:
            test_str = "teststring"
            sha_cmd = ("sha1sum /dev/%s" % disk_target)
            cmd = ("fdisk -l /dev/%s && mkfs.ext4 -F /dev/%s && mount /dev/%s"
                   " /mnt && echo '%s' > /mnt/test && umount /mnt" %
                   (disk_target, disk_target, disk_target, test_str))

            # check on vm0.
            session0 = vms_list[0]['vm'].wait_for_login(timeout=10)
            s, o = session0.cmd_status_output(cmd)
            logging.debug("session in vm0 exit %s; output: %s", s, o)
            if s:
                session0.close()
                test.fail("Shared disk on vm0 doesn't work well")

            vm0_disk_sha1 = session0.cmd_output(sha_cmd)
            session0.close()
            vms_list[0]['vm'].destroy(gracefully=False)

            # check on vm1.
            session = vms_list[1]['vm'].wait_for_login(timeout=10)
            vm1_disk_sha1 = session.cmd_output(sha_cmd)
            if vm0_disk_sha1 == vm1_disk_sha1:
                session.close()
                test.fail(
                    "Data written to vm0's transient disk is visible in vm1")

            s, o = session.cmd_status_output(cmd)
            logging.debug("session in vm1 exit %s; output: %s", s, o)
            if s:
                session.close()
                test.fail("Shared disk on vm1 doesn't work well")
            session.close()
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            test.error("Test transient disk shareable: login failed")

    vm_names = params.get("vms").split()
    if len(vm_names) < 2:
        test.cancel("No multi vms provided.")

    # Disk specific attributes.
    disk_bus = params.get("virt_disk_bus", "virtio")
    disk_target = params.get("virt_disk_target", "vdb")
    disk_type = params.get("virt_disk_type", "file")
    disk_device = params.get("virt_disk_device", "disk")
    disk_format = params.get("virt_disk_format", "qcow2")
    target_format = params.get("virt_target_format", "qcow2")
    hotplug = "yes" == params.get("virt_disk_vms_hotplug", "no")
    status_error = params.get("status_error").split()
    sharebacking = params.get("share_transient").split()
    on_reboot_destroy = "yes" == params.get("on_reboot_destroy", "no")
    disk_source_path = data_dir.get_data_dir()
    disk_path = ""

    libvirt_version.is_libvirt_feature_supported(params)

    # Backup vm xml files.
    vms_backup = []
    # We just use 2 VMs for testing.
    for i in list(range(2)):
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[i])
        vms_backup.append(vmxml_backup)
    # Initialize VM list
    vms_list = []
    try:
        # Create disk images if needed.
        disks = []
        image_size = params.get("image_size", "1G")
        disk_path = "%s/test.%s" % (disk_source_path, disk_format)
        disk_source = libvirt.create_local_disk("file",
                                                disk_path,
                                                image_size,
                                                disk_format=disk_format)
        disk_src_dict = {"attrs": {"file": disk_path}}
        disks.append({"format": disk_format, "source": disk_source})

        # Compose the new domain xml
        for i in list(range(2)):
            vm = env.get_vm(vm_names[i])
            # Destroy domain first.
            if vm.is_alive():
                vm.destroy(gracefully=False)

            # Configure vm disk options and define vm
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_names[i])
            disk_xml = libvirt_disk.create_primitive_disk_xml(
                disk_type, disk_device, disk_target, disk_bus, target_format,
                disk_src_dict, None)

            if sharebacking[i] == "yes":
                disk_xml.sharebacking = "yes"
                if on_reboot_destroy:
                    vmxml.on_reboot = "destroy"
            else:
                disk_xml.transient = "yes"

            logging.debug("The disk xml is: %s" % disk_xml.xmltreefile)

            if not hotplug:
                # If we are not testing hotplug,
                # add disks to domain xml and sync.
                vmxml.add_device(disk_xml)
                logging.debug("vm xml is {}".format(vmxml))
                vmxml.sync()
            vms_list.append({
                "name": vm_names[i],
                "vm": vm,
                "status": "yes" == status_error[i],
                "disk": disk_xml
            })
            logging.debug("vms_list %s" % vms_list)

        for i in list(range(len(vms_list))):
            try:
                # Try to start the domain.
                vms_list[i]['vm'].start()
                # Check if VM is started as expected.
                if not vms_list[i]['status']:
                    test.fail('VM started unexpectedly.')

                session = vms_list[i]['vm'].wait_for_login()
                # if we are testing hotplug, it need to start domain and
                # then run virsh attach-device command.
                if hotplug:
                    vms_list[i]['disk'].xmltreefile.write()
                    result = virsh.attach_device(vms_list[i]['name'],
                                                 vms_list[i]['disk'].xml,
                                                 debug=True).exit_status
                    os.remove(vms_list[i]['disk'].xml)

                    # Check if the return code of attach-device
                    # command is as expected.
                    if 0 != result and vms_list[i]['status']:
                        test.fail('Failed to hotplug disk device')
                    elif 0 == result and not vms_list[i]['status']:
                        test.fail('Hotplug disk device unexpectedly.')

                if i == 1:
                    check_transient_disk_keyword(vm_names)
                    check_share_transient_disk(vms_list)

                session.close()
            except virt_vm.VMStartError as start_error:
                if vms_list[i]['status']:
                    test.fail("VM failed to start."
                              "Error: %s" % str(start_error))
    finally:
        # Stop VMs.
        for i in list(range(len(vms_list))):
            if vms_list[i]['vm'].is_alive():
                vms_list[i]['vm'].destroy(gracefully=False)

        # Recover VMs.
        for vmxml_backup in vms_backup:
            vmxml_backup.sync()

        # Remove disks.
        for img in disks:
            if "source" in img:
                os.remove(img["source"])
Example #26
def run(test, params, env):
    """
    Test network connectivity
    """

    def setup_default():
        """
        Default setup
        """
        logging.debug("Remove VM's interface devices.")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vm_attrs = eval(params.get('vm_attrs', '{}'))
        if vm_attrs:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            vmxml.setup_attrs(**vm_attrs)
            vmxml.sync()

    def teardown_default():
        """
        Default cleanup
        """
        pass

    def setup_vdpa():
        """
        Setup vDPA environment
        """
        setup_default()
        test_env_obj = None
        if test_target == "simulator":
            test_env_obj = utils_vdpa.VDPASimulatorTest()
            test_env_obj.setup()
        else:
            vdpa_mgmt_tool_extra = params.get("vdpa_mgmt_tool_extra", "")
            pf_pci = utils_vdpa.get_vdpa_pci()
            test_env_obj = utils_vdpa.VDPAOvsTest(pf_pci, mgmt_tool_extra=vdpa_mgmt_tool_extra)
            test_env_obj.setup()
            params['mac_addr'] = test_env_obj.vdpa_mac.get(params.get("vdpa_dev", "vdpa0"))

        return test_env_obj

    def teardown_vdpa():
        """
        Cleanup vDPA environment
        """
        if test_target != "simulator":
            service.Factory.create_service("NetworkManager").restart()
        if test_obj:
            test_obj.cleanup()

    def run_test(dev_type, params, test_obj=None):
        """
        Test the connectivity of vm's interface

        1) Start the vm with an interface
        2) Check the network driver of VM's interface
        3) Check the network connectivity
        4) Destroy the VM
        """
        # Setup Iface device
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_dict = interface_base.parse_iface_dict(params)
        iface_dev = interface_base.create_iface(dev_type, iface_dict)
        libvirt.add_vm_device(vmxml, iface_dev)

        logging.info("Start a VM with a '%s' type interface.", dev_type)
        vm.start()
        vm_session = vm.wait_for_serial_login(timeout=240)
        vm_iface_info = interface_base.get_vm_iface_info(vm_session)
        if params.get('vm_iface_driver'):
            if vm_iface_info.get('driver') != params.get('vm_iface_driver'):
                test.fail("VM iface should be {}, but got {}."
                          .format(params.get('vm_iface_driver'),
                                  vm_iface_info.get('driver')))

        logging.info("Check the network connectivity")
        check_points.check_network_accessibility(
            vm, test_obj=test_obj, **params)
        virsh.destroy(vm.name, **VIRSH_ARGS)

    libvirt_version.is_libvirt_feature_supported(params)
    utils_misc.is_qemu_function_supported(params)

    # Variable assignment
    test_target = params.get('test_target', '')
    dev_type = params.get('dev_type', '')

    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    backup_vmxml = vmxml.copy()

    setup_test = eval("setup_%s" % dev_type) if "setup_%s" % dev_type in \
        locals() else setup_default
    teardown_test = eval("teardown_%s" % dev_type) if "teardown_%s" % \
        dev_type in locals() else teardown_default

    test_obj = None
    try:
        # Execute test
        test_obj = setup_test()
        run_test(dev_type, params, test_obj=test_obj)

    finally:
        backup_vmxml.sync()
        teardown_test()
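
The eval()-based dispatch above resolves setup_vdpa/teardown_vdpa when dev_type is "vdpa" and falls back to the defaults otherwise; an equivalent eval-free sketch:

# handlers = locals()
# setup_test = handlers.get("setup_%s" % dev_type, setup_default)
# teardown_test = handlers.get("teardown_%s" % dev_type, teardown_default)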
Example #27
def run(test, params, env):
    """
    Test storage migration
    1) Do storage migration(copy-storage-all/copy-storage-inc) with
    TLS encryption - NBD transport
    2) Cancel storage migration with TLS encryption
    3) Copy only the top image for storage migration with backing chain
    4) Migrate vm with copy storage - Native TLS(--tls) - inconsistent CN and
        server hostname
    5) Migrate vm with copy storage over TCP transport - Specified IP
    6) Migrate vm with copy storage over TCP transport - Specified IP+Port
    7) Migrate vm with copy storage over TCP transport - Specified disks_uri

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def prepare_nfs_backingfile(vm, params):
        """
        Create an image using nfs type backing_file

        :param vm: The guest
        :param params: the parameters used
        """
        mnt_path_name = params.get("nfs_mount_dir", "nfs-mount")
        exp_opt = params.get("export_options", "rw,no_root_squash,fsid=0")
        exp_dir = params.get("export_dir", "nfs-export")
        backingfile_img = params.get("source_dist_img", "nfs-img")
        disk_format = params.get("disk_format", "qcow2")
        img_name = params.get("img_name", "test.img")
        precreation = "yes" == params.get("precreation", "yes")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        disk_xml = vmxml.devices.by_device_tag('disk')[0]
        src_disk_format = disk_xml.xmltreefile.find('driver').get('type')
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        disk_img = os.path.join(os.path.dirname(blk_source), img_name)

        res = libvirt.setup_or_cleanup_nfs(True,
                                           mnt_path_name,
                                           is_mount=True,
                                           export_options=exp_opt,
                                           export_dir=exp_dir)
        mnt_path = res["mount_dir"]
        params["selinux_status_bak"] = res["selinux_status_bak"]

        if vm.is_alive():
            vm.destroy(gracefully=False)

        disk_cmd = ("qemu-img convert -f %s -O %s %s %s/%s" %
                    (src_disk_format, disk_format, blk_source, mnt_path,
                     backingfile_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append("%s/%s" % (mnt_path, backingfile_img))
        logging.debug("Create a local image backing on NFS.")
        disk_cmd = ("qemu-img create -f %s -b %s/%s %s" %
                    (disk_format, mnt_path, backingfile_img, disk_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append(disk_img)
        if precreation:
            logging.debug("Create an image backing on NFS on remote host.")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append("%s/%s" % (mnt_path, backingfile_img))
            remote_image_list.append(disk_img)
            remote_session.close()

        params.update({
            'disk_source_name': disk_img,
            'disk_type': 'file',
            'disk_source_protocol': 'file'
        })
        libvirt.set_vm_disk(vm, params)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Local variables
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    virsh_options = params.get("virsh_options", "")
    copy_storage_option = params.get("copy_storage_option")
    extra = params.get("virsh_migrate_extra", "")
    options = params.get("virsh_migrate_options", "--live --verbose")
    backingfile_type = params.get("backingfile_type")
    check_str_local_log = params.get("check_str_local_log", "")
    disk_format = params.get("disk_format", "qcow2")
    log_file = params.get("log_outputs", "/var/log/libvirt/libvirtd.log")
    daemon_conf_dict = eval(params.get("daemon_conf_dict", '{}'))
    cancel_migration = "yes" == params.get("cancel_migration", "no")
    check_disks_port = "yes" == params.get("check_disks_port", "no")
    migrate_again = "yes" == params.get("migrate_again", "no")
    precreation = "yes" == params.get("precreation", "yes")
    tls_recovery = "yes" == params.get("tls_auto_recovery", "yes")
    func_params_exists = "yes" == params.get("func_params_exists", "no")
    status_error = "yes" == params.get("status_error", "no")

    local_image_list = []
    remote_image_list = []
    tls_obj = None

    func_name = None
    daemon_conf = None
    mig_result = None
    remote_session = None
    vm_session = None
    remove_dict = {}
    src_libvirt_file = None

    libvirt_version.is_libvirt_feature_supported(params)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra = "{} {}".format(extra, copy_storage_option)

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})
    if cancel_migration:
        func_name = migration_test.do_cancel
    elif check_disks_port:
        func_name = libvirt_network.check_established

    # For safety, back up the original VM XML file.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        if backingfile_type:
            if backingfile_type == "nfs":
                prepare_nfs_backingfile(vm, params)
        if extra.count("copy-storage-all") and precreation:
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (disk_format, blk_source, vsize))
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append(blk_source)
            remote_session.close()

        # Update libvirtd configuration
        if daemon_conf_dict:
            if os.path.exists(log_file):
                os.remove(log_file)
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

        if extra.count("--tls"):
            tls_obj = TLSConnection(params)
            if tls_recovery:
                tls_obj.auto_recover = True
                tls_obj.conn_setup()

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))
        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        migration_test.ping_vm(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra,
                                    func=func_name,
                                    **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if migrate_again and status_error:
            logging.debug(
                "Sleeping 10 seconds before rerunning the migration.")
            time.sleep(10)
            if cancel_migration:
                func_name = None
            params["status_error"] = "no"
            migration_test.do_migration(vms,
                                        None,
                                        dest_uri,
                                        'orderly',
                                        options,
                                        thread_timeout=900,
                                        ignore_status=True,
                                        virsh_opt=virsh_options,
                                        extra_opts=extra,
                                        func=func_name,
                                        **extra_args)

            mig_result = migration_test.ret
            migration_test.check_result(mig_result, params)
        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
        except Exception as err:
            logging.error(err)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        orig_config_xml.sync()

        if daemon_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None,
                                             is_recover=True,
                                             config_object=daemon_conf)
        if src_libvirt_file:
            src_libvirt_file.restore()

        if tls_obj:
            logging.debug("Clean up local objs")
            del tls_obj
        for source_file in local_image_list:
            libvirt.delete_local_disk("file", path=source_file)
        for img in remote_image_list:
            remote.run_remote_cmd("rm -rf %s" % img, params)

        if remote_session:
            remote_session.close()
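
For reference, a minimal standalone sketch of the backing-chain setup that
prepare_nfs_backingfile() performs, using plain qemu-img calls instead of the
avocado-vt helpers; the paths and size below are hypothetical:

import subprocess

def make_backing_chain(base_path, overlay_path, size="1G"):
    """Create a qcow2 base image plus a local overlay backed by it.

    With such a chain, --copy-storage-inc only needs to transfer the
    overlay, since the base can be shared (e.g. over NFS).
    """
    # Create the shared base image.
    subprocess.run(["qemu-img", "create", "-f", "qcow2", base_path, size],
                   check=True)
    # Create the overlay that points at the base as its backing file.
    subprocess.run(["qemu-img", "create", "-f", "qcow2",
                    "-b", base_path, "-F", "qcow2", overlay_path],
                   check=True)

# Hypothetical paths, for illustration only.
make_backing_chain("/mnt/nfs/base.qcow2", "/var/lib/libvirt/images/top.qcow2")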
Example #28
def run(test, params, env):
    """
    This test covers two volume commands: vol-clone and vol-wipe.

    1. Create a given type pool.
    2. Create a given format volume in the pool.
    3. Clone the newly created volume.
    4. Wipe the cloned volume.
    5. Delete the volume and pool.
    """

    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    if not os.path.dirname(pool_target):
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    emulated_image = params.get("emulated_image")
    emulated_image_size = params.get("emulated_image_size")
    vol_name = params.get("vol_name")
    new_vol_name = params.get("new_vol_name")
    vol_capability = params.get("vol_capability")
    vol_allocation = params.get("vol_allocation")
    vol_format = params.get("vol_format")
    clone_option = params.get("clone_option", "")
    wipe_algorithms = params.get("wipe_algorithms")
    b_luks_encrypted = "luks" == params.get("encryption_method")
    encryption_password = params.get("encryption_password", "redhat")
    secret_uuids = []
    wipe_old_vol = False
    with_clusterSize = "yes" == params.get("with_clusterSize")
    vol_clusterSize = params.get("vol_clusterSize", "64")
    vol_clusterSize_unit = params.get("vol_clusterSize_unit")
    libvirt_version.is_libvirt_feature_supported(params)

    if virsh.has_command_help_match("vol-clone",
                                    "--prealloc-metadata") is None:
        if "prealloc-metadata" in clone_option:
            test.cancel("Option --prealloc-metadata " "is not supported.")

    clone_status_error = "yes" == params.get("clone_status_error", "no")
    wipe_status_error = "yes" == params.get("wipe_status_error", "no")
    setup_libvirt_polkit = "yes" == params.get("setup_libvirt_polkit")

    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unpri_user = params.get('unprivileged_user')
    if unpri_user:
        if unpri_user.count('EXAMPLE'):
            unpri_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Algorithms other than 'zero' require the scrub utility to be installed.
    try:
        utils_path.find_command('scrub')
    except utils_path.CmdNotFoundError:
        logging.warning("Can't locate scrub binary, only 'zero' algorithm "
                        "is used.")
        valid_algorithms = ["zero"]
    else:
        valid_algorithms = [
            "zero", "nnsa", "dod", "bsi", "gutmann", "schneier", "pfitzner7",
            "pfitzner33", "random"
        ]

    # Choose an algorithm randomly
    if wipe_algorithms:
        alg = random.choice(wipe_algorithms.split())
    else:
        alg = random.choice(valid_algorithms)

    libvirt_pvt = utlv.PoolVolumeTest(test, params)
    libvirt_pool = libvirt_storage.StoragePool()
    if libvirt_pool.pool_exists(pool_name):
        test.error("Pool '%s' already exist" % pool_name)
    try:
        # Create a new pool
        disk_vol = []
        if pool_type == 'disk':
            disk_vol.append(params.get("pre_vol", '10M'))
        libvirt_pvt.pre_pool(pool_name=pool_name,
                             pool_type=pool_type,
                             pool_target=pool_target,
                             emulated_image=emulated_image,
                             image_size=emulated_image_size,
                             pre_disk_vol=disk_vol)

        libvirt_vol = libvirt_storage.PoolVolume(pool_name)
        # Create a new volume
        if vol_format in ['raw', 'qcow2', 'qed', 'vmdk']:
            if (b_luks_encrypted and vol_format in ['raw', 'qcow2']):
                if not libvirt_version.version_compare(2, 0, 0):
                    test.cancel("LUKS is not supported in current"
                                " libvirt version")
                if vol_format == "qcow2" and not libvirt_version.version_compare(
                        6, 10, 0):
                    test.cancel("Qcow2 format with luks encryption is not"
                                " supported in current libvirt version")
                luks_sec_uuid = create_luks_secret(
                    os.path.join(pool_target, vol_name), encryption_password,
                    test)
                secret_uuids.append(luks_sec_uuid)
                vol_arg = {}
                vol_arg['name'] = vol_name
                vol_arg['capacity'] = int(vol_capability)
                vol_arg['allocation'] = int(vol_allocation)
                vol_arg['format'] = vol_format
                if with_clusterSize:
                    vol_arg['clusterSize'] = int(vol_clusterSize)
                    vol_arg['clusterSize_unit'] = vol_clusterSize_unit
                create_luks_vol(pool_name, vol_name, luks_sec_uuid, vol_arg)
            else:
                libvirt_pvt.pre_vol(vol_name=vol_name,
                                    vol_format=vol_format,
                                    capacity=vol_capability,
                                    allocation=None,
                                    pool_name=pool_name)
        elif vol_format == 'partition':
            vol_name = list(utlv.get_vol_list(pool_name).keys())[0]
            logging.debug("Find partition %s in disk pool", vol_name)
        elif vol_format == 'sparse':
            # Create a sparse file in pool
            sparse_file = pool_target + '/' + vol_name
            cmd = "dd if=/dev/zero of=" + sparse_file
            cmd += " bs=1 count=0 seek=" + vol_capability
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Unknown volume format %s" % vol_format)

        # Refresh the pool
        virsh.pool_refresh(pool_name, debug=True)
        vol_info = libvirt_vol.volume_info(vol_name)
        if not vol_info:
            test.error("Fail to get info of volume %s" % vol_name)

        for key in vol_info:
            logging.debug("Original volume info: %s = %s", key, vol_info[key])

        # Metadata preallocation is not supported for block volumes
        if vol_info["Type"] == "block" and clone_option.count(
                "prealloc-metadata"):
            clone_status_error = True
        if b_luks_encrypted:
            wipe_old_vol = True

        if pool_type == "disk":
            new_vol_name = utlv.new_disk_vol_name(pool_name)
            if new_vol_name is None:
                test.error("Fail to generate volume name")
            # update polkit rule as the volume name changed
            if setup_libvirt_polkit:
                vol_pat = r"lookup\('vol_name'\) == ('\S+')"
                new_value = "lookup('vol_name') == '%s'" % new_vol_name
                utlv.update_polkit_rule(params, vol_pat, new_value)

        bad_cloned_vol_name = params.get("bad_cloned_vol_name", "")
        if bad_cloned_vol_name:
            new_vol_name = bad_cloned_vol_name

        # Clone volume
        clone_result = virsh.vol_clone(vol_name,
                                       new_vol_name,
                                       pool_name,
                                       clone_option,
                                       debug=True)
        if not clone_status_error:
            if clone_result.exit_status != 0:
                test.fail("Clone volume fail:\n%s" %
                          clone_result.stderr.strip())
            else:
                vol_info = libvirt_vol.volume_info(new_vol_name)
                for key in vol_info:
                    logging.debug("Cloned volume info: %s = %s", key,
                                  vol_info[key])
                logging.debug("Clone volume successfully.")
                # Wipe the new clone volume
                if alg:
                    logging.debug("Wiping volume by '%s' algorithm", alg)
                wipe_result = virsh.vol_wipe(new_vol_name,
                                             pool_name,
                                             alg,
                                             unprivileged_user=unpri_user,
                                             uri=uri,
                                             debug=True)
                unsupported_err = [
                    "Unsupported algorithm", "no such pattern sequence"
                ]
                if not wipe_status_error:
                    if wipe_result.exit_status != 0:
                        if any(err in wipe_result.stderr
                               for err in unsupported_err):
                            test.cancel(wipe_result.stderr)
                        test.fail("Wipe volume fail:\n%s" %
                                  clone_result.stdout.strip())
                    else:
                        virsh_vol_info = libvirt_vol.volume_info(new_vol_name)
                        for key in virsh_vol_info:
                            logging.debug("Wiped volume info(virsh): %s = %s",
                                          key, virsh_vol_info[key])
                        vol_path = virsh.vol_path(new_vol_name,
                                                  pool_name).stdout.strip()
                        qemu_vol_info = utils_misc.get_image_info(vol_path)
                        for key in qemu_vol_info:
                            logging.debug("Wiped volume info(qemu): %s = %s",
                                          key, qemu_vol_info[key])
                        if qemu_vol_info['format'] != 'raw':
                            test.fail("Expected wiped volume format to be "
                                      "raw")
                elif wipe_status_error and wipe_result.exit_status == 0:
                    test.fail("Expected volume wipe to fail, but it "
                              "succeeded.")
        elif clone_status_error and clone_result.exit_status == 0:
            test.fail("Expected volume clone to fail, but it succeeded.")

        if wipe_old_vol:
            # Wipe the old volume
            if alg:
                logging.debug("Wiping volume by '%s' algorithm", alg)
            wipe_result = virsh.vol_wipe(vol_name,
                                         pool_name,
                                         alg,
                                         unprivileged_user=unpri_user,
                                         uri=uri,
                                         debug=True)
            unsupported_err = [
                "Unsupported algorithm", "no such pattern sequence"
            ]
            if not wipe_status_error:
                if wipe_result.exit_status != 0:
                    if any(err in wipe_result.stderr
                           for err in unsupported_err):
                        test.cancel(wipe_result.stderr)
                    test.fail("Wipe volume fail:\n%s" %
                              clone_result.stdout.strip())
                else:
                    virsh_vol_info = libvirt_vol.volume_info(vol_name)
                    for key in virsh_vol_info:
                        logging.debug("Wiped volume info(virsh): %s = %s", key,
                                      virsh_vol_info[key])
                    vol_path = virsh.vol_path(vol_name,
                                              pool_name).stdout.strip()
                    qemu_vol_info = utils_misc.get_image_info(vol_path)
                    for key in qemu_vol_info:
                        logging.debug("Wiped volume info(qemu): %s = %s", key,
                                      qemu_vol_info[key])
                    if qemu_vol_info['format'] != 'raw':
                        test.fail("Expected wiped volume format to be raw")
            elif wipe_status_error and wipe_result.exit_status == 0:
                test.fail("Expected volume wipe to fail, but it succeeded.")

        if bad_cloned_vol_name:
            pattern = "volume name '%s' cannot contain '/'" % new_vol_name
            if re.search(pattern, clone_result.stderr) is None:
                test.fail("vol-clone failed with unexpected reason")
    finally:
        # Clean up
        try:
            libvirt_pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                     emulated_image)
            for secret_uuid in set(secret_uuids):
                virsh.secret_undefine(secret_uuid)

        except exceptions.TestFail as detail:
            logging.error(str(detail))
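
As a reference, the clone-and-wipe flow above boils down to two virsh calls;
a minimal standalone sketch using subprocess, assuming a running libvirtd and
an active pool (the pool and volume names here are hypothetical):

import subprocess

def clone_and_wipe(pool, vol, new_vol, algorithm="zero"):
    """Clone a storage volume, then wipe the clone."""
    subprocess.run(["virsh", "vol-clone", vol, new_vol, "--pool", pool],
                   check=True)
    # Algorithms other than "zero" require the scrub utility on the host.
    subprocess.run(["virsh", "vol-wipe", new_vol, "--pool", pool,
                    "--algorithm", algorithm], check=True)

clone_and_wipe("default", "test.img", "test-clone.img")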
Example #29
def run(test, params, env):
    """
    Test booting OVMF and SeaBIOS guests with various boot options

    Steps:
    1) Edit VM xml with specified options
    2) For secure boot mode, boot the OVMF guest from
       cdrom first, enroll the key, then switch to
       booting from hd
    3) For normal boot mode, directly boot Guest from given device
    4) Verify if Guest can boot as expected
    """
    vm_name = params.get("main_vm", "")
    vm = env.get_vm(vm_name)
    username = params.get("username", "root")
    password = params.get("password", "redhat")
    test_cmd = params.get("test_cmd", "")
    expected_output = params.get("expected_output", "")
    check_point = params.get("checkpoint", "")
    status_error = "yes" == params.get("status_error", "no")
    boot_iso_file = os.path.join(data_dir.get_tmp_dir(), "boot.iso")
    non_release_os_url = params.get("non_release_os_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(),
                                      "non_released_os.qcow2")
    release_os_url = params.get("release_os_url", "")
    download_released_file_path = os.path.join(data_dir.get_tmp_dir(),
                                               "released_os.qcow2")
    uefi_iso = params.get("uefi_iso", "")
    custom_codes = params.get("uefi_custom_codes", "")
    uefi_target_dev = params.get("uefi_target_dev", "")
    uefi_device_bus = params.get("uefi_device_bus", "")
    with_boot = (params.get("with_boot", "no") == "yes")
    boot_ref = params.get("boot_ref", "dev")
    boot_order = params.get("boot_order", "1")
    boot_dev = params.get("boot_dev", "hd")
    target_dev = params.get("target_dev", "vdb")
    vol_name = params.get("vol_name")
    brick_path = os.path.join(test.virtdir, "gluster-pool")
    boot_type = params.get("boot_type", "seabios")
    boot_loadparm = params.get("boot_loadparm", None)
    libvirt_version.is_libvirt_feature_supported(params)

    # Prepare result checkpoint list
    check_points = []
    if check_point:
        check_points.append(check_point)

    # Back up the VM XML
    vmxml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare a blank value to decide whether to delete the config file at the end of the test
    ceph_cfg = ''
    try:
        # Create config file if it doesn't exist
        ceph_cfg = ceph.create_config_file(params.get("mon_host"))
        setup_test_env(params, test)
        apply_boot_options(vmxml, params, test)
        blk_source = vm.get_first_disk_devices()['source']
        set_domain_disk(vmxml, blk_source, params, test)
        vmxml.remove_all_boots()
        if with_boot:
            boot_kwargs = {
                "boot_ref": boot_ref,
                "boot_dev": boot_dev,
                "boot_order": boot_order,
                "target_dev": target_dev,
                "loadparm": boot_loadparm
            }
            if "yes" == params.get("two_same_boot_dev", "no"):
                boot_kwargs.update({"two_same_boot_dev": True})
            set_boot_dev_or_boot_order(vmxml, **boot_kwargs)
        define_error = ("yes" == params.get("define_error", "no"))
        enable_normal_boot(vmxml, check_points, define_error, test)
        # Some negative cases failed at virsh.define
        if define_error:
            return

        # Start VM and check the result
        # For boot from cdrom or non_release_os, just verify keywords in the serial console output
        # For boot from a disk image, run 'test_cmd' to verify that the OS booted well
        if boot_dev == "cdrom" or non_release_os_url:
            if not vm.is_alive():
                vm.start()
                check_prompt = params.get("check_prompt", "")
                while True:
                    if boot_type == "ovmf":
                        match, text = vm.serial_console.read_until_any_line_matches(
                            [check_prompt], timeout=30.0, internal_timeout=0.5)
                    else:
                        match, text = read_until_any_line_matches(
                            vm.serial_console, [check_prompt],
                            timeout=30.0,
                            internal_timeout=0.5)
                    logging.debug("matches %s", check_prompt)
                    if match == -1:
                        logging.debug("Got check point as expected")
                        break
        elif boot_dev == "hd":
            ret = virsh.start(vm_name, timeout=60)
            utlv.check_result(ret, expected_fails=check_points)
            # When no boot options were given, further check that a boot dev is added automatically
            if not with_boot:
                if re.search(r"<boot dev='hd'/>",
                             virsh.dumpxml(vm_name).stdout.strip()):
                    logging.debug("OS boot dev added automatically")
                else:
                    test.fail("OS boot dev not added as expected")
            if not status_error:
                vm_ip = vm.wait_for_get_address(0, timeout=240)
                remote_session = remote.wait_for_login("ssh", vm_ip, "22",
                                                       username, password,
                                                       r"[\#\$]\s*$")
                if test_cmd:
                    status, output = remote_session.cmd_status_output(test_cmd)
                    logging.debug("CMD '%s' running result is:\n%s", test_cmd,
                                  output)
                    if expected_output:
                        if not re.search(expected_output, output):
                            test.fail("Expected '%s' to match '%s'"
                                      " but failed." %
                                      (output, expected_output))
                    if status:
                        test.fail("Failed to boot %s from %s" %
                                  (vm_name, vmxml.xml))
                remote_session.close()
        logging.debug("Succeed to boot %s" % vm_name)
    finally:
        # Remove ceph configure file if created.
        if ceph_cfg:
            os.remove(ceph_cfg)
        logging.debug("Start to cleanup")
        if vm.is_alive():
            vm.destroy()
        logging.debug("Restore the VM XML")
        vmxml_backup.sync(options="--nvram")
        if cleanup_gluster:
            process.run("umount /mnt", ignore_status=True, shell=True)
            gluster.setup_or_cleanup_gluster(False,
                                             brick_path=brick_path,
                                             **params)
        if cleanup_iscsi:
            utlv.setup_or_cleanup_iscsi(False)
        if cleanup_iso_file:
            process.run("rm -rf %s" % boot_iso_file,
                        shell=True,
                        ignore_status=True)
        if cleanup_image_file:
            process.run("rm -rf %s" % download_file_path,
                        shell=True,
                        ignore_status=True)
        if cleanup_released_image_file:
            process.run("rm -rf %s" % download_released_file_path,
                        shell=True,
                        ignore_status=True)
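
For reference, the automatic <boot dev='hd'/> check above can be reproduced
standalone by parsing the dumped XML; a minimal sketch, assuming a defined
domain (the name "guest" is hypothetical):

import subprocess
import xml.etree.ElementTree as ET

def has_boot_dev(vm_name, dev="hd"):
    """Return True if <boot dev='...'/> appears under <os> in the live XML."""
    xml_str = subprocess.run(["virsh", "dumpxml", vm_name], check=True,
                             capture_output=True, text=True).stdout
    root = ET.fromstring(xml_str)
    return any(boot.get("dev") == dev for boot in root.findall("./os/boot"))

print(has_boot_dev("guest"))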
Example #30
def run(test, params, env):
    """
    Test virsh domblkthreshold option.

    1. Prepare backend storage (file/luks/iscsi/gluster/ceph/nbd)
    2. Start VM
    3. Set domblkthreshold on target device in VM
    4. Trigger one threshold event
    5. Check threshold event is received as expected
    6. Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    block_threshold_timeout = params.get("block_threshold_timeout", "120")
    event_type = params.get("event_type", "block-threshold")
    block_threshold_option = params.get("block_threshold_option", "--loop")

    def set_vm_block_domblkthreshold(vm_name, target_device, threshold,
                                     **dargs):
        """
        Set VM block threshold on specific target device.

        :param vm_name: VM name.
        :param target_device: target device in VM
        :param threshold: threshold value with specific unit such as 100M
        :param dargs: mutable parameter dict
        """
        ret = virsh.domblkthreshold(vm_name, target_device, threshold, **dargs)
        libvirt.check_exit_status(ret)
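        # Note: the threshold is one-shot; libvirt emits a single
        # BLOCK_THRESHOLD event when the guest's write offset first exceeds
        # the value, after which it must be re-armed if needed.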

    def trigger_block_threshold_event(vm_domain, target):
        """
        Trigger block threshold event.

        :param vm_domain: VM object
        :param target: Disk dev in VM.
        """
        try:
            session = vm_domain.wait_for_login()
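            # Give the event-waiting thread in the main thread time to start.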
            time.sleep(10)
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   " mount /dev/{0} /mnt && "
                   " dd if=/dev/urandom of=/mnt/bigfile bs=1M count=101".
                   format(target))
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to mount and fill data in VM: %s" % output)
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

    def check_threshold_event(vm_name, event_type, event_timeout, options,
                              **dargs):
        """
        Check threshold event.

        :param vm_name: VM name
        :param event_type: event type.
        :param event_timeout: event timeout value
        :param options: event option
        :param dargs: dynamic parameters.
        """
        ret = virsh.event(vm_name, event_type, event_timeout, options, **dargs)
        logging.debug(ret.stdout_text)
        libvirt.check_exit_status(ret)
        event_out = ret.stdout_text.replace("\n", "").strip()
        expect_event_count = params.get("event_count")
        if expect_event_count:
            match = re.match(r'.*events received:\s+(.*)', event_out)
            if match is None:
                test.fail("No events received")
            event_count = match.group(1)
            if event_count != expect_event_count:
                test.fail(
                    "Received event count %s does not equal the expected "
                    "count %s" % (event_count, expect_event_count))

    def create_vol(p_name, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param vol_params: Volume parameters dict.
        """
        # Clean up dirty volumes if pool has.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def trigger_block_commit(vm_name, target, blockcommit_options,
                             **virsh_dargs):
        """
        Trigger blockcommit.

        :param vm_name: VM name
        :param target: Disk dev in VM.
        :param blockcommit_options: blockcommit option
        :param virsh_dargs: additional parameters
        """
        virsh.blockcommit(vm_name,
                          target,
                          blockcommit_options,
                          ignore_status=False,
                          **virsh_dargs)

    def trigger_block_copy(vm_name, target, dest_path, blockcopy_options,
                           **virsh_dargs):
        """
        Trigger blockcopy

        :param vm_name: string, VM name
        :param target: string, target disk
        :param dest_path: string, the path of copied disk
        :param blockcopy_options: string, some options applied
        :param virsh_dargs: additional options
        """
        result = virsh.blockcopy(vm_name, target, dest_path, blockcopy_options,
                                 **virsh_dargs)
        libvirt.check_exit_status(result)

    def trigger_mirror_threshold_event(vm_domain, target):
        """
        Trigger mirror mode block threshold event.

        :param vm_domain: VM object
        :param target: Disk target in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            # Sleep 10 seconds so the event-waiting thread in the main thread can start first
            time.sleep(10)
            cmd = ("dd if=/dev/urandom of=file bs=1G count=3")
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to fill data in VM target: %s with %s" %
                           (target, output))
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

    def get_mirror_source_index(vm_name, dev_index=0):
        """
        Get mirror source index

        :param vm_name: VM name
        :param dev_index: Disk device index.
        :return mirror source index in integer
        """
        disk_list = vm_xml.VMXML.get_disk_source(vm_name)
        disk_mirror = disk_list[dev_index].find('mirror')
        if disk_mirror is None:
            test.fail("Failed to get disk mirror")
        disk_mirror_source = disk_mirror.find('source')
        return int(disk_mirror_source.get('index'))
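
    # During an active block job libvirt adds a <mirror> element to the disk
    # XML; the index attribute of its <source> lets domblkthreshold address
    # the copy destination, e.g. "vda[2]", instead of the top-level target.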

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    mirror_mode_blockcommit = "yes" == params.get("mirror_mode_blockcommit",
                                                  "no")
    mirror_mode_blockcopy = "yes" == params.get("mirror_mode_blockcopy", "no")
    default_snapshot_test = "yes" == params.get("default_snapshot_test", "no")
    threshold_index_event_once = "yes" == params.get(
        "threshold_index_event_once", "no")
    block_threshold_value = params.get("block_threshold_value", "100M")
    snapshot_external_disks = []
    tmp_dir = data_dir.get_tmp_dir()
    dest_path = params.get("dest_path", "/var/lib/libvirt/images/newclone")

    pvt = None
    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name)
    if ((backend_storage_type == "luks")
            and not libvirt_version.version_compare(3, 9, 0)):
        test.cancel(
            "Cannot support <encryption> inside disk in this libvirt version.")

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []

    # Enable filter feature case in specific libvirt version
    libvirt_version.is_libvirt_feature_supported(params)
    try:
        # Clean up dirty secrets in test environments if there are.
        utils_secret.clean_up_secrets()
        # Setup backend storage
        if backend_storage_type == "file":
            image_filename = params.get("image_filename", "raw.img")
            disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
            device_source = libvirt.create_local_disk(backend_storage_type,
                                                      disk_path, storage_size,
                                                      device_format)
            disks_img.append({
                "format": device_format,
                "source": disk_path,
                "path": disk_path
            })
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
        # Setup backend storage
        elif backend_storage_type == "luks":
            luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
            luks_secret_passwd = params.get("luks_secret_passwd", "password")
            # Create secret
            luks_sec_uuid = libvirt.create_secret(params)
            logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
            virsh.secret_set_value(luks_sec_uuid,
                                   luks_secret_passwd,
                                   encode=True,
                                   ignore_status=False,
                                   debug=True)
            image_filename = params.get("image_filename", "raw.img")
            device_source = os.path.join(data_dir.get_tmp_dir(),
                                         image_filename)

            disks_img.append({
                "format": device_format,
                "source": device_source,
                "path": device_source
            })
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
            disk_encryption_dict = {
                "encryption": "luks",
                "secret": {
                    "type": "passphrase",
                    "uuid": luks_sec_uuid
                }
            }

            cmd = (
                "qemu-img create -f luks "
                "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
                "-o key-secret=sec0 %s %s" %
                (luks_encrypt_passwd, device_source, storage_size))
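            # The passphrase is base64-encoded inline and passed to qemu-img
            # as a secret object, producing a LUKS-encrypted image.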
            if process.system(cmd, shell=True):
                test.error("Can't create a luks encrypted img by qemu-img")
        elif backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_passwd", "password")
                auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi")
                auth_sec_dict = {
                    "sec_usage": "iscsi",
                    "sec_target": auth_sec_usage
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                # Set password of auth secret (not luks encryption secret)
                virsh.secret_set_value(auth_sec_uuid,
                                       chap_passwd,
                                       encode=True,
                                       ignore_status=False,
                                       debug=True)
                iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                    is_setup=True,
                    is_login=False,
                    image_size=storage_size,
                    chap_user=chap_user,
                    chap_passwd=chap_passwd,
                    portal_ip=iscsi_host)
                # ISCSI auth attributes for disk xml
                disk_auth_dict = {
                    "auth_user": chap_user,
                    "secret_type": auth_sec_usage_type,
                    "secret_usage": auth_sec_usage_target
                }
                device_source = "iscsi://%s:%s/%s/%s" % (
                    iscsi_host, iscsi_port, iscsi_target, lun_num)
                disk_src_dict = {
                    "attrs": {
                        "protocol": "iscsi",
                        "name": "%s/%s" % (iscsi_target, lun_num)
                    },
                    "hosts": [{
                        "name": iscsi_host,
                        "port": iscsi_port
                    }]
                }
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name",
                                           "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)

            device_source = "gluster://%s/%s/%s" % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            cmd = ("qemu-img create -f %s "
                   "%s %s" % (device_format, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a gluster type img by qemu-img")
            disk_src_dict = {
                "attrs": {
                    "protocol": "gluster",
                    "name": "%s/%s" % (gluster_vol_name, gluster_img_name)
                },
                "hosts": [{
                    "name": gluster_host_ip,
                    "port": "24007"
                }]
            }
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")

            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Prepare a blank value to decide whether to delete the config file at the end of the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            # If enable auth, prepare a local file to save key
            if ceph_client_name and ceph_client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (ceph_client_name, ceph_client_key))
                key_opt = "--keyring %s" % key_file
                auth_sec_dict = {
                    "sec_usage": auth_sec_usage_type,
                    "sec_name": "ceph_auth_secret"
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid,
                                       ceph_auth_key,
                                       debug=True)
                disk_auth_dict = {
                    "auth_user": ceph_auth_user,
                    "secret_type": auth_sec_usage_type,
                    "secret_uuid": auth_sec_uuid
                }
            else:
                test.error("No ceph client name/key provided.")
            device_source = "rbd:%s:mon_host=%s:keyring=%s" % (
                ceph_disk_name, ceph_mon_ip, key_file)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            # Create a local image.
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (device_format, img_file, storage_size))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip))
            if ceph_client_name and ceph_client_key:
                disk_path += (":id=%s:key=%s" %
                              (ceph_auth_user, ceph_auth_key))
            rbd_cmd = (
                "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name,
                               device_format, img_file, disk_path))
            process.run(rbd_cmd, ignore_status=False, shell=True)
            disk_src_dict = {
                "attrs": {
                    "protocol": "rbd",
                    "name": ceph_disk_name
                },
                "hosts": [{
                    "name": ceph_host_ip,
                    "port": ceph_host_port
                }]
            }
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            # Set virt_use_nfs
            virt_use_nfs = params.get("virt_use_nfs", "off")
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                                 shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_nfs value")

            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            # Create one image on nfs server
            libvirt.create_local_disk("file", device_source, '1', "raw")
            disks_img.append({
                "format": device_format,
                "source": device_source,
                "path": device_source
            })
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
        # Create a dir-based pool, then create one volume on it.
        elif backend_storage_type == "dir":
            pool_name = params.get("pool_name", "dir_pool")
            pool_target = params.get("pool_target")
            pool_type = params.get("pool_type")
            emulated_image = params.get("emulated_image")
            image_name = params.get("dir_image_name", "luks_1.img")
            # Create and start dir_based pool.
            pvt = libvirt.PoolVolumeTest(test, params)
            if not os.path.exists(pool_target):
                os.mkdir(pool_target)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            sp = libvirt_storage.StoragePool()
            if not sp.is_pool_active(pool_name):
                sp.set_pool_autostart(pool_name)
                sp.start_pool(pool_name)
            # Create one volume on the pool.
            volume_name = params.get("vol_name")
            volume_alloc = params.get("vol_alloc")
            volume_cap_unit = params.get("vol_cap_unit")
            volume_cap = params.get("vol_cap")
            volume_target_path = params.get("sec_volume")
            volume_target_format = params.get("target_format")
            volume_target_encypt = params.get("target_encypt", "")
            volume_target_label = params.get("target_label")
            vol_params = {
                "name": volume_name,
                "capacity": int(volume_cap),
                "allocation": int(volume_alloc),
                "format": volume_target_format,
                "path": volume_target_path,
                "label": volume_target_label,
                "capacity_unit": volume_cap_unit
            }
            try:
                # Creating a luks encryption volume is not supported on
                # libvirt versions lower than 2.5.0, so skip it there.
                create_vol(pool_name, vol_params)
            except AssertionError as info:
                err_msgs = ("create: invalid option")
                if str(info).count(err_msgs):
                    test.cancel("Creating luks encryption volume "
                                "is not supported on this libvirt version")
                else:
                    test.error("Failed to create volume."
                               "Error: %s" % str(info))
            disk_src_dict = {'attrs': {'file': volume_target_path}}
            device_source = volume_target_path
        elif backend_storage_type == "nbd":
            # Get server hostname.
            hostname = process.run('hostname',
                                   ignore_status=False,
                                   shell=True,
                                   verbose=True).stdout_text.strip()
            # Setup backend storage
            nbd_server_host = hostname
            nbd_server_port = params.get("nbd_server_port")
            image_path = params.get("emulated_image",
                                    "/var/lib/libvirt/images/nbdtest.img")
            # Create NbdExport object
            nbd = NbdExport(image_path,
                            image_format=device_format,
                            port=nbd_server_port)
            nbd.start_nbd_server()
            # Prepare disk source xml
            source_attrs_dict = {"protocol": "nbd"}
            disk_src_dict = {}
            disk_src_dict.update({"attrs": source_attrs_dict})
            disk_src_dict.update({
                "hosts": [{
                    "name": nbd_server_host,
                    "port": nbd_server_port
                }]
            })
            device_source = "nbd://%s:%s/%s" % (nbd_server_host,
                                                nbd_server_port, image_path)

        logging.debug("device source is: %s", device_source)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        if disk_encryption_dict:
            disk_xml.encryption = disk_xml.new_encryption(
                **disk_encryption_dict)
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml except mirror_mode_blockcommit or mirror_mode_blockcopy
        if (not mirror_mode_blockcommit and not mirror_mode_blockcopy):
            vmxml.add_device(disk_xml)
        try:
            vmxml.sync()
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s", str(xml_error))
        except virt_vm.VMStartError as details:
            # When use wrong password in disk xml for cold plug cases,
            # VM cannot be started
            if status_error:
                logging.info("VM failed to start as expected: %s",
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        func_name = trigger_block_threshold_event
        # Additional operations before set block threshold
        if backend_storage_type == "file":
            logging.info("Create snapshot...")
            snap_opt = " %s --disk-only "
            snap_opt += "%s,snapshot=external,file=%s"
            if default_snapshot_test:
                for index in range(1, 5):
                    snapshot_name = "snapshot_%s" % index
                    snap_path = "%s/%s_%s.snap" % (tmp_dir, vm_name, index)
                    snapshot_external_disks.append(snap_path)
                    snap_option = snap_opt % (snapshot_name, device_target,
                                              snap_path)
                    virsh.snapshot_create_as(vm_name,
                                             snap_option,
                                             ignore_status=False,
                                             debug=True)

            if mirror_mode_blockcommit:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel(
                        "Setting a threshold for the disk mirroring feature "
                        "is not supported on the current libvirt version")
                vmxml.del_device(disk_xml)
                virsh.snapshot_create_as(vm_name,
                                         "--disk-only --no-metadata",
                                         ignore_status=False,
                                         debug=True)
                # Do active blockcommit in background.
                blockcommit_options = "--active"
                mirror_blockcommit_thread = threading.Thread(
                    target=trigger_block_commit,
                    args=(
                        vm_name,
                        'vda',
                        blockcommit_options,
                    ),
                    kwargs={'debug': True})
                mirror_blockcommit_thread.start()
                device_target = "vda[1]"
                func_name = trigger_mirror_threshold_event
            if mirror_mode_blockcopy:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel(
                        "Setting a threshold for the disk mirroring feature "
                        "is not supported on the current libvirt version")
                # Do transient blockcopy in background.
                blockcopy_options = "--transient-job "
                # Do cleanup
                if os.path.exists(dest_path):
                    libvirt.delete_local_disk("file", dest_path)
                mirror_blockcopy_thread = threading.Thread(
                    target=trigger_block_copy,
                    args=(
                        vm_name,
                        'vda',
                        dest_path,
                        blockcopy_options,
                    ),
                    kwargs={'debug': True})
                mirror_blockcopy_thread.start()
                mirror_blockcopy_thread.join(10)
                device_target = "vda[%d]" % get_mirror_source_index(vm_name)
                func_name = trigger_mirror_threshold_event
            if threshold_index_event_once:
                # Use dev[index] to exercise setting the domain block threshold with the DEV[INDEX] form
                device_target = params.get("dev_target_index", "vdb[1]")
        set_vm_block_domblkthreshold(vm_name, device_target,
                                     block_threshold_value, **{"debug": True})
        if threshold_index_event_once:
            # Restore device_target to its original value since the event trigger uses the device target rather than DEV[INDEX]
            device_target = params.get("virt_disk_device_target", "vdd")
        cli_thread = threading.Thread(target=func_name,
                                      args=(vm, device_target))
        cli_thread.start()
        check_threshold_event(vm_name, event_type, block_threshold_timeout,
                              block_threshold_option, **{"debug": True})
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the VM first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        vmxml_backup.sync("--snapshots-metadata")

        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])

        for disk in snapshot_external_disks:
            libvirt.delete_local_disk('file', disk)

        if os.path.exists(dest_path):
            libvirt.delete_local_disk("file", dest_path)

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nfs":
            result = process.run("setsebool virt_use_nfs off", shell=True)
            if result.exit_status:
                logging.info("Failed to restore virt_use_nfs value")
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as nbd_ex:
                    logging.info("Cleanup of nbd failed: %s", str(nbd_ex))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
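
The domblkthreshold/event pair that this test drives through virsh can also
be exercised directly from the libvirt-python bindings; a minimal sketch,
assuming a running domain (the name "guest" and target "vdd" are
hypothetical):

import libvirt  # libvirt-python bindings

def on_threshold(conn, dom, dev, path, threshold, excess, opaque):
    # Called once the guest's write offset on `dev` exceeds the threshold.
    print("block-threshold on %s (%s): threshold=%d excess=%d"
          % (dev, dom.name(), threshold, excess))

libvirt.virEventRegisterDefaultImpl()
conn = libvirt.open("qemu:///system")
dom = conn.lookupByName("guest")
conn.domainEventRegisterAny(dom,
                            libvirt.VIR_DOMAIN_EVENT_ID_BLOCK_THRESHOLD,
                            on_threshold, None)
dom.setBlockThreshold("vdd", 100 * 1024 * 1024, 0)  # arm a 100 MiB threshold
while True:
    libvirt.virEventRunDefaultImpl()  # run the event loop (Ctrl-C to stop)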