Example #1
def reset_pci_num(vm_name, num=15):
    """
    Reset the number of guest pci, add 15 by default

    :param vm_name: VM name
    :param num: The number of expected pci
    """
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    # This function only works on aarch64 and x86 q35 machines
    if 'aarch64' in vmxml.os.arch \
            or 'q35' in vmxml.os.machine:
        # Default pcie setting
        pcie_root_port = {
            'controller_model': 'pcie-root-port',
            'controller_type': 'pci'
        }
        ret_indexes = get_max_contr_indexes(vmxml, 'pci', 'pcie-root-port')
        pcie_to_pci_brg_indexes = get_max_contr_indexes(
            vmxml, 'pci', 'pcie-to-pci-bridge')
        cur_pci_num = ret_indexes[0] if not pcie_to_pci_brg_indexes else \
            max(ret_indexes[0], pcie_to_pci_brg_indexes[0])
        LOG.debug("The current maximum PCI controller index is %d",
                  cur_pci_num)
        if cur_pci_num < num:
            for i in range(cur_pci_num + 1, num):
                pcie_root_port.update({'controller_index': "%d" % i})
                vmxml.add_device(libvirt.create_controller_xml(pcie_root_port))
        else:
            LOG.info("Current pci number is greater than expected")

    # synchronize XML
    vmxml.sync()
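
A minimal usage sketch, assuming the helper above and an already defined guest (the VM name is hypothetical):

    reset_pci_num('avocado-vt-vm1', num=20)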
Example #2
def add_pcie_controller(vm_name):
    """
    Add a pcie-to-pci-bridge controller if one does not exist in the VM

    :param vm_name: name of the VM
    """
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    pci_controllers = vmxml.get_controllers('pci')
    for controller in pci_controllers:
        if controller.get('model') == 'pcie-to-pci-bridge':
            break
    else:
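        # for/else: this branch runs only when no pcie-to-pci-bridge was found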
        contr_dict = {
            'controller_type': 'pci',
            'controller_model': 'pcie-to-pci-bridge'
        }
        libvirt.create_controller_xml(contr_dict, "add_controller", vm_name)
Example #3
def get_free_root_port(vm_name):
    """
    Get a free root port for an rng device

    :param vm_name: The name of the VM to operate on
    :return: The bus index of a free root port
    """
    root_ports = set()
    other_ports = set()
    used_slot = set()
    # Record the bus indexes for all pci controllers
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    pci_controllers = vmxml.get_controllers('pci')
    for controller in pci_controllers:
        if controller.get('model') == 'pcie-root-port':
            root_ports.add(controller.get('index'))
        else:
            other_ports.add(controller.get('index'))
    # Record the addresses being allocated for all pci devices
    pci_devices = vmxml.xmltreefile.find('devices').getchildren()
    for dev in pci_devices:
        address = dev.find('address')
        if address is not None:
            used_slot.add(address.get('bus'))
    # Find an unused bus address
    for bus_index in root_ports:
        bus = "%0#4x" % int(bus_index)
        if bus not in used_slot:
            return bus
    # Add a new pcie-root-port if no free one
    for index in range(1, 30):
        if str(index) not in (root_ports | other_ports):
            contr_dict = {
                'controller_type': 'pci',
                'controller_index': index,
                'controller_model': 'pcie-root-port'
            }
            libvirt.create_controller_xml(contr_dict, "add_controller",
                                          vm_name)
            return "%0#4x" % int(index)
    return None
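
The "%0#4x" format used above renders an index as a zero-padded hex string, for illustration:

    "%0#4x" % 3    # -> '0x03'
    "%0#4x" % 12   # -> '0x0c'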
Example #4
def attach_controller():  # pylint: disable=W0611
    """
    Sub test for attach controller
    """
    v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    contr_index = len(v_xml.get_controllers('scsi'))
    contr_type = params.get("controller_type", 'scsi')
    contr_model = params.get("controller_model", "virtio-scsi")
    contr_dict = {
        'controller_type': contr_type,
        'controller_model': contr_model,
        'controller_index': contr_index
    }
    if pci_bridge_index:
        slot = get_free_slot(pci_bridge_index, v_xml)
        addr = '{"bus": %s, "slot": %s}' % (pci_bridge_index, slot)
        contr_dict.update({'controller_addr': addr})
    xml = libvirt.create_controller_xml(contr_dict=contr_dict)
    attach(xml, params['controller_model'])
    xml = libvirt.create_controller_xml(contr_dict=contr_dict)
    detach(xml, params['controller_model'])
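
Note: params, pci_bridge_index, get_free_slot, attach and detach are free variables here; this sub-test is meant to be defined inside an enclosing run() test function that provides them.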
Example #5
def create_ccw_addr_controller(params):
    """
    Create one ccw address controller device

    :param params: dict of test parameters
    """

    contr_dict = {'controller_type': 'scsi', 'controller_index': '10'}

    ccw_addr_controller = libvirt.create_controller_xml(contr_dict)

    addr_dict = eval(params.get("addr_attrs"))
    ccw_addr_controller.address = ccw_addr_controller.new_controller_address(
        **{"attrs": addr_dict})
    logging.debug("create_ccw_addr_controller xml: %s", ccw_addr_controller)
    return ccw_addr_controller
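
A minimal usage sketch, assuming params is a mapping whose addr_attrs value is a dict literal string (the ccw address values are illustrative):

    params = {'addr_attrs': "{'cssid': '0xfe', 'ssid': '0x0', 'devno': '0x0042'}"}
    ccw_controller = create_ccw_addr_controller(params)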
Example #6
def get_pci_bridge_index(vm_name):
    """
    Get the index of usable pci bridge, add one if there is not

    :param vm_name: The name of the vm to be performed
    :return: The index of the pci bridge
    """
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    pci_controllers = vmxml.get_controllers('pci')
    for controller in pci_controllers:
        if controller.get('model') == 'pcie-to-pci-bridge':
            pci_bridge = controller
            break
    else:
        contr_dict = {'controller_type': 'pci',
                      'controller_model': 'pcie-to-pci-bridge'}
        pci_bridge = libvirt.create_controller_xml(contr_dict)
        libvirt.add_controller(vm_name, pci_bridge)
    return '%0#4x' % int(pci_bridge.get("index"))
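
A usage sketch; the returned hex string can seed a PCI address for a device placed behind the bridge (the VM name and slot values are hypothetical):

    bus = get_pci_bridge_index('avocado-vt-vm1')   # e.g. '0x03'
    addr_dict = {'type': 'pci', 'bus': bus, 'slot': '0x01'}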
Example #7
def run(test, params, env):
    """
    Test virsh {at|de}tach-device command.

    1) Prepare test environment and its parameters
    2) Operate virsh on one or more devices
    3) Check functionality of each device
    4) Check functionality of mmconfig option
    5) Restore domain
    6) Handle results
    """
    vm_name = params.get('main_vm')
    machine_type = params.get("machine_type", "pc")
    backup_vm_xml = vmxml = VMXML.new_from_inactive_dumpxml(vm_name)

    dev_obj = params.get("vadu_dev_objs")
    vadu_vdb = int(params.get("vadu_dev_obj_count_VirtualDiskBasic", "0"))
    vadu_dom_ref = params.get("vadu_dom_ref", "dom_ref")
    status_error = "yes" == params.get("status_error", "no")
    vadu_domain_positional = "yes" == params.get("vadu_domain_positional",
                                                 "no")
    vadu_file_positional = "yes" == params.get("vadu_file_positional", "no")
    vadu_preboot_error = "yes" == params.get("vadu_preboot_function_error",
                                             "no")

    # Skip chardev hotplug on rhel6 host as it is not supported
    if "Serial" in dev_obj:
        if not libvirt_version.version_compare(1, 1, 0):
            test.cancel("You libvirt version not supported"
                        " attach/detach Serial devices")
    # Prepare test environment and its parameters
    test_params = TestParams(params, env, test)

    xml_machine = vmxml.os.machine
    # Only apply to q35 machines with machine_type set correctly, under block-device conditions
    if 'q35' in xml_machine and machine_type == 'q35' and "VirtualDiskBasic" in dev_obj:
        # Only apply the change on cases with feature:
        # block.multi_virtio_file..normal_test.hot_attach_hot_vm..name_ref.file_positional.domain_positional
        # since those cases often fail with "No more available PCI slots"
        if vadu_vdb == 16 and not status_error \
            and not vadu_preboot_error and 'name' in vadu_dom_ref \
                and vadu_file_positional and vadu_domain_positional:

            previous_state_running = test_params.main_vm.is_alive()
            if previous_state_running:
                test_params.main_vm.destroy(gracefully=True)
            vmxml.remove_all_device_by_type('controller')
            machine_list = vmxml.os.machine.split("-")
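            # e.g. 'pc-q35-rhel8.2.0' -> ['pc', 'q35', 'rhel8.2.0']; keep the
            # prefix and version parts and force 'q35' in the middle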
            vmxml.set_os_attrs(
                **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
            q35_pcie_dict0 = {
                'controller_model': 'pcie-root',
                'controller_type': 'pci',
                'controller_index': 0
            }
            q35_pcie_dict1 = {
                'controller_model': 'pcie-root-port',
                'controller_type': 'pci'
            }
            vmxml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
            # Add enough controllers to satisfy the maximum disk-attach count
            for i in range(1, 24):
                q35_pcie_dict1.update({'controller_index': "%d" % i})
                vmxml.add_device(libvirt.create_controller_xml(q35_pcie_dict1))
            vmxml.sync()
            logging.debug("Guest XMl with adding many controllers: %s",
                          test_params.main_vm.get_xml())
            if previous_state_running:
                test_params.main_vm.start()

    remove_non_disks(vm_name, vmxml)
    update_controllers_ppc(vm_name, vmxml)

    if params.get("remove_all_chardev", "no") == "yes":
        remove_chardevs(vm_name, vmxml)

    logging.info("Preparing initial VM state")

    if test_params.start_vm:
        # Make sure VM is working
        test_params.main_vm.verify_alive()
        test_params.main_vm.wait_for_login().close()
    else:  # VM is not supposed to be started
        if test_params.main_vm.is_alive():
            test_params.main_vm.destroy(gracefully=True)
    # Capture backup of original XML early in test
    test_params.vmxml = VMXML.new_from_inactive_dumpxml(
        test_params.main_vm.name)
    # All devices should share the same access state
    test_params.virsh = virsh.Virsh(ignore_status=True)
    logging.info("Creating %d test device instances", len(test_params.devs))
    # Create test objects from cfg. class names via subclasses above
    test_devices = [
        globals()[class_name](test_params, test)  # instantiate
        for class_name in test_params.devs
    ]  # vadu_dev_objs
    operational_results = []
    preboot_results = []
    pstboot_results = []
    try:
        operational_action(test_params, test_devices, operational_results)
        # Fail early if attach-device return value is not expected
        analyze_results(test_params,
                        test,
                        operational_results=operational_results)

        #  Can't do functional testing with a cold VM, only test hot-attach
        preboot_action(test_params, test_devices, preboot_results)

        logging.info(
            "Preparing test VM state for post-boot functional testing")
        if test_params.start_vm:
            # Hard-reboot required
            test_params.main_vm.destroy(gracefully=True,
                                        free_mac_addresses=False)
        try:
            logging.debug("vmxml %s", VMXML.new_from_inactive_dumpxml(vm_name))
            test_params.main_vm.start()
        except virt_vm.VMStartError as details:
            test.fail('VM failed to start: %s' % details)
        # Signal devices reboot is finished
        for test_device in test_devices:
            test_device.booted = True
        logging.debug("Current VMXML %s", test_params.main_vm.get_xml())
        test_params.main_vm.wait_for_login().close()
        postboot_action(test_params, test_devices, pstboot_results)
        analyze_results(test_params,
                        test,
                        preboot_results=preboot_results,
                        pstboot_results=pstboot_results)
    finally:
        logging.info("Restoring VM from backup, then checking results")
        test_params.main_vm.destroy(gracefully=False, free_mac_addresses=False)
        test_params.vmxml.undefine()
        test_params.vmxml.restore()  # Recover the original XML
        test_params.vmxml.define()
        if not test_params.start_vm:
            # Test began with not start_vm, shut it down.
            test_params.main_vm.destroy(gracefully=True)
        # Device cleanup can raise multiple exceptions, do it last:
        logging.info("Cleaning up test devices")
        try:
            test_params.cleanup(test_devices)
        except RuntimeError as e:
            logging.debug("Error cleaning up devices: %s", e)
        backup_vm_xml.sync()
Example #8
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi target
    3. Create an iscsi pool/volume if needed
    4. Create an iscsi network disk XML
    5. Attach the disk with the XML file and check the disk inside the VM
    6. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    vg_name = params.get("virt_disk_vg_name", "vg_test_0")
    lv_name = params.get("virt_disk_lv_name", "lv_test_0")
    driver_packed = params.get("driver_packed", "on")
    disk_packed = "yes" == params.get("disk_packed", "no")
    scsi_packed = "yes" == params.get("scsi_packed", "no")

    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    if pool_type == "iscsi-direct":
        if not libvirt_version.version_compare(4, 7, 0):
            test.cancel("iscsi-direct pool is not supported in"
                        " current libvirt version.")
    if ((disk_packed or scsi_packed)
            and not libvirt_version.version_compare(6, 3, 0)):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")
    # Back up the VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Fix no more PCI slots issue in certain cases.
    vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    machine_type = params.get("machine_type", "pc")
    if machine_type == 'q35':
        vm_dump_xml.remove_all_device_by_type('controller')
        machine_list = vm_dump_xml.os.machine.split("-")
        vm_dump_xml.set_os_attrs(
            **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
        q35_pcie_dict0 = {
            'controller_model': 'pcie-root',
            'controller_type': 'pci',
            'controller_index': 0
        }
        q35_pcie_dict1 = {
            'controller_model': 'pcie-root-port',
            'controller_type': 'pci'
        }
        vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
        # Add enough controllers to satisfy repeated disk-attach requirements
        for i in range(1, 12):
            q35_pcie_dict1.update({'controller_index': "%d" % i})
            vm_dump_xml.add_device(
                libvirt.create_controller_xml(q35_pcie_dict1))
        vm_dump_xml.sync()

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
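            # virsh secret-set-value expects the secret as a base64-encoded string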
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        if disk_type == "block":
            iscsi_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=True,
                image_size="1G",
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        else:
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size='1G',
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = pool_target
            if chap_auth:
                pool_src_xml.auth_type = "chap"
                pool_src_xml.auth_username = chap_user
                pool_src_xml.secret_usage = secret_usage_target
                poolxml.set_source(pool_src_xml)
            if pool_type == "iscsi-direct":
                iscsi_initiator = params.get('iscsi_initiator')
                pool_src_xml.iqn_name = iscsi_initiator
                poolxml.set_source(pool_src_xml)
            # Create iscsi/iscsi-direct pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            xml = virsh.pool_dumpxml(disk_src_pool)
            logging.debug("Pool '%s' XML:\n%s", disk_src_pool, xml)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            if pool_type == "iscsi":
                process.run('qemu-img create -f qcow2 %s %s' %
                            (vol_path, '100M'),
                            shell=True,
                            verbose=True)
            else:
                # Get iscsi URL to create a qcow2 volume disk
                disk_path = ("iscsi://[%s]/%s/%s" %
                             (disk_src_host, iscsi_target, lun_num))
                blk_source = "/mnt/test.qcow2"
                process.run('qemu-img create -f qcow2 %s %s' %
                            (blk_source, '100M'),
                            shell=True,
                            verbose=True)
                process.run('qemu-img convert -O qcow2 %s %s' %
                            (blk_source, disk_path),
                            shell=True,
                            verbose=True)

        # Create block device
        if disk_type == "block":
            logging.debug("iscsi dev name: %s", iscsi_target)
            lv_utils.vg_create(vg_name, iscsi_target)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            if pool_type == "iscsi":
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2',
                    'source_mode': disk_src_mode
                }
            # iscsi-direct pools don't include the source_mode option
            else:
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2'
                }
        elif disk_type == "block":
            disk_params_src = {
                'source_file': device_source,
                'driver_type': 'raw'
            }
            # Start guest with packed attribute in disk
            if disk_packed:
                disk_params_src['driver_packed'] = driver_packed
            # Start guest with packed attribute in scsi controller
            if scsi_packed:
                scsi_controller = Controller("controller")
                scsi_controller.type = "scsi"
                scsi_controller.model = "virtio-scsi"
                scsi_controller.driver = {'packed': driver_packed}
                vm_dump_xml.add_device(scsi_controller)
                vm_dump_xml.sync()
        else:
            test.cancel("Unsupported disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth and disk_type != "volume":
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to be stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            # virsh snapshot-revert is not supported on combined internal and external snapshots
            # see more details at https://bugzilla.redhat.com/show_bug.cgi?id=1733173
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            virsh.snapshot_create_as(vm_name,
                                     snapshot_name2,
                                     ignore_status=False,
                                     debug=True)

            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "start_with_packed":
            expect_xml_line = "packed=\"%s\"" % driver_packed
            libvirt.check_dumpxml(vm, expect_xml_line)
            expect_qemu_line = "packed=%s" % driver_packed
            libvirt.check_qemu_cmd_line(expect_qemu_line)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupported operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name,
                                       disk_target,
                                       wait_remove_event=True)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if disk_type == "block":
                clean_up_lvm(iscsi_target, vg_name, lv_name)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
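
For reference, a sketch of the parameter dict that the network-disk branch above passes to libvirt.create_disk_xml (the IQN and host values are illustrative):

    disk_params = {
        'device_type': 'disk', 'type_name': 'network',
        'target_dev': 'vdb', 'target_bus': 'virtio', 'readonly': 'no',
        'source_protocol': 'iscsi',
        'source_name': 'iqn.2019-01.com.example:target/0',
        'source_host_name': '127.0.0.1', 'source_host_port': '3260',
    }
    disk_xml = libvirt.create_disk_xml(disk_params)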
Example #9
def run(test, params, env):
    """
    Test for basic controller device function.

    1) Define the VM w/o the specified controller device and check the result
       meets expectations
    2) Start the guest and check if the start result meets expectations
    3) Test the function of the started controller device
    4) Shut down the VM and clean up the environment
    """

    def setup_os_xml():
        """
        Prepare os part of VM XML.

        """
        osxml = vm_xml.os
        orig_machine = osxml.machine
        # avocado-vt only uses the virt machine type on aarch64
        if platform.machine() == 'aarch64':
            osxml.machine = 'virt'
            return

        if os_machine:
            osxml.machine = os_machine
            vm_xml.os = osxml
        else:
            cur_machine = orig_machine

    def setup_controller_xml(index, addr_target=None):
        """
        Prepare controller devices of VM XML.

        :param index: The index of controller
        :param addr_target: The controller address

        """
        ctrl = Controller(type_name=cntlr_type)
        if model:
            ctrl.model = model
        if pcihole:
            ctrl.pcihole64 = pcihole
        if vectors:
            ctrl.vectors = vectors
        if index:
            ctrl.index = index
        if chassisNr:
            ctrl.target = {'chassisNr': chassisNr}
        if model_name:
            ctrl.model_name = {'name': model_name}

        if addr_target:
            match = re.match(r"(?P<bus>[0-9]*):(?P<slot>[0-9a-f]*).(?P<function>[0-9])", addr_target)
            if match:
                addr_dict = match.groupdict()
                addr_dict['bus'] = hex(int(addr_dict['bus'], 16))
                addr_dict['slot'] = hex(int(addr_dict['slot'], 16))
                addr_dict['function'] = hex(int(addr_dict['function'], 16))
                addr_dict['domain'] = '0x0000'
                ctrl.address = ctrl.new_controller_address(attrs=addr_dict)

        logging.debug("Controller XML is:%s", ctrl)
        vm_xml.add_device(ctrl)

        if cmpnn_cntlr_model is not None:
            for num in range(int(cmpnn_cntlr_num)):
                ctrl = Controller(type_name=cntlr_type)
                ctrl.model = cmpnn_cntlr_model + str(num+1)
                ctrl.index = index
                logging.debug("Controller XML is:%s", ctrl)
                vm_xml.add_device(ctrl)

    def define_and_check(guest_xml):
        """
        Define the guest and check the result.

        :param guest_xml: The guest VMXML instance
        """
        fail_patts = []
        if expect_err_msg:
            fail_patts.append(expect_err_msg)
        guest_xml.undefine()
        res = vm_xml.virsh.define(guest_xml.xml)
        logging.debug("Expect failures: %s", fail_patts)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def start_and_check():
        """
        Predict the error message when starting and try to start the guest.
        """
        fail_patts = []
        if expect_err_msg:
            fail_patts.append(expect_err_msg)
        res = virsh.start(vm_name)
        logging.debug("Expect failures: %s", fail_patts)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def prepare_qemu_pattern(elem):
        """
        Collect the patterns to be searched in qemu command line.

        :param elem: a Controller object

        :return: A list including search patterns
        """
        search_qemu_cmd = []

        bus = int(elem.address.attrs.get('bus'), 0)
        slot = int(elem.address.attrs.get('slot'), 0)
        func = int(elem.address.attrs.get('function'), 0)
        addr_str = '%02d:%02d.%1d' % (bus, slot, func)
        name = elem.alias.get('name')
        if elem.model != 'dmi-to-pci-bridge':
            chassisNR = elem.target.get('chassisNr')
            value = "pci-bridge,chassis_nr=%s" % chassisNR
            value = "%s,id=%s,bus=pci.%d,addr=%#x" % (value, name, bus, slot)
        else:
            value = "%s" % elem.model_name['name']
            value = "%s,id=%s,bus=pcie.%d,addr=%#x" % (value, name, bus, slot)

        tup = ('-device', value)
        search_qemu_cmd.append(tup)
        return search_qemu_cmd
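    # For example, a pci-bridge with alias 'pci.1' at address 00:03.0 and
    # chassisNr 1 yields the search tuple
    # ('-device', 'pci-bridge,chassis_nr=1,id=pci.1,bus=pci.0,addr=0x3').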

    def get_patt_inx_ctl(cur_vm_xml, qemu_list, inx):
        """
        Get search pattern in qemu line for some kind of cases

        :param cur_vm_xml: Guest xml
        :param qemu_list: List for storing qemu search patterns
        :param inx: Controller index used

        :return: a tuple for (search_result, qemu_list)

        """
        (search_result, qemu_search) = check_cntrl(cur_vm_xml,
                                                   cntlr_type,
                                                   model,
                                                   inx, None, True)
        if qemu_search:
            qemu_list.extend(qemu_search)
        return (search_result, qemu_list)

    def get_patt_non_zero_bus(cur_vm_xml):
        """
        Get search pattern for multiple controllers with non-zero bus.

        :param cur_vm_xml: The guest VMXML instance
        :return: List, The search pattern list
        """
        actual_set = set()
        for elem in cur_vm_xml.devices.by_device_tag('controller'):
            if (elem.type == cntlr_type and elem.model == model):
                actual_set.add(int(elem.index))
                qemu_list = prepare_qemu_pattern(elem)
        expect_set = set()
        for num in range(1, int(pci_bus_number) + 1):
            expect_set.add(num)

        logging.debug("expect: %s, actual: %s", expect_set, actual_set)
        if (not actual_set.issubset(expect_set) or
                not expect_set.issubset(actual_set)):
            test.fail("The actual index set (%s)does "
                      "not match the expect index set "
                      "(%s)." % (actual_set, expect_set))
        return qemu_list

    def get_search_patt_qemu_line():
        """
        Check if the guest XML has the expected content.

        :return: -device pci-bridge,chassis_nr=1,id=pci.1,bus=pci.0,addr=0x3
        """
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        qemu_list = []
        # Check the pci-root controller has index = 0
        if no_pci_controller == "yes":
            (_, qemu_list) = get_patt_inx_ctl(cur_vm_xml,
                                              qemu_list, '0')
            return qemu_list

        # Check index numbers of pci-bridge controllers should be equal
        # to the pci_bus_number
        if int(pci_bus_number) > 0:
            return get_patt_non_zero_bus(cur_vm_xml)
        # All controllers should exist if there is a gap between two PCI
        # controller indexes
        if index and index_second and int(index) > 0 and int(index_second) > 0:
            for idx in range(int(index_second), int(index) + 1):
                (_, qemu_list) = get_patt_inx_ctl(cur_vm_xml,
                                                  qemu_list, str(idx))
            return qemu_list

        # All controllers should exist with index among [1..index]
        if index and int(index) > 0 and not index_second:
            for idx in range(1, int(index) + 1):
                (search_result, qemu_list) = get_patt_inx_ctl(cur_vm_xml,
                                                              qemu_list,
                                                              str(idx))
                if not search_result:
                    test.fail("Can not find %s controller "
                              "with index %s." % (model, str(idx)))
            return qemu_list

    def get_controller_addr(cntlr_type=None, model=None, index=None, cntlr_bus=None):
        """
        Get the address of testing controller from VM XML as a string with
        format
        a. "bus:slot.function" for pci address type
        b. "cssid:ssid.devno" for ccw address type

        :param cntlr_type: controller type, e.g. pci
        :param model: controller model, e.g. pcie-root-port
        :param index: controller index, e.g. '0'
        :param cntlr_bus: controller bus type, e.g. pci, ccw
        :return: a tuple including an address string, bus, slot,
                        function, multifunction
        """
        if model in ['pci-root', 'pcie-root']:
            return (None, None, None, None, None)

        addr_str = None
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)

        for elem in cur_vm_xml.devices.by_device_tag('controller'):
            if (
                    (cntlr_type is None or elem.type == cntlr_type) and
                    (model is None or elem.model == model) and
                    (index is None or elem.index == index)):
                addr_elem = elem.address
                if addr_elem is None:
                    test.error("Can not find 'Address' "
                               "element for the controller")
                p4 = None
                if 'ccw' == cntlr_bus:
                    p1 = int(addr_elem.attrs.get('cssid'), 0)
                    p2 = int(addr_elem.attrs.get('ssid'), 0)
                    p3 = int(addr_elem.attrs.get('devno'), 0)
                else:
                    p1 = int(addr_elem.attrs.get('bus'), 0)
                    p2 = int(addr_elem.attrs.get('slot'), 0)
                    p3 = int(addr_elem.attrs.get('function'), 0)
                    p4 = addr_elem.attrs.get('multifunction')
                addr_str = '%02d:%02x.%1d' % (p1, p2, p3)
                logging.debug("Controller address is %s", addr_str)
                return (addr_str, p1, p2, p3, p4)

        return (None, None, None, None, None)

    def check_controller_addr(cntlr_bus=None):
        """
        Check test controller address against expectation.

        :param cntlr_bus: controller bus type, e.g. pci, ccw
        """
        (addr_str, _, _, _, _) = get_controller_addr(cntlr_type, model, index, cntlr_bus)
        if model in ['pci-root', 'pcie-root']:
            if addr_str is None:
                return
            else:
                test.fail('Expected the controller to have no address, '
                          'but got "%s"' % addr_str)

        if index != 0:
            if '00:00' in addr_str:
                test.fail("Invalid PCI address 0000:00:00, "
                          "at least one of domain, bus, "
                          "or slot must be > 0")

        exp_addr_patt = r'00:[0-9]{2}\.[0-9]'
        if model in ['ehci']:
            exp_addr_patt = r'0[1-9]:[0-9]{2}\.[0-9]'
        if addr_str:
            exp_addr_patt = addr_str
        if 'ccw' == cntlr_bus:
            exp_addr_patt = r'254:\d+\.\d+'

        if not re.match(exp_addr_patt, addr_str):
            test.fail('Expected controller address to match "%s", '
                      'but got "%s"' % (exp_addr_patt, addr_str))

    def check_qemu_cmdline(search_pattern=None):
        """
        Check domain qemu command line against expectation.

        :param search_pattern: search list with tuple objects
        """
        with open('/proc/%s/cmdline' % vm.get_pid()) as proc_file:
            cmdline = proc_file.read()
        options = cmdline.split('\x00')
        logging.debug(options)
        # Search the command line options for the given patterns
        if search_pattern and isinstance(search_pattern, list):
            for pattern in search_pattern:
                key = pattern[0]
                value = pattern[1]
                logging.debug("key=%s, value=%s", key, value)
                found = False
                check_value = False
                for opt in options:
                    if check_value:
                        if re.findall(value, opt):
                            logging.debug("Found the expected (%s %s) in qemu "
                                          "command line" % (key, value))
                            found = True
                            break
                        check_value = False
                    if key == opt:
                        check_value = True
                if not found:
                    test.fail("Can not find '%s %s' in qemu "
                              "command line" % (key, value))

        # Get pcihole options from qemu command line
        pcihole_opt = ''
        for idx, opt in enumerate(options):
            if 'pci-hole64-size' in opt:
                pcihole_opt = opt

        # Get expected pcihole options from params
        exp_pcihole_opt = ''
        if (cntlr_type == 'pci' and model in ['pci-root', 'pcie-root'] and
           pcihole):
            if 'pc' in cur_machine:
                exp_pcihole_opt = 'i440FX-pcihost'
            elif 'q35' in cur_machine:
                exp_pcihole_opt = 'q35-pcihost'
            exp_pcihole_opt += '.pci-hole64-size=%sK' % pcihole

        # Check options against expectation
        if pcihole_opt != exp_pcihole_opt:
            test.fail('Expected qemu command line pcihole option "%s", '
                      'but got "%s"' % (exp_pcihole_opt, pcihole_opt))

        # Check usb options against expectation
        if cntlr_type == "usb":
            pattern = ""
            if cmpnn_cntlr_num is not None:
                for num in range(int(cmpnn_cntlr_num)):
                    name = (cmpnn_cntlr_model+str(num+1)).split('-')
                    pattern = pattern + r"-device.%s-usb-%s.*" % (name[0], name[1])
            elif model == "ehci":
                pattern = r"-device.usb-ehci"
            elif model == "qemu-xhci":
                pattern = r"-device.qemu-xhci"

            logging.debug("pattern is %s", pattern)

            if pattern and not re.search(pattern, cmdline):
                test.fail("Expect get usb model info in qemu cmdline, but failed!")

    def check_guest(cntlr_type, cntlr_model, cntlr_index=None, cntlr_bus=""):
        """
        Check status within the guest against expectation.

        :param cntlr_type: //controller@type, e.g. ide
        :param cntlr_model: //controller@model, e.g. virtio-scsi
        :param cntlr_index: //controller@index, e.g. '0'
        :param cntlr_bus: //controller/address@type, e.g. pci
        :raise avocado.core.exceptions.TestFail: Fails the test if checks fail
        :raise avocado.core.exceptions.TestError: Fails if test couldn't be fully executed
        :return: None
        """
        if model == 'pci-root' or model == 'pcie-root':
            return

        (addr_str, _, _, _, _) = get_controller_addr(cntlr_type=cntlr_type,
                                                     model=cntlr_model,
                                                     index=cntlr_index,
                                                     cntlr_bus=cntlr_bus)

        if 'ccw' == cntlr_bus:
            check_ccw_bus_type(addr_str)
        else:
            check_pci_bus_type(addr_str, cntlr_index, cntlr_model, cntlr_type)

    def check_ccw_bus_type(addr_str):
        """
        Uses lszdev to check for device info in guest.

        :param addr_str: Device address from libvirt
        :raise avocado.core.exceptions.TestFail: Fails the test if unexpected test values
        :raise avocado.core.exceptions.TestError: Fails if can't query dev info in guest
        :return: None
        """
        session = vm.wait_for_login(serial=True)
        cmd = 'lszdev generic-ccw --columns ID'
        status, output = session.cmd_status_output(cmd)
        logging.debug("lszdev output is: %s", output)
        if status:
            test.error("Failed to get guest device info, check logs.")
        devno = int(addr_str.split('.')[-1])
        devno_str = hex(devno).replace('0x', '').zfill(4)
        if devno_str not in output:
            test.fail("Can't find device with number %s in guest. Searched for %s in %s"
                      % (devno, devno_str, output))
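        # e.g. addr_str '254:00.66' -> devno 66 -> hex(66) = '0x42'
        # -> '0042' after stripping '0x' and zero-filling to 4 digits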

    def check_pci_bus_type(addr_str, cntlr_index, cntlr_model, cntlr_type):
        """
        Uses lspci to check for device info in guest.

        :param addr_str: Device address from libvirt
        :param cntlr_index: controller index
        :param cntlr_model: controller model
        :param cntlr_type: controller type
        :raise avocado.core.exceptions.TestError: Fails if device info not found
        :raise avocado.core.exceptions.TestFail: Fails if unexpected test values
        :return: None
        """
        pci_name = 'PCI bridge:'
        verbose_option = ""
        if cntlr_type == 'virtio-serial':
            verbose_option = '-vvv'
        if cntlr_index:
            logging.debug("%s, %s, %s", cntlr_type, cntlr_model, cntlr_index)
        if (addr_str is None and cntlr_model != 'pci-root'
                and cntlr_model != 'pcie-root'):
            test.error("Can't find target controller in XML")
        session = vm.wait_for_login(serial=True)
        status, output = session.cmd_status_output('lspci %s -s %s'
                                                   % (verbose_option, addr_str))
        logging.debug("lspci output is: %s", output)
        if (cntlr_type == 'virtio-serial' and
                (vectors and int(vectors) == 0)):
            if 'MSI' in output:
                test.fail("Expect MSI disable with zero vectors, "
                          "but got %s" % output)
        if (cntlr_type == 'virtio-serial' and
                (vectors is None or int(vectors) != 0)):
            if 'MSI' not in output:
                test.fail("Expect MSI enable with non-zero vectors, "
                          "but got %s" % output)
        if (cntlr_type == 'pci'):
            if pci_name not in output:
                test.fail("Can't find target pci device"
                          " '%s' on guest " % addr_str)

    def check_guest_by_pattern(patterns):
        """
        Search the command output with specified patterns

        :param patterns: patterns to search in guest. Type: str or list
        """
        logging.debug("Search pattern:{}".format(patterns))
        session = vm.wait_for_login(serial=True)
        libvirt.check_cmd_output('lspci', eval(patterns), session=session)
        session.close()

    def check_cntrl(vm_xml, cntlr_type, cntlr_model, cntlr_index,
                    check_dict, qemu_pattern):
        """
        Check the controller or get the controller's search patterns.
        Currently check_dict and qemu_pattern are not
        supported to be used at same time.

        :param vm_xml: the guest VMXML instance
        :param cntlr_type: the controller type
        :param cntlr_model: the controller's model
        :param cntlr_index: the controller's index
        :param check_dict: the dict for checking in the controller
        :param qemu_pattern: True if it needs to be checked with the qemu
                             command line. False if not.
        :return: Tuple (Controller, List) if qemu_pattern
                 Controller: the controller found.
                 List: a list including qemu search patterns
        :return: None if check_dict
        :raise test.fail if the model name is not expected
        :raise test.error if the controller is not found
        """
        qemu_list = None
        for elem in vm_xml.devices.by_device_tag('controller'):
            if (cntlr_type == elem.type and cntlr_model == elem.model):
                if cntlr_index and cntlr_index != elem.index:
                    continue
                if qemu_pattern:
                    if cntlr_model not in ['pci-root', 'pcie-root']:
                        qemu_list = prepare_qemu_pattern(elem)
                    return (elem, qemu_list)
                if check_dict:
                    logging.debug("Checking list {}".format(check_dict))
                    if ('modelname' in check_dict and
                            elem.model_name['name'] != check_dict['modelname']):
                        test.fail("Can't find the expected model name {} "
                                  "with (type:{}, model:{}, index:{}), "
                                  "found {}".format(check_dict['modelname'],
                                                    cntlr_type,
                                                    cntlr_model,
                                                    cntlr_index,
                                                    elem.model_name['name']))
                    if ('busNr' in check_dict and
                            elem.target['busNr'] != check_dict['busNr']):
                        test.fail("Can't find the expected busNr {} "
                                  "with (type:{}, model:{}, index:{}), "
                                  "found {}".format(check_dict['busNr'],
                                                    cntlr_type,
                                                    cntlr_model,
                                                    cntlr_index,
                                                    elem.target['busNr']))
                    else:
                        logging.debug("Check controller successfully")
                        return
        test.error("Can't find the specified controller with "
                   "(type:{}, model:{}, index:{})".format(cntlr_type,
                                                          cntlr_model,
                                                          cntlr_index))

    def detach_device(vm_name):
        """
        Detach a device from the given guest

        :param vm_name: The guest name
        :return: None
        """
        attach_dev_type = params.get("attach_dev_type", 'disk')
        detach_option = params.get("detach_option")
        if attach_dev_type == 'interface':
            ret = virsh.detach_interface(vm_name, detach_option,
                                         **virsh_dargs)
        else:
            logging.debug("No need to detach any device.")

    def attach_device(vm_name):
        """
        Attach devices to the guest for some times

        :param vm_name: The guest name
        :return: None
        """
        attach_count = params.get("attach_count", '1')
        attach_dev_type = params.get("attach_dev_type", 'disk')
        attach_option = params.get("attach_option")
        if attach_option and attach_option.count('--address '):
            index_str = "%02x" % int(auto_indexes_dict['pcie-root-port'][0])
            attach_option = attach_option % index_str
        for count in range(0, int(attach_count)):
            if attach_dev_type == 'disk':
                file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                libvirt.create_local_disk('file', file_path, size='1')
                ret = virsh.attach_disk(vm_name,
                                        file_path,
                                        params.get('dev_target', 'vdb'),
                                        extra=attach_option,
                                        **virsh_dargs)
            elif attach_dev_type == 'interface':
                ret = virsh.attach_interface(vm_name,
                                             attach_option,
                                             **virsh_dargs)
            else:
                logging.debug("No need to attach any device.")
                break

    def check_detach_attach_result(vm_name, cmd, pattern, expect_output, option='--hmp'):
        """
        Check the attach/detach result by qemu_monitor_command.

        :param vm_name: guest name
        :param cmd: the command for qemu_monitor_command
        :param pattern: regular expr used to search
                        the output of qemu_monitor_command
        :param expect_output: the expected output for qemu_monitor_command
        :param option: option for qemu_monitor_command
        :raise test.fail if the pattern is not matched
        :return: True if the output equals expect_output, when no pattern is given
        """
        ret = virsh.qemu_monitor_command(vm_name, cmd, option)
        libvirt.check_result(ret)
        if pattern and expect_output:
            if not re.findall(pattern, ret.stdout.strip()):
                test.fail("Can't find the pattern '{}' in "
                          "qemu monitor command "
                          "output'{}'".format(pattern,
                                              ret.stdout.strip()))
        else:
            return expect_output == ret.stdout.strip()

    def check_guest_by_cmd(cmds, expect_error=False):
        """
        Execute the command within guest and check status

        :param cmds: Str or List, The command executed in guest
        :param expect_error: True if the command is expected to fail
        :return: None
        :raise test.fail if command status is not as expected
        """
        def _check_cmd_result(cmd):
            logging.debug("Command in guest gets result: %s", output)
            if status and not expect_error:
                test.fail("Command '{}' fails in guest with status "
                          "'{}'".format(cmd, status))
            elif status and expect_error:
                logging.debug("Command '{}' fails in guest as "
                              "expected".format(cmd))
            elif not status and not expect_error:
                logging.debug("Check guest by command successfully")
            else:
                test.fail("Check guest by command successfully, "
                          "but expect failure")

        logging.debug("Execute command '{}' in guest".format(cmds))
        session = vm.wait_for_login(serial=True)
        (status, output) = (None, None)
        if isinstance(cmds, str):
            status, output = session.cmd_status_output(cmds)
            _check_cmd_result(cmds)
        elif isinstance(cmds, list):
            for cmd in cmds:
                if isinstance(cmd, str):
                    status, output = session.cmd_status_output(cmd)
                    _check_cmd_result(cmd)
                elif isinstance(cmd, dict):
                    for cmd_key in cmd.keys():
                        status, output = session.cmd_status_output(cmd_key)
                        if output.strip() != cmd[cmd_key]:
                            test.fail("Command '{}' does not get "
                                      "expect result {}, but found "
                                      "{}".format(cmd_key,
                                                  cmd[cmd_key],
                                                  output.strip()))
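
    # 'cmds' accepts several shapes (hypothetical guest commands shown):
    #   check_guest_by_cmd('lspci')                    # single command
    #   check_guest_by_cmd(['lspci', 'lsblk'])         # list of commands
    #   check_guest_by_cmd([{'cat /sys/class/net/eth0/operstate': 'up'}])
    #                                        # command -> expected output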

    def get_device_bus(vm_xml, device_type):
        """
        Get the bus that the devices are attached to.

        :param vm_xml: Guest xml
        :param device_type: The type of device, like disk, interface
        :return: a list of buses the devices are attached to
        """
        devices = vm_xml.get_devices(device_type=device_type)
        bus_list = []
        for device in devices:
            logging.debug("device:{}".format(device))
            bus = device.address.attrs['bus']
            logging.debug("This device's bus:{}".format(bus))
            bus_list.append(bus)
        return bus_list

    def add_device_xml(vm_xml, device_type, device_cfg_dict):
        """
        Add a device xml to the existing vm xml

        :param vm_xml: the existing vm xml object
        :param device_type: type of device to be added
        :param device_cfg_dict: the configuration of the device
        :return: None
        """
        vm_xml.remove_all_device_by_type(device_type)
        dev_obj = vm_xml.get_device_class(device_type)()

        dev_cfg = eval(device_cfg_dict)
        if device_type == 'sound':
            dev_obj.model_type = dev_cfg.get("model")
        elif device_type == 'rng':
            dev_obj.rng_model = dev_cfg.get("model")
            rng_backend = dev_obj.Backend()
            rng_backend.backend_model = "random"
            dev_obj.backend = rng_backend
        elif device_type == 'memballoon':
            dev_obj.model = dev_cfg.get("model")
        if 'bus' in dev_cfg:
            addr_dict = {'bus': dev_cfg.get("bus"),
                         'type': dev_cfg.get("type", "pci"),
                         'slot': dev_cfg.get("slot", "0x00")}
            if device_type == 'rng':
                dev_obj.address = dev_obj\
                    .new_rng_address(**{"attrs": addr_dict})
            else:
                dev_obj.address = addr_dict
        vm_xml.add_device(dev_obj)
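
    # device_cfg_dict is a string that gets eval()ed into a dict; a
    # hypothetical example attaching a sound device at a fixed pci bus:
    #   add_device_xml(vm_xml, 'sound', "{'model': 'ich6', 'bus': '0x04'}")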

    def check_multifunction():
        """
        Check if multifunction is found in vm xml for specified controller

        :raise: test.fail if multifunction is not as expected
        """
        (_, _, _, _, multi_func) = get_controller_addr(cntlr_type, model, '0')
        if not multi_func or multi_func != 'on':
            test.fail("Can't find multifunction=on in certain "
                      "controller(type:{}, model:{}, "
                      "index:{})".format(cntlr_type, model, 0))

    os_machine = params.get('machine_type', None)
    libvirt.check_machine_type_arch(os_machine)
    cntlr_type = params.get('controller_type', None)
    model = params.get('controller_model', None)
    index = params.get('controller_index', None)
    vectors = params.get('controller_vectors', None)
    pcihole = params.get('controller_pcihole64', None)
    chassisNr = params.get('chassisNr', None)
    addr_str = params.get('controller_address', None)
    cmpnn_cntlr_model = params.get('companion_controller_model', None)
    cmpnn_cntlr_num = params.get('companion_controller_num', None)
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    no_pci_controller = params.get("no_pci_controller", "no")
    pci_bus_number = params.get("pci_bus_number", "0")
    remove_address = "yes" == params.get("remove_address", "yes")
    remove_contr = "yes" == params.get("remove_contr", "yes")
    setup_controller = params.get("setup_controller", "yes")
    index_second = params.get("controller_index_second", None)
    cntlr_bus = params.get('controller_bus')
    cur_machine = os_machine
    check_qemu = "yes" == params.get("check_qemu", "no")
    check_within_guest = "yes" == params.get("check_within_guest", "no")
    run_vm = "yes" == params.get("run_vm", "no")
    second_level_controller_num = params.get("second_level_controller_num", "0")
    check_contr_addr = "yes" == params.get("check_contr_addr", "yes")
    qemu_patterns = params.get("qemu_patterns")
    status_error = "yes" == params.get("status_error", "no")
    model_name = params.get("model_name", None)
    expect_err_msg = params.get("err_msg", None)
    new_pcie_root_port_model = params.get("new_model")
    old_pcie_root_port_model = params.get("old_model")
    add_contrl_list = params.get("add_contrl_list")
    auto_bus = "yes" == params.get("auto_bus", "no")
    check_cntrls_list = params.get("check_cntrls_list")
    sound_dict = params.get("sound_dict")
    balloon_dict = params.get("balloon_dict")
    rng_dict = params.get("rng_dict")
    guest_patterns = params.get("guest_patterns")
    attach_option = params.get("attach_option")
    detach_option = params.get("detach_option")
    attach_dev_type = params.get("attach_dev_type", 'disk')
    remove_nic = "yes" == params.get("remove_nic", 'no')
    qemu_monitor_cmd = params.get("qemu_monitor_cmd")
    cmd_in_guest = params.get("cmd_in_guest")
    check_dev_bus = "yes" == params.get("check_dev_bus", "no")
    cpu_numa_cells = params.get("cpu_numa_cells")
    virsh_dargs = {'ignore_status': False, 'debug': True}
    auto_indexes_dict = {}
    auto_index = params.get('auto_index', 'no') == 'yes'
    auto_slot = params.get('auto_slot', 'no') == 'yes'

    libvirt_version.is_libvirt_feature_supported(params)

    if index and index_second:
        if int(index) > int(index_second):
            test.error("Invalid parameters")

    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    try:
        if remove_contr:
            vm_xml.remove_all_device_by_type('controller')
        if remove_address:
            remove_devices(vm_xml, 'address')
        remove_devices(vm_xml, 'usb')
        if remove_nic:
            remove_devices(vm_xml, 'interface')
        # Get the max controller index in current vm xml
        the_model = 'pci-root' if 'ppc' in platform.machine() else 'pcie-root-port'
        if add_contrl_list:
            ret_indexes = libvirt_pcicontr.get_max_contr_indexes(vm_xml, 'pci', the_model)
            if ret_indexes and len(ret_indexes) > 0:
                if auto_bus:
                    new_index = "0x%02x" % (int(ret_indexes[0]) + 1)
                    add_contrl_list = re.sub(r"'bus': '%s'", "'bus': '%s'" % new_index, add_contrl_list, count=5)
                    logging.debug("bus is set automatically with %s", new_index)
                if auto_slot:
                    available_slot = libvirt_pcicontr.get_free_pci_slot(vm_xml)
                    if not available_slot:
                        test.error("No pci slot is available any more. Please check your vm xml.")
                    add_contrl_list = re.sub(r"'slot': '%s'", "'slot': '%s'" % available_slot, add_contrl_list, count=5)
                    logging.debug("slot is set automatically with %s", available_slot)
                if auto_index:
                    new_index = int(ret_indexes[0]) + 1
                    add_contrl_list = re.sub(r"'index': '%s'", "'index': '%s'" % new_index, add_contrl_list, count=5)
                    logging.debug("index is set automatically with %s", new_index)
        logging.debug("Now add_contrl_list=%s", add_contrl_list)

        if setup_controller == "yes":
            if add_contrl_list:
                contrls = eval(add_contrl_list)
                for one_contrl in contrls:
                    contr_dict = {}
                    cntl_target = ''
                    if 'model' in one_contrl:
                        contr_dict.update({'controller_model': one_contrl['model']})
                    if 'busNr' in one_contrl:
                        cntl_target = "{'busNr': %s}" % one_contrl['busNr']
                    if 'chassisNr' in one_contrl:
                        cntl_target += "{'chassisNr': '%s'}" % one_contrl['chassisNr']
                    if 'alias' in one_contrl:
                        contr_dict.update({'contr_alias': one_contrl['alias']})
                    if 'type' in one_contrl:
                        contr_dict.update({'controller_type': one_contrl['type']})
                    else:
                        contr_dict.update({'controller_type': 'pci'})
                    if 'node' in one_contrl:
                        contr_dict.update({'controller_node': one_contrl['node']})
                    if 'index' in one_contrl:
                        contr_dict.update({'controller_index': one_contrl['index']})
                    contr_dict.update({'controller_target': cntl_target})
                    addr = None
                    if 'bus' in one_contrl:
                        addr = {'bus': one_contrl['bus']}
                        if 'slot' in one_contrl:
                            addr.update({'slot': one_contrl['slot']})
                            if 'func' in one_contrl:
                                addr.update({'function': one_contrl['func']})
                    if addr:
                        contr_dict.update({'controller_addr': str(addr)})
                    logging.debug(contr_dict)
                    controller_add = libvirt.create_controller_xml(contr_dict)
                    vm_xml.add_device(controller_add)
                    logging.debug("Add a controller: %s" % controller_add)
            else:
                if index_second:
                    setup_controller_xml(index_second)
                setup_controller_xml(index, addr_str)
                if second_level_controller_num:
                    for indx in range(2, int(second_level_controller_num) + 2):
                        addr_second = "0%s:0%s.0" % (index, str(indx))
                        setup_controller_xml(str(indx), addr_second)

        setup_os_xml()
        if int(pci_bus_number) > 0:
            address_params = {'bus': "%0#4x" % int(pci_bus_number), 'slot': "%0#4x" % int(pci_bus_number)}
            libvirt.set_disk_attr(vm_xml, 'vda', 'address', address_params)
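            # "%0#4x" zero-pads the hex form to 4 characters, e.g.
            # "%0#4x" % 2 == '0x02', matching the 0xNN bus/slot notation
            # used in the domain xml.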
        if cpu_numa_cells:
            if not vm_xml.cpu:
                vmxml_cpu = VMCPUXML()
                vmxml_cpu.xml = "<cpu mode='host-model'><numa/></cpu>"
            else:
                vmxml_cpu = vm_xml.cpu
                logging.debug("Existing cpu configuration in guest xml:\n%s", vmxml_cpu)
                vmxml_cpu.mode = 'host-model'
                if platform.machine() == 'aarch64':
                    vmxml_cpu.mode = 'host-passthrough'
                vmxml_cpu.remove_elem_by_xpath('/model')
                vmxml_cpu.remove_elem_by_xpath('/numa')
            vmxml_cpu.numa_cell = VMCPUXML.dicts_to_cells(eval(cpu_numa_cells))
            vm_xml.cpu = vmxml_cpu
            vm_xml.vcpu = int(params.get('vcpu_count', 4))
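            # cpu_numa_cells is eval()ed into a list of cell dicts; a
            # hypothetical two-cell value:
            #   "[{'cpus': '0-1', 'memory': '512000'},
            #     {'cpus': '2-3', 'memory': '512000'}]"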
        if sound_dict:
            add_device_xml(vm_xml, 'sound', sound_dict)
        if rng_dict:
            add_device_xml(vm_xml, 'rng', rng_dict)
        if balloon_dict:
            add_device_xml(vm_xml, 'memballoon', balloon_dict)

        logging.debug("Test VM XML before define is %s" % vm_xml)
        if not define_and_check(vm_xml):
            logging.debug("Can't define the VM, exiting.")
            return
        vm_xml = VMXML.new_from_dumpxml(vm_name)
        logging.debug("Test VM XML after define is %s" % vm_xml)
        if auto_index:
            contrls = eval(add_contrl_list)
            for one_contrl in contrls:
                ret_indexes = libvirt_pcicontr.get_max_contr_indexes(vm_xml,
                                                                     one_contrl.get('type', 'pci'),
                                                                     one_contrl.get('model'))
                auto_indexes_dict.update({one_contrl['model']: ret_indexes})
        if check_contr_addr:
            check_controller_addr(cntlr_bus)
        if new_pcie_root_port_model and old_pcie_root_port_model:
            if utils_misc.compare_qemu_version(2, 9, 0, False):
                expect_model = new_pcie_root_port_model
            else:
                expect_model = old_pcie_root_port_model
            logging.debug("Expect the model for 'pcie-root-port': "
                          "%s" % expect_model)
            check_dict = {'modelname': expect_model}
            check_cntrl(vm_xml, 'pci', 'pcie-root-port',
                        '2', check_dict, False)
        if check_cntrls_list:
            for check_one in eval(check_cntrls_list):
                logging.debug("The controller to be checked: {}".format(check_one))
                check_cntrl(vm_xml, check_one.get('type', 'pci'), check_one.get('model'),
                            check_one.get('index'), check_one, False)
        if run_vm:
            try:
                if not start_and_check():
                    logging.debug("Can't start the VM, exiting.")
                    return
            except virt_vm.VMStartError as detail:
                test.fail(detail)

        # Need coldplug/hotplug
        if attach_option:
            attach_device(vm_name)
            vm_xml = VMXML.new_from_dumpxml(vm_name)
            logging.debug("Guest xml after attaching device:{}".format(vm_xml))
            # Check device's bus if needed
            if check_dev_bus:
                buses = get_device_bus(vm_xml, attach_dev_type)
                if len(buses) == 0:
                    test.fail("No bus was found")
                if buses[0] != params.get("expect_bus"):
                    test.fail("The expected bus for device is {}, "
                              "but found {}".format(params.get("expect_bus"),
                                                    buses[0]))
        if qemu_monitor_cmd:
            check_detach_attach_result(vm_name,
                                       qemu_monitor_cmd,
                                       params.get("qemu_monitor_pattern"),
                                       None)
        # Check guest xml
        if attach_dev_type == 'interface' and 'e1000e' in attach_option:
            cntls = vm_xml.get_controllers(controller_type='pci', model='pcie-root-port')
            cntl_index_list = []
            for cntl in cntls:
                cntl_index_list.append(cntl.get('index'))
            logging.debug("All pcie-root-port controllers' "
                          "index: {}".format(cntl_index_list))
            bus_list = get_device_bus(vm_xml, "interface")
            for bus in bus_list:
                if str(int(bus, 16)) not in cntl_index_list:
                    test.fail("The attached NIC with bus '{}' is not attached "
                              "to any pcie-root-port by default".format(bus))
        if check_qemu:
            if qemu_patterns:
                if auto_index:
                    index_str = "%x" % int(auto_indexes_dict['pcie-root-port'][0])
                    qemu_patterns = qemu_patterns % index_str
                    logging.debug("qemu_patterns=%s", qemu_patterns)
                if qemu_patterns.count('multifunction=on'):
                    check_multifunction()
                search_qemu_cmd = eval(qemu_patterns)
                logging.debug(search_qemu_cmd)
            else:
                search_qemu_cmd = get_search_patt_qemu_line()
            check_qemu_cmdline(search_pattern=search_qemu_cmd)
            vm.wait_for_login().close()

        if check_within_guest:
            try:
                if int(pci_bus_number) > 0:
                    for contr_idx in range(1, int(pci_bus_number) + 1):
                        check_guest(cntlr_type, model, str(contr_idx))
                    return
                if index:
                    check_max_index = int(index) + int(second_level_controller_num)
                    for contr_idx in range(1, int(check_max_index) + 1):
                        check_guest(cntlr_type, model, str(contr_idx))
                elif guest_patterns:
                    check_guest_by_pattern(guest_patterns)
                elif cmd_in_guest:
                    check_guest_by_cmd(eval(cmd_in_guest))
                else:
                    check_guest(cntlr_type, model, cntlr_bus=cntlr_bus)
                    if model == 'pcie-root':
                        # Need check other auto added controller
                        check_guest(cntlr_type, 'dmi-to-pci-bridge', '1')
                        check_guest(cntlr_type, 'pci-bridge', '2')
            except remote.LoginTimeoutError as e:
                logging.debug(e)
                if not status_error:
                    raise
        # Need hotunplug
        if detach_option:
            detach_device(vm_name)
            if qemu_monitor_cmd:
                check_detach_attach_result(vm_name,
                                           qemu_monitor_cmd,
                                           params.get("qemu_monitor_pattern"),
                                           "")
            if cmd_in_guest:
                check_guest_by_cmd(eval(cmd_in_guest), expect_error=True)

    finally:
        vm_xml_backup.sync()
Example no. 10
def run(test, params, env):
    """
    Test rbd disk device.

    1. Prepare the test environment, destroy or suspend a VM.
    2. Prepare the disk image.
    3. Edit the disk xml and start the domain.
    4. Perform test operations.
    5. Recover the test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    additional_xml_file = os.path.join(TMP_DATA_DIR, "additional_disk.xml")

    def config_ceph():
        """
        Write the ceph monitor hosts into the config file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')
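        # For example (hypothetical hosts), disk_src_host "10.0.0.1 10.0.0.2"
        # with disk_src_port "6789 6789" writes:
        #   mon_host = 10.0.0.1:6789,10.0.0.2:6789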

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(disk_src_host.split(),
                                              disk_src_port.split()):
                source_host.append({'name': host_name, 'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = (
                    "--auth-type %s --auth-username %s --secret-usage '%s'" %
                    (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(
                    pool_name, mon_host, disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: volume parameters dict
        :return: None
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name,
                               vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name,
                                 "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = r'\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)
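
        # A hypothetical assembled pipeline (single host, cephx auth; note
        # the auth part extends the same grep pattern as the disk source):
        #   ps -ef | grep avocado-vt-vm1 | grep -v grep \
        #     | grep file=rbd:rbd/test.img:id=libvirt:auth_supported=cephx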

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(TMP_DATA_DIR, "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(TMP_DATA_DIR, "rbd.mem")
        snap_disk = os.path.join(TMP_DATA_DIR, "rbd.disk")
        xml_snap_exp = [
            "disk name='%s' snapshot='external' type='file'" % target_dev
        ]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        elif test_disk_readonly:
            if libvirt_version.version_compare(6, 0, 0):
                libvirt.check_result(ret)
            else:
                libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(TMP_DATA_DIR, "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")

        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.
        :param vm_obj: VM guest object
        :param target: disk dev in VM
        :param old_parts: partitions present before the disk was added
        :param read_only: True if the disk is expected to be read-only
        :return: True if check successfully
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
            elif target.startswith("sd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)".format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # For a read-only fs, check the error messages: whatever the
            # exit status, "Read-only file system" should appear in the
            # command output.
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots for the rbd_vol.img volume, unprotect them,
        and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True,
                                shell=True).stdout_text
        snap_names = []
        if snaps_out:
            for line in snaps_out.rsplit("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.rsplit()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect the snapshots first, otherwise purging the volume will fail
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt,
                                 os.path.join(disk_src_pool, vol_name),
                                 snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume, and then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)
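
        # The parsing above assumes 'rbd snap list' lines of four
        # whitespace-separated fields with the snapshot name in the second
        # column, e.g. (hypothetical): "4 snap1 1024MB 2024-01-01"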

    def make_snapshot():
        """
        Make external snapshots.

        :return: external snapshot path list
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(TMP_DATA_DIR, "mem.s2")
        snapshot3_file = os.path.join(TMP_DATA_DIR, "mem.s3")
        snapshot4_file = os.path.join(TMP_DATA_DIR, "mem.s4")
        snapshot4_disk_file = os.path.join(TMP_DATA_DIR, "disk.s4")
        snapshot5_file = os.path.join(TMP_DATA_DIR, "mem.s5")
        snapshot5_disk_file = os.path.join(TMP_DATA_DIR, "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {
            "s1":
            "s1 --disk-only --no-metadata",
            "s2":
            "s2 --memspec %s --no-metadata" % snapshot2_file,
            "s3":
            "s3 --memspec %s --no-metadata --live" % snapshot3_file,
            "s4":
            "s4 --memspec %s --diskspec vda,file=%s --no-metadata" %
            (snapshot4_file, snapshot4_disk_file),
            "s5":
            "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" %
            (snapshot5_file, snapshot5_disk_file)
        }
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name,
                                           snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(
                    first_disk_source.replace('qcow2', snapshot_name))
        return snapshot_path_list

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = secret_list_result.stdout_text.strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # Parse entries only if the secret list is not empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace and take the first column (the UUID)
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    rbd_blockcopy = "yes" == params.get("rbd_blockcopy", "no")
    enable_slice = "yes" == params.get("enable_slice", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume",
                                           "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get(
        "test_disk_internal_snapshot", "no")
    test_disk_external_snapshot = "yes" == params.get(
        "test_disk_external_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol",
                                                    "no")
    disk_snapshot_with_sanlock = "yes" == params.get(
        "disk_internal_with_sanlock", "no")
    auth_place_in_source = params.get("auth_place_in_source")
    test_target_bus = "yes" == params.get("scsi_target_test", "no")

    # Start with an empty config path so cleanup can tell whether a config
    # file was created during the test
    ceph_cfg = ""
    # Create the ceph config file if it doesn't exist
    ceph_cfg = ceph.create_config_file(mon_host)

    # After libvirt 3.9.0, the auth element can be put into the source part.
    if auth_place_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel(
            "Placing auth in source is not supported by the current libvirt version")

    # After libvirt 6.0.0, the blockcopy rbd backend feature is supported.
    if rbd_blockcopy and not libvirt_version.version_compare(6, 0, 0):
        test.cancel(
            "blockcopy rbd backend is not supported in current libvirt version"
        )

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name,
                                        guest_name,
                                        True,
                                        timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(TMP_DATA_DIR, "ceph.key")
    img_file = os.path.join(TMP_DATA_DIR, "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(TMP_DATA_DIR,
                                      "%s_frontend_test.img" % vm_name)
    # Construct an unsupported-error message list to skip these kinds of tests
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append(
                'unsupported configuration: external snapshot ' +
                'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append(
            'unsupported configuration: internal snapshot for disk ' +
            'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up any leftover secrets in the test environment.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("Failed to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("Failed to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = '******'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict[
                "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict[
                "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict[
                "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict[
                "sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict[
                "chown sanlock:sanlock " +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc"
            sanlock_cmd_dict[
                "sanlock client add_lockspace -s TEST_LS:1:" +
                "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {
                'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'
            }
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()

        # Install the ceph-common package, which includes the rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" % (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

                secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
                logging.debug("Secret uuid %s", secret_uuid)
                if secret_uuid is None:
                    test.error("Failed to get secret uuid")

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
        disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" % (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        targetbus = params.get("disk_target_bus", "virtio")
        if test_target_bus:
            targetdev = params.get("disk_target", "sdb")
            targetbus = params.get("disk_target_bus", "scsi")
            # Add virtio-scsi controller for sd* test
            controller_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            conduct = {'controller_type': targetbus}
            ctrl = libvirt.create_controller_xml(conduct)
            libvirt.add_controller(vm.name, ctrl)
            logging.debug("Controller XML is:%s", ctrl)
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": targetbus,
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port
        })
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt, disk_src_name,
                                          disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {
                "name": vol_name,
                "capacity": int(vol_cap),
                "capacity_unit": vol_cap_unit,
                "format": disk_format
            }

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        elif rbd_blockcopy:
            # Create one disk to attach to VM as second disk device
            second_disk_params = {}
            disk_size = params.get("virt_disk_device_size", "50M")
            device_source = libvirt.create_local_disk("file",
                                                      img_file,
                                                      disk_size,
                                                      disk_format="qcow2")
            second_disk_params.update({"source_file": device_source})
            second_disk_params.update({"driver_type": "qcow2"})
            second_xml_file = libvirt.create_disk_xml(second_disk_params)
            opts = params.get("attach_option", "--config")
            ret = virsh.attach_device(vm_name,
                                      second_xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret)
        else:
            # Create an local image and make FS on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = (
                "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format,
                               img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # After blockdev was introduced, qemu-img warns that RBD options
                # encoded in the filename as key/value pairs are deprecated
                if libvirt_version.version_compare(6, 0, 0):
                    test.cancel(
                        "qemu-img: warning: RBD options encoded in the filename as keyvalue pairs in json format is deprecated"
                    )
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}' %
                            (disk_src_name, mon_host))
                # Pass a different json string according to the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' %
                                (json_str[:-2], auth_user, auth_key))
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # If hot plugging, start the VM first and wait for the OS to boot.
        # Otherwise stop the VM if it is running.
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # Auth options are not needed for volume disks
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            # After libvirt 3.9.0, the auth element can be placed in the source part.
            if auth_place_in_source:
                params.update({"auth_in_source": auth_place_in_source})
            xml_file = libvirt.create_disk_xml(params)
            if additional_guest:
                # Copy xml_file for additional guest VM.
                shutil.copyfile(xml_file, additional_xml_file)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                    additional_vm.wait_for_login().close()
                ret = virsh.attach_device(guest_name,
                                          additional_xml_file,
                                          "",
                                          debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif rbd_blockcopy:
            if enable_slice:
                disk_cmd = ("rbd -m %s %s create %s --size 400M 2> /dev/null" %
                            (mon_host, key_opt, disk_src_name))
                process.run(disk_cmd, ignore_status=False, shell=True)
                slice_dict = {
                    "slice_type": "storage",
                    "slice_offset": "12345",
                    "slice_size": "52428800"
                }
                params.update({"disk_slice": slice_dict})
                logging.debug("Created one volume on the ceph backend "
                              "storage for slice testing")
            # Create one file on VM before doing blockcopy
            try:
                session = vm.wait_for_login()
                cmd = (
                    "mkfs.ext4 -F /dev/{0} && mount /dev/{0} /mnt && ls /mnt && (sleep 3;"
                    " touch /mnt/rbd_blockcopyfile; umount /mnt)".format(
                        targetdev))
                s, o = session.cmd_status_output(cmd, timeout=60)
                session.close()
                logging.info(
                    "Created one file on the newly added disk in VM: %s, %s",
                    s, o)
            except (remote.LoginError, virt_vm.VMError,
                    aexpect.ShellError) as e:
                logging.error(str(e))
            # Create rbd backend xml
            rbd_blockcopy_xml_file = libvirt.create_disk_xml(params)
            logging.debug("The rbd blockcopy xml is: %s" %
                          rbd_blockcopy_xml_file)
            dest_path = " --xml %s" % rbd_blockcopy_xml_file
            options1 = params.get("rbd_pivot_option",
                                  " --wait --verbose --transient-job --pivot")
            extra_dict = {'debug': True}
            cmd_result = virsh.blockcopy(vm_name, targetdev, dest_path,
                                         options1, **extra_dict)
            libvirt.check_exit_status(cmd_result)
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait until the VM is running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()

        # Since blockdev was introduced in libvirt 6.0.0, the file=rbd:* format information is no longer provided in the qemu output
        if libvirt_version.version_compare(6, 0, 0):
            test_qemu_cmd = False

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(
                    vm, targetdev, old_parts, read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly and not libvirt_version.version_compare(6, 0, 0):
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        if test_disk_external_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Check rbd blockcopy inside VM
        if rbd_blockcopy:
            try:
                session = vm.wait_for_login()
                cmd = (
                    "mount /dev/{0} /mnt && ls /mnt/rbd_blockcopyfile && (sleep 3;"
                    " umount /mnt)".format(targetdev))
                s, o = session.cmd_status_output(cmd, timeout=60)
                session.close()
                logging.info(
                    "Listed the file on the new rbd backend disk in VM: %s, %s",
                    s, o)
            except (remote.LoginError, virt_vm.VMError,
                    aexpect.ShellError) as e:
                logging.error(str(e))
            debug_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            def _check_slice_in_xml():
                """
                Check slice attribute in disk xml.
                """
                debug_vmxml = virsh.dumpxml(vm_name, "",
                                            debug=True).stdout.strip()
                return 'slices' in debug_vmxml

            if enable_slice:
                if not _check_slice_in_xml():
                    test.fail("Failed to find slice attribute in VM xml")
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file, wait_for_event=True)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name,
                                          xml_file,
                                          wait_for_event=True)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev, wait_for_event=True)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm" " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
        for msg in unsupported_err:
            if msg in str(details):
                test.cancel(str(details))
        else:
            # for-else: reached only when no unsupported message matched
            test.fail("VM failed to start. Error: %s" % str(details))
    finally:
        # Remove ceph configure file if created.
        if ceph_cfg:
            os.remove(ceph_cfg)
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(
                       mon_host, key_opt,
                       os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
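
# A hedged, uncalled sketch (all values hypothetical) of the rbd cleanup
# command composed in the finally block above: probe the image first, then
# remove it only if the probe succeeds.
def _demo_rbd_cleanup_cmd(mon_host="10.0.0.1",
                          key_opt="--keyring /etc/ceph/ceph.keyring",
                          image="rbd/test-vol"):
    return ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
            .format(mon_host, key_opt, image))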
Exemplo n.º 11
0
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of rng

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """

    def get_free_pci_slot():
        """
        Get a free slot from pcie-to-pci-bridge

        :return: The free slot
        """
        used_slot = []
        for dev in pci_devices:
            address = dev.find('address')
            if (address is not None and
                    address.get('bus') == pci_bridge_index):
                used_slot.append(address.get('slot'))
        for slot_index in range(1, 30):
            slot = "%0#4x" % slot_index
            if slot not in used_slot:
                return slot
        return None
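
    # Illustrative, uncalled sketch of the "%0#4x" formatting used above: it
    # renders a slot index as the zero-padded hex string libvirt expects,
    # e.g. 1 -> '0x01', 15 -> '0x0f', 29 -> '0x1d'.
    def _demo_slot_format():
        return ["%0#4x" % i for i in (1, 15, 29)]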

    def get_free_root_port():
        """
        Get a free root port for rng device

        :return: The bus index of free root port
        """
        root_ports = set()
        other_ports = set()
        used_slot = set()
        # Record the bus indexes for all pci controllers
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-root-port':
                root_ports.add(controller.get('index'))
            else:
                other_ports.add(controller.get('index'))
        # Record the addresses being allocated for all pci devices
        # getchildren() is deprecated; iterate the element directly
        pci_devices = list(vmxml.xmltreefile.find('devices'))
        for dev in pci_devices:
            address = dev.find('address')
            if address is not None:
                used_slot.add(address.get('bus'))
        # Find the bus address unused
        for bus_index in root_ports:
            bus = "%0#4x" % int(bus_index)
            if bus not in used_slot:
                return bus
        # Add a new pcie-root-port if no free one
        for index in range(1, 30):
            if str(index) not in (root_ports | other_ports):
                contr_dict = {'controller_type': 'pci',
                              'controller_index': index,
                              'controller_model': 'pcie-root-port'}
                cntl_add = libvirt.create_controller_xml(contr_dict)
                libvirt.add_controller(vm_name, cntl_add)
                return "%0#4x" % int(index)
        return None
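
    # Side note (illustrative, uncalled): controller indexes read from the
    # domain XML are strings, so membership tests against ints silently
    # fail; hence the str() conversion in the loop above.
    def _demo_index_membership():
        ports = {'1', '2'}       # indexes as parsed from XML attributes
        assert 1 not in ports    # an int never matches the string set
        assert str(1) in ports   # convert before testing membership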

    def check_plug_to(bus_type='pcie-to-pci-bridge'):
        """
        Check if the rng device is plugged onto the expected bus

        :param bus_type: The bus type the device is expected to plug onto
        :return: True if plugged onto 'bus_type', otherwise False
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        rng = vmxml.xmltreefile.find('devices').find('rng')
        # The bus attribute is a hex string such as '0x03'; avoid eval()
        bus = int(rng.find('address').get('bus'), 16)
        controllers = vmxml.get_controllers('pci')
        for controller in controllers:
            # Controller indexes are parsed as strings; compare as ints
            if int(controller.get('index')) == bus:
                if controller.get('model') == bus_type:
                    return True
                break
        return False
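
    # A self-contained sketch (the sample XML is hypothetical) of what
    # check_plug_to() reads from the domain XML: the rng device's PCI bus,
    # parsed from a hex attribute such as '0x03'.
    def _demo_parse_rng_bus():
        import xml.etree.ElementTree as ET
        sample = ('<devices><rng model="virtio">'
                  '<address type="pci" bus="0x03" slot="0x01"/>'
                  '</rng></devices>')
        rng = ET.fromstring(sample).find('rng')
        return int(rng.find('address').get('bus'), 16)  # -> 3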

    def check_rng_inside_guest():
        """
        Check the rng device inside the guest
        """
        check_cmd = params['check_cmd']
        lspci_output = session.cmd_output(check_cmd)
        session.cmd_output('pkill -9 hexdump')
        if 'No such file or directory' in lspci_output and device_exists:
            test.fail('Can not detect device by %s.' % check_cmd)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']
    boot_with_rng = (params.get('boot_with_rng', 'yes') == 'yes')
    hotplug = (params.get('hotplug', 'no') == 'yes')
    device_exists = (params.get('device_exists', 'yes') == 'yes')
    plug_to = params.get('plug_to', '')

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # Download and update image if required
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path

    # Add a 'pcie-to-pci-bridge' controller if there is not one already
    pci_controllers = vmxml.get_controllers('pci')
    for controller in pci_controllers:
        if controller.get('model') == 'pcie-to-pci-bridge':
            pci_bridge = controller
            break
    else:
        contr_dict = {'controller_type': 'pci',
                      'controller_model': 'pcie-to-pci-bridge'}
        pci_bridge = libvirt.create_controller_xml(contr_dict)
        libvirt.add_controller(vm_name, pci_bridge)
    pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))

    try:
        # Update nic and vm disks
        if (params["os_variant"] == 'rhel6' or
                'rhel6' in params.get("shortname", "")):
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        # set_vm_disk syncs the guest with another dumped xml inside the
        # function, so re-fetch vmxml to pick up the changes
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Remove existing rng devices, if any
        rng_devs = vmxml.get_devices('rng')
        for rng in rng_devs:
            vmxml.del_device(rng)
        vmxml.xmltreefile.write()
        vmxml.sync()

        # Generate new rng xml per the configuration
        rng_xml = libvirt.create_rng_xml({"rng_model": virtio_model})
        if params.get('specify_addr', 'no') == 'yes':
            pci_devices = list(vmxml.xmltreefile.find('devices'))
            addr = rng_xml.new_rng_address()
            if plug_to == 'pcie-root-port':
                bus = get_free_root_port()
                addr.set_attrs({'bus': bus})
            else:
                slot = get_free_pci_slot()
                addr.set_attrs({'bus': pci_bridge_index, 'slot': slot})
            rng_xml.address = addr
        if boot_with_rng:  # Add to vm if required
            libvirt.add_vm_device(vmxml, rng_xml)
        if not vm.is_alive():
            vm.start()
        if hotplug:  # Hotplug rng if required
            file_arg = rng_xml.xml
            with open(file_arg) as rng_file:
                logging.debug("Attach rng by XML: %s", rng_file.read())
            s_attach = virsh.attach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_attach)
            check_plug_to(plug_to)
        session = vm.wait_for_login()
        check_rng_inside_guest()
        if hotplug:  # Unplug rng if hotplugged previously
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            rng = vmxml.get_devices('rng')[0]
            file_arg = rng.xml
            with open(file_arg) as rng_file:
                logging.debug("Detach rng by XML: %s", rng_file.read())
            s_detach = virsh.detach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_detach)
        if not hotplug:
            session.close()
            save_path = os.path.join(
                data_dir.get_tmp_dir(), '%s.save' % params['os_variant'])
            ret = virsh.save(vm_name, save_path)
            libvirt.check_exit_status(ret)
            ret = virsh.restore(save_path)
            libvirt.check_exit_status(ret)
            session = vm.wait_for_login()
            check_rng_inside_guest()
            process.run('rm -f %s' % save_path, ignore_status=True)
    finally:
        vm.destroy()
        backup_xml.sync()
Exemplo n.º 12
0
def run(test, params, env):
    """
    Test virsh {at|de}tach-disk command.

    The command can attach a new disk or detach an existing disk.
    1. Prepare the test environment, destroy or suspend a VM.
    2. Perform the virsh attach/detach-disk operation.
    3. Recover the test environment.
    4. Confirm the test result.
    """
    def check_info_in_audit_log_file(test_cmd, device_source):
        """
        Check if information can be found in audit.log.

        :param test_cmd: test command
        :param device_source: device source
        :return: True if a matching "res=success" entry is found
        """
        grep_audit = ('grep "%s" /var/log/audit/audit.log'
                      % test_cmd.split("-")[0])
        cmd = (grep_audit + ' | ' + 'grep "%s" | tail -n1 | grep "res=success"'
               % device_source)
        return process.run(cmd, ignore_status=True, shell=True).exit_status == 0
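
    # Uncalled sketch of the pipeline composed above, for a hypothetical
    # attach-disk run: grep audit.log for the command family, then for the
    # device source, keeping only the last successful entry.
    def _demo_audit_grep_cmd(test_cmd="attach-disk",
                             device_source="/tmp/attach.img"):
        grep_audit = ('grep "%s" /var/log/audit/audit.log'
                      % test_cmd.split("-")[0])
        return (grep_audit +
                ' | grep "%s" | tail -n1 | grep "res=success"' % device_source)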

    def check_vm_partition(vm, device, os_type, target_name, old_parts):
        """
        Check VM disk's partition.

        :param vm: VM guest.
        :param device: Device type, e.g. "disk" or "cdrom".
        :param os_type: VM's operating system type.
        :param target_name: Device target type.
        :param old_parts: Partition list collected before the operation.
        :return: True if the check succeeds.
        """
        logging.info("Checking VM partition...")
        if vm.is_dead():
            vm.start()
        try:
            attached = False
            if os_type == "linux":
                session = vm.wait_for_login()
                new_parts = utils_disk.get_parts_list(session)
                added_parts = list(set(new_parts).difference(set(old_parts)))
                logging.debug("Added parts: %s" % added_parts)
                for part in added_parts:
                    if device == "disk":
                        if target_name.startswith("vd"):
                            if part.startswith("vd"):
                                attached = True
                        elif target_name.startswith("hd") or target_name.startswith("sd"):
                            if part.startswith("sd"):
                                attached = True
                    elif device == "cdrom":
                        if part.startswith("sr"):
                            attached = True
                session.close()
            return attached
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False
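
    # Minimal, uncalled sketch of the partition diff used above: the newly
    # attached devices are the set difference between the partition lists
    # taken after and before the operation.
    def _demo_partition_diff():
        old_parts = ['vda', 'vda1']
        new_parts = ['vda', 'vda1', 'vdd']
        return list(set(new_parts).difference(set(old_parts)))  # ['vdd']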

    def acpiphp_module_modprobe(vm, os_type):
        """
        Add the acpiphp module if the VM's os type is rhel5.*

        :param vm: VM guest.
        :param os_type: VM's operating system type.
        :return: True if the operation succeeds.
        """
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                s_rpm, _ = session.cmd_status_output(
                    "rpm --version")
                # If status is different from 0, this
                # guest OS doesn't support the rpm package
                # manager
                if s_rpm:
                    session.close()
                    return True
                _, o_vd = session.cmd_status_output(
                    "rpm -qa | grep redhat-release")
                if o_vd.find("5Server") != -1:
                    s_mod, o_mod = session.cmd_status_output(
                        "modprobe acpiphp")
                    del o_mod
                    if s_mod != 0:
                        session.close()
                        return False
                session.close()
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_shareable(at_with_shareable, test_twice):
        """
        Check if the current libvirt version supports the shareable option

        :param at_with_shareable: Whether to attach the disk with the shareable option
        :param test_twice: Whether to perform the operations twice
        :return: True if supported, otherwise the test is cancelled
        """
        if at_with_shareable or test_twice:
            if libvirt_version.version_compare(3, 9, 0):
                return True
            else:
                test.cancel("Current libvirt version doesn't support shareable feature")

    # Get test command.
    test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk")

    vm_ref = params.get("at_dt_disk_vm_ref", "name")
    at_options = params.get("at_dt_disk_at_options", "")
    dt_options = params.get("at_dt_disk_dt_options", "")
    at_with_shareable = "yes" == params.get("at_with_shareable", 'no')
    pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = params.get("at_dt_disk_no_attach", 'no')
    os_type = params.get("os_type", "linux")
    qemu_file_lock = params.get("qemu_file_lock", "")
    if qemu_file_lock:
        if utils_misc.compare_qemu_version(2, 9, 0):
            logging.info('From qemu-kvm-rhev 2.9.0 on, QEMU image locking '
                         'prevents multiple runs of QEMU or qemu-img '
                         'against an image while a VM is running.')
            if test_cmd == "detach-disk" or pre_vm_state == "shut off":
                test.cancel('This case is not supported.')
            else:
                logging.info('The expected result is failure as opposed to success')
                status_error = True

    # Disk specific attributes.
    device = params.get("at_dt_disk_device", "disk")
    device_source_name = params.get("at_dt_disk_device_source", "attach.img")
    device_source_format = params.get("at_dt_disk_device_source_format", "raw")
    device_target = params.get("at_dt_disk_device_target", "vdd")
    device_disk_bus = params.get("at_dt_disk_bus_type", "virtio")
    source_path = "yes" == params.get("at_dt_disk_device_source_path", "yes")
    create_img = "yes" == params.get("at_dt_disk_create_image", "yes")
    test_twice = "yes" == params.get("at_dt_disk_test_twice", "no")
    test_systemlink_twice = "yes" == params.get("at_dt_disk_twice_with_systemlink", "no")
    test_type = "yes" == params.get("at_dt_disk_check_type", "no")
    test_audit = "yes" == params.get("at_dt_disk_check_audit", "no")
    test_block_dev = "yes" == params.get("at_dt_disk_iscsi_device", "no")
    test_logical_dev = "yes" == params.get("at_dt_disk_logical_device", "no")
    restart_libvirtd = "yes" == params.get("at_dt_disk_restart_libvirtd", "no")
    detach_disk_with_print_xml = "yes" == params.get("detach_disk_with_print_xml", "no")
    vg_name = params.get("at_dt_disk_vg", "vg_test_0")
    lv_name = params.get("at_dt_disk_lv", "lv_test_0")
    # Get additional lvm item names.
    additional_lv_names = params.get("at_dt_disk_additional_lvs", "").split()
    serial = params.get("at_dt_disk_serial", "")
    address = params.get("at_dt_disk_address", "")
    address2 = params.get("at_dt_disk_address2", "")
    cache_options = params.get("cache_options", "")
    time_sleep = params.get("time_sleep", 3)
    # Define one empty list to locate those lvm.
    total_lvm_names = []
    if check_shareable(at_with_shareable, test_twice):
        at_options += " --mode shareable"
    if serial:
        at_options += (" --serial %s" % serial)
    if address2:
        at_options_twice = at_options + (" --address %s" % address2)
    if address:
        at_options += (" --address %s" % address)
    if cache_options:
        if cache_options.count("directsync"):
            if not libvirt_version.version_compare(1, 0, 0):
                test.cancel("'directsync' cache option doesn't "
                            "support in current libvirt version.")
        at_options += (" --cache %s" % cache_options)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Create virtual device file.
    device_source_path = os.path.join(data_dir.get_tmp_dir(), device_source_name)
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Can not get iscsi device name in host")
        if test_logical_dev:
            if lv_utils.vg_check(vg_name):
                lv_utils.vg_remove(vg_name)
            lv_utils.vg_create(vg_name, device_source)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)
            total_lvm_names.append(device_source)
            if test_systemlink_twice:
                for lvm_item_name in additional_lv_names:
                    additional_device_source = libvirt.create_local_disk(
                        "lvm", size="10M", vgname=vg_name, lvname=lvm_item_name)
                    logging.debug("Newly created volume: %s", additional_device_source)
                    total_lvm_names.append(additional_device_source)
    else:
        if source_path and create_img:
            device_source = libvirt.create_local_disk(
                "file", path=device_source_path,
                size="1G", disk_format=device_source_format)
        else:
            device_source = device_source_name

    # If we are testing audit, we need to start the auditd service first.
    if test_audit:
        auditd_service = Factory.create_service("auditd")
        if not auditd_service.status():
            auditd_service.start()
        logging.info("Auditd service status: %s" % auditd_service.status())

    # If we are testing cdrom device, we need to detach hdc in VM first.
    if device == "cdrom":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        s_detach = virsh.detach_disk(vm_name, device_target, "--config")
        if s_detach.exit_status:
            logging.error("Detach hdc failed before test.")

    # If we are testing detach-disk, we need to attach certain device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        s_at_options = "--driver qemu --config"
        # Since the lock feature was introduced in libvirt 3.9.0, the disk
        # shareable option needs to be set if the disk is attached multiple times
        if check_shareable(at_with_shareable, test_twice):
            s_at_options += " --mode shareable"

        s_attach = virsh.attach_disk(vm_name, device_source, device_target,
                                     s_at_options, debug=True).exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing detach-disk")
        else:
            logging.debug("Attaching device succeeded before testing detach-disk")
        if test_twice:
            device_target2 = params.get("at_dt_disk_device_target2",
                                        device_target)
            device_source = libvirt.create_local_disk(
                "file", path=device_source_path,
                size="1", disk_format=device_source_format)
            vm_dump_xml.remove_all_device_by_type('controller')
            machine_list = vm_dump_xml.os.machine.split("-")
            vm_dump_xml.set_os_attrs(**{"machine": machine_list[0] + "-q35-" + machine_list[2]})
            q35_pcie_dict0 = {'controller_model': 'pcie-root', 'controller_type': 'pci', 'controller_index': 0}
            q35_pcie_dict1 = {'controller_model': 'pcie-root-port', 'controller_type': 'pci'}
            vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
            # Add enough controllers to support attaching the disk multiple times
            for i in list(range(1, 15)):
                q35_pcie_dict1.update({'controller_index': "%d" % i})
                vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict1))
            vm_dump_xml.sync()
            s_attach = virsh.attach_disk(vm_name, device_source, device_target2,
                                         s_at_options).exit_status
            if s_attach != 0:
                logging.error("Attaching device failed before testing "
                              "detach-disk test_twice")

    vm.start()
    vm.wait_for_login()

    # Add acpiphp module before testing if VM's os type is rhel5.*
    if not acpiphp_module_modprobe(vm, os_type):
        test.error("Add acpiphp module failed before test.")

    # Turn VM into certain state.
    if pre_vm_state == "paused":
        logging.info("Suspending %s..." % vm_name)
        if vm.is_alive():
            vm.pause()
    elif pre_vm_state == "shut off":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)

    # Get disk count before test.
    disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    if test_cmd == "attach-disk":
        status = virsh.attach_disk(vm_ref, device_source, device_target,
                                   at_options, debug=True).exit_status
    elif test_cmd == "detach-disk":
        # With the --print-xml option, detach-disk only prints the device xml and does not actually detach the disk.
        if detach_disk_with_print_xml and libvirt_version.version_compare(4, 5, 0):
            ret = virsh.detach_disk(vm_ref, device_target, at_options)
            libvirt.check_exit_status(ret)
            cmd = ("echo \"%s\" | grep -A 16 %s"
                   % (ret.stdout.strip(), device_source_name))
            if process.system(cmd, ignore_status=True, shell=True):
                test.error("Check disk with source image name failed")
        status = virsh.detach_disk(vm_ref, device_target, dt_options,
                                   debug=True).exit_status

    if restart_libvirtd:
        libvirtd_serv = utils_libvirtd.Libvirtd()
        libvirtd_serv.restart()

    if test_twice:
        device_target2 = params.get("at_dt_disk_device_target2", device_target)
        device_source = libvirt.create_local_disk(
            "file", path=device_source_path,
            size="1G", disk_format=device_source_format)
        if test_cmd == "attach-disk":
            if address2:
                at_options = at_options_twice
            status = virsh.attach_disk(vm_ref, device_source,
                                       device_target2, at_options,
                                       debug=True).exit_status
        elif test_cmd == "detach-disk":
            status = virsh.detach_disk(vm_ref, device_target2, dt_options,
                                       debug=True).exit_status
    if test_systemlink_twice:
        # Detach lvm previously attached.
        result = virsh.detach_disk(vm_ref, device_target, dt_options,
                                   debug=True)
        libvirt.check_exit_status(result)
        # Remove the systemlinks for lv01, lv02, and lv03
        for lvm_item in total_lvm_names:
            remove_systemlink_cmd = ('lvchange -a n %s' % lvm_item)
            if process.run(remove_systemlink_cmd, shell=True).exit_status:
                logging.error("Remove systemlink failed")
        # Add new systemlinks for lv01, lv02, and lv03 by shifting one position
        for index in range(0, len(total_lvm_names)):
            add_systemlink_cmd = ('lvchange -a y %s' % total_lvm_names[(index+1) % len(total_lvm_names)])
            if process.run(add_systemlink_cmd, shell=True).exit_status:
                logging.error("Add systemlink failed")
        # Attach lvm lv01 again.
        result = virsh.attach_disk(vm_ref, device_source,
                                   device_target, at_options,
                                   debug=True)
        libvirt.check_exit_status(result)
        # Detach lvm 01 again.
        result = virsh.detach_disk(vm_ref, device_target, dt_options,
                                   debug=True)
        libvirt.check_exit_status(result)

    # Resume the guest after the command. On newer libvirt this has been
    # fixed (it used to be a bug): the xml change lands only after the
    # guest is resumed.
    if pre_vm_state == "paused":
        vm.resume()
        time.sleep(5)

    # Check audit log
    check_audit_after_cmd = True
    if test_audit:
        result = utils_misc.wait_for(lambda: check_info_in_audit_log_file(test_cmd, device_source), timeout=20)
        if not result:
            logging.error("Audit check failed")
            check_audit_after_cmd = False

    # Wait a while for the xml to sync
    time.sleep(float(time_sleep))
    # Check disk count after command.
    check_count_after_cmd = True
    disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_cmd == disk_count_before_cmd:
            check_count_after_cmd = False
    elif test_cmd == "detach-disk":
        if disk_count_after_cmd < disk_count_before_cmd:
            check_count_after_cmd = False

    # Recover VM state.
    if pre_vm_state == "shut off":
        vm.start()

    # Check in VM after command.
    check_vm_after_cmd = True
    check_vm_after_cmd = check_vm_partition(vm, device, os_type,
                                            device_target, old_parts)

    # Check disk type after attach.
    check_disk_type = True
    if test_type:
        if test_block_dev:
            check_disk_type = vm_xml.VMXML.check_disk_type(vm_name,
                                                           device_source,
                                                           "block")
        else:
            check_disk_type = vm_xml.VMXML.check_disk_type(vm_name,
                                                           device_source,
                                                           "file")
    # Check disk serial after attach.
    check_disk_serial = True
    if serial:
        disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target)
        if serial != disk_serial:
            check_disk_serial = False

    # Check disk address after attach.
    check_disk_address = True
    if address:
        disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target)
        if address != disk_address:
            check_disk_address = False

    # Check multifunction address after attach.
    check_disk_address2 = True
    if address2:
        disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2)
        if address2 != disk_address2:
            check_disk_address2 = False

    # Check disk cache option after attach.
    check_cache_after_cmd = True
    if cache_options:
        disk_cache = vm_xml.VMXML.get_disk_attr(vm_name, device_target,
                                                "driver", "cache")
        if cache_options == "default":
            if disk_cache is not None:
                check_cache_after_cmd = False
        elif disk_cache != cache_options:
            check_cache_after_cmd = False

    # Eject cdrom test
    eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no")
    save_vm = "yes" == params.get("at_dt_disk_save_vm", "no")
    save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
    try:
        if eject_cdrom:
            eject_params = {'type_name': "file", 'device_type': "cdrom",
                            'target_dev': device_target, 'target_bus': device_disk_bus}
            eject_xml = libvirt.create_disk_xml(eject_params)
            with open(eject_xml) as eject_file:
                logging.debug("Eject CDROM by XML: %s", eject_file.read())
            # Run the command twice to make sure the cdrom tray opens first (BZ892289)
            # Open tray
            virsh.attach_device(domainarg=vm_name, filearg=eject_xml, debug=True)
            # Add time sleep between two attach commands.
            if time_sleep:
                time.sleep(float(time_sleep))
            # Eject cdrom
            result = virsh.attach_device(domainarg=vm_name, filearg=eject_xml,
                                         debug=True)
            if result.exit_status != 0:
                test.fail("Eject CDROM failed")
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do eject" % device_source)
        # Save and restore VM
        if save_vm:
            result = virsh.save(vm_name, save_file, debug=True)
            libvirt.check_exit_status(result)
            result = virsh.restore(save_file, debug=True)
            libvirt.check_exit_status(result)
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do restore" % device_source)

        # Destroy VM.
        vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        inactive_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disk_count_after_shutdown = len(inactive_vmxml.get_disk_all())
        if test_cmd == "attach-disk":
            if disk_count_after_shutdown == disk_count_before_cmd:
                check_count_after_shutdown = False
        elif test_cmd == "detach-disk":
            if disk_count_after_shutdown < disk_count_before_cmd:
                check_count_after_shutdown = False

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.debug("Restore the VM XML")
        backup_xml.sync()
        if os.path.exists(save_file):
            os.remove(save_file)
        if test_block_dev:
            if test_logical_dev:
                libvirt.delete_local_disk("lvm", vgname=vg_name, lvname=lv_name)
                if test_systemlink_twice:
                    for lv_item_name in additional_lv_names:
                        libvirt.delete_local_disk("lvm", vgname=vg_name, lvname=lv_item_name)
                lv_utils.vg_remove(vg_name)
                process.run("pvremove %s" % device_source, shell=True, ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(False)
        else:
            libvirt.delete_local_disk("file", device_source)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh %s exit with unexpected value."
                      % test_cmd)
    else:
        if test_systemlink_twice:
            return
        if status:
            test.fail("virsh %s failed." % test_cmd)
        if test_cmd == "attach-disk":
            if at_options.count("config"):
                if not check_count_after_shutdown:
                    test.fail("Cannot see config attached device "
                              "in xml file after VM shutdown.")
                if not check_disk_serial:
                    test.fail("Serial set failed after attach")
                if not check_disk_address:
                    test.fail("Address set failed after attach")
                if not check_disk_address2:
                    test.fail("Address(multifunction) set failed"
                              " after attach")
            else:
                if not check_count_after_cmd:
                    test.fail("Cannot see device in xml file"
                              " after attach.")
                if not check_vm_after_cmd:
                    test.fail("Cannot see device in VM after"
                              " attach.")
                if not check_disk_type:
                    test.fail("Check disk type failed after"
                              " attach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotplug failure after attach")
                if not check_cache_after_cmd:
                    test.fail("Check cache failure after attach")
                if at_options.count("persistent"):
                    if not check_count_after_shutdown:
                        test.fail("Cannot see device attached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if check_count_after_shutdown:
                        test.fail("See non-config attached device "
                                  "in xml file after VM shutdown.")
        elif test_cmd == "detach-disk":
            if dt_options.count("config"):
                if check_count_after_shutdown:
                    test.fail("See config detached device in "
                              "xml file after VM shutdown.")
            else:
                if check_count_after_cmd:
                    test.fail("See device in xml file "
                              "after detach.")
                if check_vm_after_cmd:
                    test.fail("See device in VM after detach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotunplug failure "
                              "after detach")

                if dt_options.count("persistent"):
                    if check_count_after_shutdown:
                        test.fail("See device deattached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if not check_count_after_shutdown:
                        test.fail("See non-config detached "
                                  "device in xml file after "
                                  "VM shutdown.")

        else:
            test.error("Unknown command %s." % test_cmd)
Exemplo n.º 13
0
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of vsock

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']
    boot_with_vsock = (params.get('boot_with_vsock', 'yes') == 'yes')
    hotplug = (params.get('hotplug', 'no') == 'yes')
    addr_pattern = params['addr_pattern']
    device_pattern = params['device_pattern']

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    def check_vsock_inside_guest():
        """
        Check the vsock device inside the guest
        """
        lspci_cmd = 'lspci'
        lspci_output = session.cmd_output(lspci_cmd)
        device_str = re.findall(r'%s\s%s' % (addr_pattern, device_pattern),
                                lspci_output)
        if not device_str:
            test.fail('lspci failed, no device "%s"' % device_pattern)
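
    # Uncalled sketch of the lspci match above, using hypothetical patterns:
    # addr_pattern matches a PCI address and device_pattern the device name.
    def _demo_lspci_match():
        import re
        lspci_output = ("00:05.0 Communication controller: "
                        "Red Hat, Inc. Virtio socket")
        addr_pattern = r'\d\d:\d\d\.\d'
        device_pattern = r'.*Virtio socket'
        return re.findall(r'%s\s%s' % (addr_pattern, device_pattern),
                          lspci_output)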

    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path

    # Add pcie-to-pci-bridge when it is required
    if add_pcie_to_pci_bridge:
        pci_controllers = vmxml.get_controllers('pci')
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-to-pci-bridge':
                break
        else:
            contr_dict = {'controller_type': 'pci',
                          'controller_model': 'pcie-to-pci-bridge'}
            cntl_add = libvirt.create_controller_xml(contr_dict)
            libvirt.add_controller(vm_name, cntl_add)

    # Generate xml for device vsock
    vsock_xml = libvirt.create_vsock_xml(virtio_model)
    if boot_with_vsock:  # Add vsock xml to vm only when needed
        libvirt.add_vm_device(vmxml, vsock_xml)
    try:
        if (params["os_variant"] == 'rhel6' or
                'rhel6' in params.get("shortname", "")):
            # Update the interface to virtio-transitional mode for the
            # rhel6 guest so that login works
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        if hotplug:
            file_arg = vsock_xml.xml
            with open(file_arg) as vsock_file:
                logging.debug("Attach vsock by XML: %s", vsock_file.read())
            s_attach = virsh.attach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_attach)
        if add_pcie_to_pci_bridge:
            # Check that the device is plugged onto the right bus
            virtio_transitional_base.check_plug_to(vm_name, 'vsock')
        session = vm.wait_for_login()
        check_vsock_inside_guest()
        if hotplug:
            with open(file_arg) as vsock_file:
                logging.debug("Detach vsock by XML: %s", vsock_file.read())
            s_detach = virsh.detach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_detach)
    finally:
        vm.destroy()
        backup_xml.sync()
Exemplo n.º 14
0
def run(test, params, env):
    """
    Test the PCIe controllers' options
    1. Backup guest xml before the tests
    2. Modify guest xml and define the guest
    3. Start guest
    4. Hotplug if needed
    5. Do checking
    6. Destroy guest and restore guest
    """
    def get_disk_bus(disk_dev=None):
        """
        Get the bus list of guest disks

        :param disk_dev: The specified disk device
        :return: list for disks' buses
        """
        disk_bus_list = []

        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        disk_dev_list = cur_vm_xml.get_disk_blk(vm_name)
        if disk_dev and disk_dev not in disk_dev_list:
            return disk_bus_list
        for disk_index in range(0, len(disk_dev_list)):
            disk_target = disk_dev if disk_dev else disk_dev_list[disk_index]
            disk_bus = cur_vm_xml.get_disk_attr(vm_name, disk_target,
                                                'address', 'bus')
            disk_bus_list.append(disk_bus)
            if disk_dev:
                break
        logging.debug("Return disk bus list: {}".format(disk_bus_list))
        return disk_bus_list

    def check_guest_disks(ishotplug):
        """
        Check guest disks in different ways

        :param ishotplug: True for hotplug, False for hotunplug
        :raise: test.fail if some errors happen
        """
        def _find_disk_by_cmd():
            """
            Check disk using virsh command

            :return: True if the disk is found, otherwise False
            """
            ret = virsh.domblklist(vm_name, **virsh_options)
            # Match vdX or sdX device names
            target_disks = re.findall(r"[vs]d[a-z]", ret.stdout.strip())
            logging.debug(target_disks)

            for one_disk in target_disks:
                if target_dev in one_disk:
                    logging.debug("Found the disk '{}'".format(target_dev))
                    return True
            logging.debug("Can't find the disk '{}'".format(target_dev))
            return False
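
        # Uncalled sketch of the device-name match above against a sample
        # (hypothetical) domblklist output.
        def _demo_domblklist_match():
            import re
            sample = ("Target   Source\n"
                      "------------------------------\n"
                      "vda      /var/lib/libvirt/images/a.qcow2\n"
                      "vdb      /tmp/disk0.qcow2\n")
            return re.findall(r"[vs]d[a-z]", sample)  # ['vda', 'vdb']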

        def _find_disk_in_xml():
            """
            Check disk in guest xml

            :return: True if the disk is found with right bus
                     False if the disk is not found
            :raise: test.fail if the disk's bus is incorrect
            """
            bus_list = get_disk_bus(target_dev)
            if len(bus_list) == 0:
                return False
            if bus_list[0] != '0x%02x' % int(contr_index):
                test.fail("The found disk's bus is expected to be {}, "
                          "but {} found".format('0x%02x' % int(contr_index),
                                                bus_list[0]))
            return True

        virsh_options.update({'ignore_status': False})
        # Firstly check virsh.domblklist
        found_by_cmd = _find_disk_by_cmd()
        found_in_xml = _find_disk_in_xml()
        msg1 = "Can't find the device with target_dev '{}' by cmd".format(
            target_dev)
        msg2 = "Found the device with target_dev '{}' unexpectedly by cmd".format(
            target_dev)
        msg3 = "The device with target_dev '{}' was not detached successfully in xml".format(
            target_dev)
        msg4 = "The device with target_dev '{}' was detached unexpectedly in xml".format(
            target_dev)
        if ((ishotplug and not status_error and not found_by_cmd)
                or (not ishotplug and status_error and not found_by_cmd)):
            test.fail(msg1)
        if ((ishotplug and status_error and found_by_cmd)
                or (not ishotplug and not status_error and found_by_cmd)):
            test.fail(msg2)
        if ((ishotplug and not status_error and not found_in_xml)
                or (not ishotplug and not status_error and found_in_xml)):
            test.fail(msg3)
        if ((ishotplug and status_error and found_in_xml)
                or (not ishotplug and status_error and not found_in_xml)):
            test.fail(msg4)
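
    # Side note (illustrative, uncalled): the four fail branches above encode
    # a single expectation, expected_found = (ishotplug != status_error):
    # a device should be visible after a successful hotplug or an expectedly
    # failed hotunplug, and absent otherwise.
    def _demo_expected_found(ishotplug, status_error):
        return ishotplug != status_error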

    def check_inside_guest(ishotplug):
        """
        Check devices within the guest

        :param ishotplug: True for hotplug, False for hotunplug
        :raise: test.fail if the result is not expected
        """
        def _check_disk_in_guest():
            """
            Compare the disk numbers within the guest

            :return: True if new disk is found, otherwise False
            """
            new_disk_num = len(vm.get_disks())
            if new_disk_num > ori_disk_num:
                logging.debug("New disk is found in vm")
                return True
            logging.debug("New disk is not found in vm")
            return False

        vm_session = vm.wait_for_login()
        status = _check_disk_in_guest()
        vm_session.close()
        msg1 = "Can't find the device in the guest"
        msg2 = "Found the device in the guest unexpectedly"
        if ((ishotplug and not status_error and not status)
                or (not ishotplug and status_error and not status)):
            test.fail(msg1)
        if ((ishotplug and status_error and status)
                or (not ishotplug and not status_error and status)):
            test.fail(msg2)

    def check_guest_contr():
        """
        Check the controller in guest xml

        :raise: test.fail if the controller does not meet the expectation
        """
        found = False
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        for cntl in cur_vm_xml.devices.by_device_tag('controller'):
            if (cntl.type == 'pci' and cntl.model == contr_model
                    and cntl.index == contr_index):
                logging.debug(cntl.target)
                cntl_hotplug = cntl.target.get('hotplug')
                logging.debug("Got controller's hotplug:%s", cntl_hotplug)
                if cntl_hotplug != hotplug_option:
                    test.fail("The controller's hotplug option is {}, "
                              "but expect {}".format(cntl_hotplug,
                                                     hotplug_option))
                found = True
                break
        # A bare 'if not cntl' would miss the case where the loop ends on a
        # non-matching controller, so track the match explicitly
        if not found:
            test.fail("The controller with index {} is not found".format(
                contr_index))

    def check_multi_attach(bus_list):
        """
        Check the result of multiple attach devices to the VM

        :param bus_list: List which includes the buses of vm disks
        :raise: test.fail if the result is unexpected
        """
        msg_pattern = "The disk is {} expected to be attached to " \
                      "the controller with index '{}'"
        is_found = False
        if hotplug_option == 'on':
            is_found = any(one_bus == '0x%02x' % int(contr_index)
                           for one_bus in bus_list)
            if not is_found:
                test.fail(msg_pattern.format('', contr_index))
            else:
                logging.debug("Found a disk attached to the controller "
                              "with index '{}'".format(contr_index))
        else:
            for one_bus in bus_list:
                is_found = one_bus == '0x%02x' % int(contr_index)
                if is_found:
                    test.fail(msg_pattern.format('not', contr_index))
            logging.debug("No disk is found to attach to the "
                          "controller with index '{}'".format(contr_index))

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    setup_controller = params.get("setup_controller", 'yes') == 'yes'
    check_within_guest = params.get("check_within_guest", 'yes') == 'yes'
    check_disk_xml = params.get("check_disk_xml", 'no') == 'yes'
    check_cntl_xml = params.get("check_cntl_xml", 'no') == 'yes'
    contr_model = params.get("controller_model", 'pcie-root-port')
    contr_target = params.get("controller_target")
    hotplug_option = params.get("hotplug_option")
    hotplug = params.get("hotplug", 'yes') == 'yes'
    define_option = params.get("define_option")
    attach_extra = params.get("attach_extra")
    target_dev = params.get("target_dev")
    err_msg = params.get("err_msg")
    status_error = params.get("status_error", "no") == 'yes'
    restart_daemon = params.get("restart_daemon", "no") == 'yes'
    save_restore = params.get("save_restore", "no") == 'yes'
    hotplug_counts = params.get("hotplug_counts")
    addr_twice = params.get("addr_twice", 'no') == 'yes'
    contr_index = None

    virsh_options = {'debug': True, 'ignore_status': False}

    image_path_list = []
    vm = env.get_vm(vm_name)
    vm_xml_obj = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml_obj.copy()
    try:
        if check_within_guest:
            if not vm.is_alive():
                virsh.start(vm_name, **virsh_options)
            ori_disk_num = len(vm.get_disks())
            logging.debug("The original disk number in vm is %d", ori_disk_num)
            virsh.destroy(vm_name)

        if setup_controller:
            contr_dict = {
                'controller_type': 'pci',
                'controller_model': contr_model,
                'controller_target': contr_target
            }
            contr_obj = libvirt.create_controller_xml(contr_dict)
            vm_xml_obj.add_device(contr_obj)
            logging.debug("Add a controller: %s" % contr_obj)

        virsh.define(vm_xml_obj.xml, options=define_option, **virsh_options)
        vm_xml = VMXML.new_from_dumpxml(vm_name)
        ret_indexes = libvirt_pcicontr.get_max_contr_indexes(
            vm_xml, 'pci', contr_model)
        if not ret_indexes:
            test.error("Can't find the controller index for model "
                       "'{}'".format(contr_model))
        contr_index = ret_indexes[0]
        if attach_extra and attach_extra.count('--address '):
            attach_extra = attach_extra % ("%02x" % int(contr_index))
        if err_msg and err_msg.count('%s'):
            err_msg = err_msg % contr_index
        if not save_restore:
            disk_max = int(hotplug_counts) if hotplug_counts else 1
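            # Create one small qcow2 image per disk to attach; paths look
            # like <tmp_dir>/disk0.qcow2 ... disk<N-1>.qcow2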
            for disk_inx in range(0, disk_max):
                image_path = os.path.join(data_dir.get_tmp_dir(),
                                          'disk{}.qcow2'.format(disk_inx))
                image_path_list.append(image_path)
                libvirt.create_local_disk("file",
                                          image_path,
                                          '10M',
                                          disk_format='qcow2')
        if not hotplug and not save_restore:
            # Do coldplug before hotunplug to prepare the virtual device
            virsh.attach_disk(vm_name,
                              image_path,
                              target_dev,
                              extra=attach_extra,
                              **virsh_options)
        virsh.start(vm_name, **virsh_options)

        logging.debug("Test VM XML after starting:"
                      "\n%s", VMXML.new_from_dumpxml(vm_name))
        vm.wait_for_login().close()

        if restart_daemon:
            daemon_obj = Libvirtd()
            daemon_obj.restart()

        if save_restore:
            save_path = os.path.join(data_dir.get_tmp_dir(), 'rhel.save')
            virsh.save(vm_name, save_path, **virsh_options)
            time.sleep(10)
            virsh.restore(save_path, **virsh_options)
        # Create virtual device xml
        if hotplug:
            virsh_options.update({'ignore_status': True})
            attach_times = 1 if not hotplug_counts else int(hotplug_counts)

            if attach_times == 1:
                ret = virsh.attach_disk(vm_name,
                                        image_path_list[0],
                                        target_dev,
                                        extra=attach_extra,
                                        **virsh_options)
                libvirt.check_result(ret, expected_fails=err_msg)
            else:
                for attach_inx in range(0, attach_times):
                    disk_dev = 'vd{}'.format(chr(98 + attach_inx))
                    ret = virsh.attach_disk(vm_name,
                                            image_path_list[attach_inx],
                                            disk_dev,
                                            extra=attach_extra,
                                            **virsh_options)
                    if ret.exit_status and not addr_twice:
                        break
                libvirt.check_result(ret, expected_fails=err_msg)
        if not hotplug and check_within_guest:
            virsh_options.update({'ignore_status': True})
            ret = virsh.detach_disk(vm_name, target_dev, **virsh_options)
            libvirt.check_result(ret, expected_fails=err_msg)
        logging.debug(VMXML.new_from_dumpxml(vm_name))
        if check_disk_xml:
            time.sleep(5)
            check_guest_disks(hotplug)
        if check_cntl_xml:
            check_guest_contr()
        if hotplug_counts and not addr_twice:
            check_multi_attach(get_disk_bus())
        if check_within_guest:
            check_inside_guest(hotplug)

    finally:
        vm_xml_backup.sync()
Example No. 15
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of disk

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def reboot():
        """
        Shutdown and restart guest, then wait for login
        """
        vm.destroy()
        vm.start()
        vm.wait_for_login()

    def attach(xml, device_name, plug_method="hot"):
        """
        Attach device with xml, for both hot and cold plug

        :param xml: Device xml to be attached
        :param device_name: Device name to be attached
        :param plug_method: hot or cold for plug method
        """
        device_before_plug = find_device(vm, params)
        with open(xml) as disk_file:
            logging.debug("Attach disk by XML: %s", disk_file.read())
        file_arg = xml
        if plug_method == "cold":
            file_arg += ' --config'
        s_attach = virsh.attach_device(domainarg=vm_name,
                                       filearg=file_arg,
                                       debug=True)
        libvirt.check_exit_status(s_attach)
        if plug_method == "cold":
            reboot()
        detect_time = int(params.get("detect_disk_time", 20))
        plug_disks = utils_misc.wait_for(
            lambda: get_new_device(device_before_plug,
                                   find_device(vm, params)),
            detect_time)
        if not plug_disks:
            test.fail("Failed to hotplug device %s to guest" % device_name)
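
    # A typical call (hypothetical xml path): attach('/tmp/disk.xml', 'vdb',
    # plug_method='cold') persists the device with --config and reboots the
    # guest so the change takes effect.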

    def detach(xml, device_name, unplug_method="hot"):
        """
        Detach device with xml, for both hot and cold unplug

        :param xml: Device xml to be detached
        :param device_name: Device name to be detached
        :param unplug_method: hot or cold for unplug method
        """
        with open(xml) as disk_file:
            logging.debug("Detach device by XML: %s", disk_file.read())
        file_arg = xml
        if unplug_method == "cold":
            file_arg = xml + ' --config'
        s_detach = virsh.detach_device(domainarg=vm_name,
                                       filearg=file_arg,
                                       debug=True)
        if unplug_method == "cold":
            reboot()
        libvirt.check_exit_status(s_detach)

    def attach_disk():  # pylint: disable=W0611
        """
        Sub test for attach disk, including hot and cold plug/unplug
        """
        plug_method = params.get("plug_method", "hot")
        device_source_format = params.get("at_disk_source_format", "raw")
        device_target = params.get("at_disk_target", "vdb")
        device_disk_bus = params.get("at_disk_bus", "virtio")
        device_source_name = params.get("at_disk_source", "attach.img")
        detect_time = int(params.get("detect_disk_time", 10))
        device_source_path = os.path.join(tmp_dir, device_source_name)
        device_source = libvirt.create_local_disk(
            "file",
            path=device_source_path,
            size="1",
            disk_format=device_source_format)

        def _generate_disk_xml():
            """Generate xml for device hotplug/unplug usage"""
            diskxml = devices.disk.Disk("file")
            diskxml.device = "disk"
            source_params = {"attrs": {'file': device_source}}
            diskxml.source = diskxml.new_disk_source(**source_params)
            diskxml.target = {'dev': device_target, 'bus': device_disk_bus}
            if params.get("disk_model"):
                diskxml.model = params.get("disk_model")
            if pci_bridge_index and device_disk_bus == 'virtio':
                addr = diskxml.new_disk_address('pci')
                addr.set_attrs({'bus': pci_bridge_index, 'slot': slot})
                diskxml.address = addr
            return diskxml.xml
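
        # The generated XML looks roughly like (illustrative values):
        #   <disk type='file' device='disk'>
        #     <source file='.../attach.img'/>
        #     <target dev='vdb' bus='virtio'/>
        #     <address type='pci' bus='0x03' slot='0x01'/>
        #   </disk>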

        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        slot = get_free_slot(pci_bridge_index, v_xml)
        disk_xml = _generate_disk_xml()
        attach(disk_xml, device_target, plug_method)
        if plug_method == "cold":
            disk_xml = _generate_disk_xml()
        detach(disk_xml, device_target, plug_method)
        if not utils_misc.wait_for(
                lambda: not libvirt.device_exists(vm, device_target),
                detect_time):
            test.fail("Detach disk failed.")

    def attach_controller():  # pylint: disable=W0611
        """
        Sub test for attach controller
        """
        v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        contr_index = len(v_xml.get_controllers('scsi'))
        contr_type = params.get("controller_type", 'scsi')
        contr_model = params.get("controller_model", "virtio-scsi")
        contr_dict = {
            'controller_type': contr_type,
            'controller_model': contr_model,
            'controller_index': contr_index
        }
        if pci_bridge_index:
            slot = get_free_slot(pci_bridge_index, v_xml)
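            # The address is passed as a dict-like string, e.g.
            # '{"bus": 0x03, "slot": 0x02}'; the helper is assumed to convert
            # it into an <address type='pci' .../> element on the controller.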
            addr = '{"bus": %s, "slot": %s}' % (pci_bridge_index, slot)
            contr_dict.update({'controller_addr': addr})
        xml = libvirt.create_controller_xml(contr_dict=contr_dict)
        attach(xml, params['controller_model'])
        xml = libvirt.create_controller_xml(contr_dict=contr_dict)
        detach(xml, params['controller_model'])

    def snapshot():  # pylint: disable=W0611
        """
        Sub test for snapshot
        """
        for i in range(1, 4):
            ret = virsh.snapshot_create_as(vm_name, "sn%s --disk-only" % i)
            libvirt.check_exit_status(ret)
        process.system("systemctl restart libvirtd")
        save_path = os.path.join(tmp_dir, "test.save")
        ret = virsh.save(vm_name, save_path)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_path)
        libvirt.check_exit_status(ret)
        session = vm.wait_for_login()
        session.close()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    pci_bridge_index = None
    tmp_dir = data_dir.get_tmp_dir()
    guest_src_url = params.get("guest_src_url")

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    if guest_src_url:

        def _download():
            download_cmd = ("wget %s -O %s" % (guest_src_url, target_path))
            if process.system(download_cmd, shell=True):
                test.error("Failed to download file")

        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            if utils_package.package_install("wget"):
                _download()
            else:
                test.error("Failed to install wget")
        params["blk_source_name"] = target_path

    if add_pcie_to_pci_bridge:
        pci_controllers = vmxml.get_controllers('pci')
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-to-pci-bridge':
                pci_bridge = controller
                break
        else:
            contr_dict = {
                'controller_type': 'pci',
                'controller_model': 'pcie-to-pci-bridge'
            }
            pci_bridge = libvirt.create_controller_xml(contr_dict,
                                                       "add_controller",
                                                       vm_name)
        pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))
    try:
        if (params["os_variant"] == 'rhel6'
                or 'rhel6' in params.get("shortname", '')):
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        if pci_bridge_index:
            v_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if params.get("disk_target_bus") == "scsi":
                scsi_controllers = v_xml.get_controllers('scsi')
                for controller in scsi_controllers:
                    controller.find('address').set('bus', pci_bridge_index)
                    controller.find('address').set(
                        'slot', get_free_slot(pci_bridge_index, v_xml))
            else:
                disks = v_xml.get_devices(device_type="disk")
                for disk in disks:
                    args = {
                        'bus': pci_bridge_index,
                        'slot': get_free_slot(pci_bridge_index, v_xml)
                    }
                    libvirt.set_disk_attr(v_xml, disk.target['dev'], 'address',
                                          args)
            v_xml.xmltreefile.write()
            v_xml.sync()
        if not vm.is_alive():
            vm.start()
        vm.wait_for_login()
        test_step = params.get("sub_test_step")
        if test_step:
            eval(test_step)()
    finally:
        vm.destroy()
        libvirt.clean_up_snapshots(vm_name)
        backup_xml.sync()
Example No. 16
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of interface

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def reboot():
        """
        Shutdown and restart guest, then wait for login
        """
        vm.destroy()
        vm.start()
        return vm.wait_for_login()

    def check_plug_to_pci_bridge(vm_name, mac):
        """
        Check if the nic is plugged onto pcie-to-pci-bridge

        :param vm_name: Vm name
        :param mac:  The mac address of plugged interface
        :return: True if plugged onto pcie-to-pci-bridge, otherwise False
        """
        v_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        interface = v_xml.get_iface_all()[mac]
        bus = int(interface.find('address').get('bus'), 16)
        controllers = vmxml.get_controllers('pci')
        for controller in controllers:
            if int(controller.get('index')) == bus:
                if controller.get('model') == 'pcie-to-pci-bridge':
                    return True
                break
        return False
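
    # e.g. a bus attribute of '0x05' parses to 5, which is then matched
    # against the index of the pcie-to-pci-bridge controller.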

    def detect_new_nic(mac):
        """
        Detect the new interface by domiflist

        :param mac: The mac address of plugged interface
        :return: plugged interface name
        """
        def check_mac_exist():
            all_infos = libvirt.get_interface_details(vm_name)
            for nic_info in all_infos:
                if nic_info.get('mac') == mac:
                    return nic_info.get('interface')
            return False

        plugged_nic = utils_misc.wait_for(check_mac_exist, 5)
        if not plugged_nic:
            test.fail("Failed to plug device %s" % mac)
        return plugged_nic

    def renew_ip_address(session, mac, guest_os_type):
        """
        Renew ip for plugged nic

        :param session: Vm session
        :param mac: The mac address of plugged interface
        :param guest_os_type: Guest os type, Linux or Windows
        """
        if guest_os_type == 'Windows':
            utils_net.restart_windows_guest_network_by_key(
                session, "macaddress", mac)
            return
        ifname = utils_net.get_linux_ifname(session, mac)
        utils_net.create_network_script(ifname, mac, 'dhcp', '255.255.255.0')
        utils_net.restart_guest_network(session, mac)
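        # Flush stale ARP entries: lines of `arp -n` that start with a digit
        # are IP addresses; build and run an `arp -d <ip>` for each of them.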
        arp_clean = "arp -n|awk '/^[1-9]/{print \"arp -d \" $1}'|sh"
        session.cmd_output_safe(arp_clean)

    def get_hotplug_nic_ip(vm, nic, session, guest_os_type):
        """
        Get the ip of the plugged interface

        :param vm: Vm object
        :param nic: Nic object
        :param session: Vm session
        :param guest_os_type: Guest os type, Linux or Windows
        :return: Nic ip
        """
        def __get_address():
            """
            Get ip address and return it; configure a new ip if the device
            exists but has no ip

            :return: Ip address if obtained, otherwise None
            """
            try:
                index = [
                    _idx for _idx, _nic in enumerate(vm.virtnet) if _nic == nic
                ][0]
                return vm.wait_for_get_address(index, timeout=90)
            except IndexError:
                test.error("Nic '%s' not exists in VM '%s'" %
                           (nic["nic_name"], vm.name))
            except (virt_vm.VMIPAddressMissingError,
                    virt_vm.VMAddressVerificationError):
                renew_ip_address(session, nic["mac"], guest_os_type)
            return

        # Wait until an ip address is configured for the nic device
        nic_ip = utils_misc.wait_for(__get_address, timeout=360)
        if nic_ip:
            return nic_ip
        cached_ip = vm.address_cache.get(nic["mac"])
        arps = process.system_output("arp -aen").decode()
        logging.debug("Can't get IP address:")
        logging.debug("\tCached IP: %s", cached_ip)
        logging.debug("\tARP table: %s", arps)
        return None

    def check_nic_removed(mac, session):
        """
        Check if the nic with the given mac address has been removed
        from the guest

        :param mac: The mac address of the unplugged interface
        :param session: Vm session
        :return: True if the nic is gone, False otherwise
        """
        except_mesg = ''
        try:
            if guest_os_type == 'Windows':
                except_mesg = "Get nic netconnectionid failed"
                utils_net.restart_windows_guest_network_by_key(
                    session, "macaddress", mac)
            else:
                except_mesg = ("Failed to determine interface"
                               " name with mac %s" % mac)
                utils_net.get_linux_ifname(session, mac)
        except exceptions.TestError as e:
            if except_mesg in str(e):
                return True
        else:
            return False

    def attach_nic():  # pylint: disable=W0611
        """
        Attach interface, by xml or cmd, for both hot and cold plug
        """
        def create_iface_xml(mac):
            """
            Create interface xml file

            :param mac: The mac address of nic device
            """
            iface = Interface(type_name='network')
            iface.source = iface_source
            iface.model = iface_model
            iface.mac_address = mac
            logging.debug("Create new interface xml: %s", iface)
            return iface

        plug_method = params.get('plug_method', 'interface')
        cold_plug = params.get('cold_plug', 'no')
        mac = utils_net.generate_mac_address_simple()
        iface_source = {'network': 'default'}
        iface_model = params["virtio_model"]
        options = ("network %s --model %s --mac %s" %
                   (iface_source['network'], iface_model, mac))
        nic_params = {
            'mac': mac,
            'nettype': params['nettype'],
            'ip_version': 'ipv4'
        }
        if cold_plug == "yes":
            options += ' --config'
        if plug_method == 'interface':  # Hotplug nic via attach-interface
            ret = virsh.attach_interface(vm_name, options, ignore_status=True)
        else:  # Hotplug nic via attach-device
            nic_xml = create_iface_xml(mac)
            nic_xml.xmltreefile.write()
            xml_file = nic_xml.xml
            with open(xml_file) as nic_file:
                logging.debug("Attach device by XML: %s", nic_file.read())
            ret = virsh.attach_device(domainarg=vm_name,
                                      filearg=xml_file,
                                      debug=True)
        libvirt.check_exit_status(ret)
        if cold_plug == "yes":
            reboot()  # Reboot guest if it is cold plug test
        detect_new_nic(mac)
        if plug_method == 'interface' and cold_plug == 'no':
            check_plug_to_pci_bridge(vm_name, mac)
        session = vm.wait_for_login(serial=True)
        # Add nic to VM object for further check
        nic_name = vm.add_nic(**nic_params)["nic_name"]
        nic = vm.virtnet[nic_name]
        # Configure ip inside guest for the newly added nic
        if not utils_misc.wait_for(
                lambda: get_hotplug_nic_ip(vm, nic, session, guest_os_type),
                timeout=30):
            test.fail("Cannot find plugged nic %s in guest" % mac)
        options = ("network --mac %s" % mac)
        if cold_plug == "yes":
            options += ' --config'
        # Detach nic device
        if plug_method == 'interface':
            ret = virsh.detach_interface(vm_name, options, ignore_status=True)
        else:
            with open(xml_file) as nic_file:
                logging.debug("Detach device by XML: %s", nic_file.read())
            ret = virsh.detach_device(domainarg=vm_name,
                                      filearg=xml_file,
                                      debug=True)
        libvirt.check_exit_status(ret)
        if cold_plug == "yes":
            session = reboot()  # Reboot guest if it is cold plug test
        # Check if nic is removed from guest
        if not utils_misc.wait_for(lambda: check_nic_removed(mac, session),
                                   timeout=30):
            test.fail("The nic %s still exists in guest after being unplugged"
                      % nic_name)

    def save_restore():  # pylint: disable=W0611
        """
        Sub test for save and restore
        """
        save_path = os.path.join(data_dir.get_tmp_dir(),
                                 '%s.save' % params['os_variant'])
        ret = virsh.save(vm_name, save_path)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_path)
        libvirt.check_exit_status(ret)

    def ping_test(restart_network=False):
        """
        Basic ping test for interface

        :param restart_network: True or False. Whether to restart network
        :raise: test.fail if ping test fails
        """
        session = vm.wait_for_login()
        if restart_network:
            utils_net.restart_guest_network(session)

        dest = params.get('ping_dest', 'www.baidu.com')
        status, output = utils_test.ping(dest, 10, session=session, timeout=20)
        session.close()
        if status != 0:
            test.fail("Ping failed, status: %s,"
                      " output: %s" % (status, output))

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    guest_src_url = params.get("guest_src_url")
    params['disk_model'] = params['virtio_model']
    guest_os_type = params['os_type']

    target_path = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path
    libvirt.set_vm_disk(vm, params)

    # Add a pcie-to-pci-bridge when there is none
    pci_controllers = vmxml.get_controllers('pci')
    for controller in pci_controllers:
        if controller.get('model') == 'pcie-to-pci-bridge':
            break
    else:
        contr_dict = {
            'controller_type': 'pci',
            'controller_model': 'pcie-to-pci-bridge'
        }
        cntl_add = libvirt.create_controller_xml(contr_dict)
        libvirt.add_controller(vm_name, cntl_add)
    try:  # Update interface model as defined
        iface_params = {'model': params['virtio_model']}
        libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        if not vm.is_alive():
            vm.start()
        # Test if nic works well via ping
        ping_test()
        test_step = params.get("sub_test_step")
        if test_step:
            eval(test_step)()
            # Test if nic still works well after sub-step tests
            ping_test(True)
    finally:
        vm.destroy()
        backup_xml.sync()

        if guest_src_url and target_path:
            libvirt.delete_local_disk("file", path=target_path)
Example No. 17
def run(test, params, env):
    """
    Test detach-device-alias command with
    --config, --live, --current

    1. Test hostdev device detach
    2. Test scsi controller device detach
    3. Test redirect device detach
    4. Test channel devices detach
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    detach_options = params.get("detach_alias_options", "")
    detach_check_xml = params.get("detach_check_xml")
    # hostdev device params
    hostdev_type = params.get("detach_hostdev_type", "")
    hostdev_managed = params.get("detach_hostdev_managed")
    # controller params
    contr_type = params.get("detach_controller_type")
    contr_model = params.get("detach_controller_mode")
    # redirdev params
    redir_type = params.get("detach_redirdev_type")
    redir_bus = params.get("detach_redirdev_bus")
    # channel params
    channel_type = params.get("detach_channel_type")
    channel_target = eval(params.get("detach_channel_target", "{}"))

    device_alias = "ua-" + str(uuid.uuid4())
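    # libvirt only honors user-defined device aliases carrying the "ua-"
    # prefix, hence the generated name above.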

    def get_usb_info():
        """
        Get local host usb info

        :return: usb vendor and product id
        """
        process.run("yum install usbutils* -y", shell=True)
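        # lsusb prints lines like "Bus 001 Device 002: ID 8087:0024 ...";
        # the awk program below emits "8087:0024:001:002:" per device and
        # rstrip(':') trims the trailing colon.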
        result = process.run("lsusb|awk '{print $6\":\"$2\":\"$4}'",
                             shell=True)
        if not result.exit_status:
            return result.stdout_text.rstrip(':')
        else:
            test.error("Can not get usb hub info for testing")

    # backup xml
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    if not vm.is_alive():
        vm.start()
    # wait for vm to start successfully
    vm.wait_for_login()

    if hostdev_type:
        if hostdev_type in ["usb", "scsi"]:
            if hostdev_type == "usb":
                pci_id = get_usb_info()
            elif hostdev_type == "scsi":
                source_disk = libvirt.create_scsi_disk(scsi_option="",
                                                       scsi_size="8")
                pci_id = get_scsi_info(source_disk)
            device_xml = libvirt.create_hostdev_xml(pci_id=pci_id,
                                                    dev_type=hostdev_type,
                                                    managed=hostdev_managed,
                                                    alias=device_alias)
        else:
            test.error("Hostdev type %s not handled by test."
                       " Please check code." % hostdev_type)
    if contr_type:
        controllers = vmxml.get_controllers(contr_type)
        contr_index = len(controllers) + 1
        contr_dict = {
            "controller_type": contr_type,
            "controller_model": contr_model,
            "controller_index": contr_index,
            "contr_alias": device_alias
        }
        device_xml = libvirt.create_controller_xml(contr_dict)
        detach_check_xml = detach_check_xml % contr_index

    if redir_type:
        device_xml = libvirt.create_redirdev_xml(redir_type, redir_bus,
                                                 device_alias)

    if channel_type:
        channel_params = {'channel_type_name': channel_type}
        channel_params.update(channel_target)
        device_xml = libvirt.create_channel_xml(channel_params,
                                                device_alias).xml

    try:
        dump_option = ""
        if "--config" in detach_options:
            dump_option = "--inactive"

        # Attach xml to domain
        logging.info("Attach xml is %s" %
                     process.run("cat %s" % device_xml).stdout_text)
        virsh.attach_device(vm_name,
                            device_xml,
                            flagstr=detach_options,
                            debug=True,
                            ignore_status=False)
        domxml_at = virsh.dumpxml(vm_name, dump_option,
                                  debug=True).stdout.strip()
        if detach_check_xml not in domxml_at:
            test.error("Can not find %s in domxml after attach" %
                       detach_check_xml)

        # Detach xml with alias
        result = virsh.detach_device_alias(vm_name,
                                           device_alias,
                                           detach_options,
                                           debug=True)
        time.sleep(10)
        libvirt.check_exit_status(result)
        domxml_dt = virsh.dumpxml(vm_name, dump_option,
                                  debug=True).stdout.strip()
        if detach_check_xml in domxml_dt:
            test.fail("Still can find %s in domxml" % detach_check_xml)
    finally:
        backup_xml.sync()
        if hostdev_type == "scsi":
            libvirt.delete_scsi_disk()
def run(test, params, env):
    """
    Test virtio/virtio-transitional model of serial device

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def get_free_pci_slot():
        """
        Get a free slot from pcie-to-pci-bridge

        :return: The free slot
        """
        used_slot = []
        for dev in pci_devices:
            address = dev.find('address')
            if (address is not None
                    and address.get('bus') == pci_bridge_index):
                used_slot.append(address.get('slot'))
        for slot_index in range(1, 30):
            slot = "%0#4x" % slot_index
            if slot not in used_slot:
                return slot
        return None
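
    # '%0#4x' yields the zero-padded alternate hex form libvirt uses for
    # slot attributes, e.g. 1 -> '0x01' and 29 -> '0x1d'.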

    def test_data_transfer(dev_type):
        """
        Test data transfer between guest and host via console/serial device

        :param dev_type: The device type to be tested, console or channel
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        console_xml = vmxml.xmltreefile.find('devices').find(dev_type)
        host_path = console_xml.find('source').get('path')
        guest_path = '/dev/hvc0' if dev_type == 'console' else '/dev/vport0p1'
        test_message = 'virtiochannel'
        cat_cmd = "cat %s" % host_path
        logfile = "test_data_transfer-%s.log" % dev_type
        host_process = aexpect.ShellSession(cat_cmd,
                                            auto_close=False,
                                            output_func=utils_misc.log_line,
                                            output_params=(logfile, ))
        guest_session = vm.wait_for_login()
        guest_session.cmd_output('echo %s > %s' % (test_message, guest_path))
        guest_session.close()
        try:
            host_process.read_until_last_line_matches(test_message, timeout=10)
        except aexpect.exceptions.ExpectError as e:
            test.fail('Did not catch the expected output from host side,'
                      ' the detail of the failure: %s' % str(e))
        finally:
            host_process.close()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']

    # Download and replace image when guest_src_url provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path

    # Add pcie-to-pci-bridge when it is required
    if add_pcie_to_pci_bridge:
        pci_controllers = vmxml.get_controllers('pci')
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-to-pci-bridge':
                pci_bridge = controller
                break
        else:
            contr_dict = {
                'controller_type': 'pci',
                'controller_model': 'pcie-to-pci-bridge'
            }
            pci_bridge = libvirt.create_controller_xml(contr_dict,
                                                       "add_controller",
                                                       vm_name)
        pci_bridge_index = '%0#4x' % int(pci_bridge.get("index"))

    try:
        # Update interface to virtio-transitional mode for
        # rhel6 guest so that login works
        iface_params = {'model': 'virtio-transitional'}
        libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        # vmxml is stale after set_vm_disk, which syncs the domain
        # from a fresh dump inside the function, so dump it again
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Remove all current serial devices
        vmxml.remove_all_device_by_type('serial')
        vmxml.remove_all_device_by_type('channel')
        vmxml.remove_all_device_by_type('console')
        vmxml.del_controller('virtio-serial')
        vmxml.sync()

        # Add virtio-serial with right model
        contr_dict = {
            'controller_type': 'virtio-serial',
            'controller_model': virtio_model
        }
        if add_pcie_to_pci_bridge:
            pci_devices = vmxml.xmltreefile.find('devices').getchildren()
            slot = get_free_pci_slot()
            addr = '{"bus": %s, "slot": %s}' % (pci_bridge_index, slot)
            contr_dict.update({'controller_addr': addr})
        libvirt.create_controller_xml(contr_dict, "add_controller", vm_name)
        # vmxml is stale again after add_controller changed the
        # definition, so dump it once more
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Add channel and console device attached to virtio-serial bus
        target_dict = {'type': 'virtio', 'name': 'ptychannel'}
        address_dict = {'type': 'virtio-serial', 'controller': '0', 'bus': '0'}
        channel_xml = Channel('pty')
        channel_xml.target = target_dict
        channel_xml.address = address_dict
        console_xml = Console()
        console_xml.target_port = '0'
        console_xml.target_type = 'virtio'
        vmxml.add_device(channel_xml)
        vmxml.add_device(console_xml)
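        # The added devices serialize roughly as (illustrative):
        #   <channel type='pty'>
        #     <target type='virtio' name='ptychannel'/>
        #     <address type='virtio-serial' controller='0' bus='0'/>
        #   </channel>
        #   <console type='pty'>
        #     <target type='virtio' port='0'/>
        #   </console>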
        vmxml.sync()
        if vm.is_alive():
            vm.destroy()
        vm.start(autoconsole=False)

        # Test data transfer via console and channel devices
        test_data_transfer('console')
        test_data_transfer('channel')
    finally:
        vm.destroy()
        backup_xml.sync()
def run(test, params, env):
    """
    Test detach-device-alias command with
    --config, --live, --current

    1. Test hostdev device detach
    2. Test scsi controller device detach
    3. Test redirect device detach
    4. Test channel devices detach
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    detach_options = params.get("detach_alias_options", "")
    detach_check_xml = params.get("detach_check_xml")
    # hostdev device params
    hostdev_type = params.get("detach_hostdev_type", "")
    hostdev_managed = params.get("detach_hostdev_managed")
    # controller params
    contr_type = params.get("detach_controller_type")
    contr_model = params.get("detach_controller_mode")
    # redirdev params
    redir_type = params.get("detach_redirdev_type")
    redir_bus = params.get("detach_redirdev_bus")
    # channel params
    channel_type = params.get("detach_channel_type")
    channel_target = eval(params.get("detach_channel_target", "{}"))
    # watchdog params
    watchdog_type = params.get("detach_watchdog_type")
    watchdog_dict = eval(params.get('watchdog_dict', '{}'))

    device_alias = "ua-" + str(uuid.uuid4())

    def check_detached_xml_noexist():
        """
        Check detached xml does not exist in the guest dumpxml

        :return: True if it does not exist, False if still exists
        """
        domxml_dt = virsh.dumpxml(vm_name, dump_option).stdout_text.strip()
        if detach_check_xml not in domxml_dt:
            return True
        else:
            return False

    def get_usb_info():
        """
        Get local host usb info

        :return: usb vendor and product id
        """
        process.run("yum install usbutils* -y", shell=True)
        result = process.run("lsusb|awk '{print $6\":\"$2\":\"$4}'",
                             shell=True)
        if not result.exit_status:
            return result.stdout_text.rstrip(':')
        else:
            test.error("Can not get usb hub info for testing")

    # backup xml
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    device_xml = None
    attach_device = True

    if not vm.is_alive():
        vm.start()
    # wait for vm to start successfully
    vm.wait_for_login()

    if hostdev_type:
        if hostdev_type in ["usb", "scsi"]:
            if hostdev_type == "usb":
                pci_id = get_usb_info()
            elif hostdev_type == "scsi":
                source_disk = libvirt.create_scsi_disk(scsi_option="",
                                                       scsi_size="8")
                pci_id = get_scsi_info(source_disk)
            device_xml = libvirt.create_hostdev_xml(pci_id=pci_id,
                                                    dev_type=hostdev_type,
                                                    managed=hostdev_managed,
                                                    alias=device_alias)
        else:
            test.error("Hostdev type %s not handled by test."
                       " Please check code." % hostdev_type)
    if contr_type:
        controllers = vmxml.get_controllers(contr_type)
        contr_index = len(controllers) + 1
        contr_dict = {
            "controller_type": contr_type,
            "controller_model": contr_model,
            "controller_index": contr_index,
            "contr_alias": device_alias
        }
        device_xml = libvirt.create_controller_xml(contr_dict)
        detach_check_xml = detach_check_xml % contr_index

    if redir_type:
        device_xml = libvirt.create_redirdev_xml(redir_type, redir_bus,
                                                 device_alias)

    if channel_type:
        channel_params = {'channel_type_name': channel_type}
        channel_params.update(channel_target)
        device_xml = libvirt.create_channel_xml(channel_params, device_alias)

    if watchdog_type:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.remove_all_device_by_type('watchdog')

        device_xml_file = Watchdog()
        device_xml_file.update({"alias": {"name": device_alias}})
        device_xml_file.setup_attrs(**watchdog_dict)
        devices = vmxml.devices
        devices.append(device_xml_file)
        vmxml.devices = devices
        vmxml.xmltreefile.write()
        vmxml.sync()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug('The vmxml after attaching watchdog is: %s', vmxml)

        if not vm.is_alive():
            vm.start()
        vm.wait_for_login().close()

        attach_device = False

    try:
        dump_option = ""
        wait_event = True
        if "--config" in detach_options:
            dump_option = "--inactive"
            wait_event = False

        # Attach xml to domain
        if attach_device:
            logging.info("Attach xml is %s" %
                         process.run("cat %s" % device_xml.xml).stdout_text)
            virsh.attach_device(vm_name,
                                device_xml.xml,
                                flagstr=detach_options,
                                debug=True,
                                ignore_status=False)

        domxml_at = virsh.dumpxml(vm_name, dump_option,
                                  debug=True).stdout.strip()
        if detach_check_xml not in domxml_at:
            test.error("Can not find %s in domxml after attach" %
                       detach_check_xml)

        # Detach xml with alias
        result = virsh.detach_device_alias(vm_name,
                                           device_alias,
                                           detach_options,
                                           wait_for_event=wait_event,
                                           event_timeout=20,
                                           debug=True)
        libvirt.check_exit_status(result)
        if not utils_misc.wait_for(
                check_detached_xml_noexist,
                60,
                step=2,
                text="Repeatedly search guest dumpxml with detached xml"):
            test.fail("Still can find %s in domxml" % detach_check_xml)
    finally:
        backup_xml.sync()
        if hostdev_type == "scsi":
            libvirt.delete_scsi_disk()