def unhotplug_serial_device(hotplug_type, char_dev, index=1):
    if hotplug_type == "qmp":
        del_dev_opt = "device_del %s%s" % (char_dev, index)
        del_char_opt = "chardev-remove %s%s" % (char_dev, index)
        virsh.qemu_monitor_command(vm_name, del_dev_opt, "--hmp")
        virsh.qemu_monitor_command(vm_name, del_char_opt, "--hmp")
    elif hotplug_type == "attach":
        xml_file = "%s/xml_%s%s" % (tmp_dir, char_dev, index)
        virsh.detach_device(vm_name, xml_file, flagstr="--live")
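The helper above relies on vm_name and tmp_dir from its enclosing test scope. A minimal usage sketch (hypothetical: it assumes a chardev named "serial" was hotplugged earlier with the default index of 1):

    # Sketch only; vm_name and tmp_dir come from the enclosing test.
    unhotplug_serial_device("qmp", "serial")     # removes "serial1" via the HMP monitor
    unhotplug_serial_device("attach", "serial")  # detaches using tmp_dir/xml_serial1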
Example #2
def unhotplug_serial_device(hotplug_type, char_dev):
    if hotplug_type == "qmp":
        del_dev_opt = "device_del %s" % char_dev
        del_char_opt = "chardev-remove %s" % char_dev
        result = virsh.qemu_monitor_command(vm_name, del_dev_opt, "--hmp")
        if result.exit_status:
            raise error.TestError('Failed to del device %s from %s. Result:\n%s'
                                  % (char_dev, vm_name, result))
        result = virsh.qemu_monitor_command(vm_name, del_char_opt, "--hmp")
    elif hotplug_type == "attach":
        # xml_file is expected to be provided by the enclosing test.
        result = virsh.detach_device(vm_name, xml_file)
def device_hotunplug():
    result = virsh.detach_device(vm_name, dev.xml,
                                 flagstr="--live", debug=True)
    if result.exit_status:
        test.fail(result.stdout.strip())
    else:
        logging.debug(result.stdout.strip())
    # FIXME: after detaching the device from the guest, allow some time
    # before performing any other operation on the device.
    time.sleep(timeout)
    if not libvirt_version.version_compare(3, 10, 0):
        pci_devs.sort()
        reattach_device(pci_devs, pci_ids)
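The fixed time.sleep() above is fragile; later examples poll with utils_misc.wait_for instead. A hedged sketch of the same idea for this test, assuming the detached device is the guest's only hostdev and that the vm_xml and utils_misc helpers used elsewhere in these examples are imported:

    # Poll the live XML until the hostdev device disappears, instead of sleeping.
    def _hostdev_gone():
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        return not live_xml.devices.by_device_tag("hostdev")

    utils_misc.wait_for(_hostdev_gone, timeout=timeout)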
Example #4
    def detach_interface():
        """
            Detach interface:

            1.Detach interface from xml;
            2.Check the live xml after detach interface;
            3.Check the vf driver after detach interface.
        """
        def _detach_completed():
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1

        result = virsh.detach_device(vm_name, new_iface.xml)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)
        if vf_type in ("vf", "vf_pool"):
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail("The hostdev interface still in the guest xml after detach\n")
                    break
            driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            logging.debug("The driver after vf detached from guest is %s\n", driver)
            if managed == "no":
                if driver != "vfio-pci":
                    test.fail("The vf pci driver is not vfio-pci after detached from guest with managed as no\n")
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result, expect_error=False)
            elif driver != origin_driver:
                test.fail("The vf pci driver was not reset to the original driver after detaching from the guest: %s vs %s\n" % (driver, origin_driver))
        else:
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail("The macvtap interface still exist in the guest xml after detach\n")
                    break
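For reference, the hostdev-type interface whose address this check compares against vf_addr_attrs looks roughly like this in the domain XML (the PCI address values are illustrative):

    <interface type='hostdev' managed='no'>
      <mac address='52:54:00:aa:bb:cc'/>
      <source>
        <address type='pci' domain='0x0000' bus='0x03' slot='0x10' function='0x2'/>
      </source>
    </interface>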
Example #5
def run(test, params, env):
    """
    Test multiple disks attachment.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare disk image.
    3. Edit disks xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def prepare_gluster_disk(disk_img, disk_format):
        """
        Setup glusterfs and prepare disk image.
        """
        # Get the image path
        image_source = vm.get_first_disk_devices()['source']

        # Setup gluster
        host_ip = libvirt.setup_or_cleanup_gluster(True, vol_name,
                                                   brick_path, pool_name)
        logging.debug("host ip: %s ", host_ip)
        image_info = utils_misc.get_image_info(image_source)
        image_dest = "/mnt/%s" % disk_img

        if image_info["format"] == disk_format:
            disk_cmd = ("cp -f %s %s" % (image_source, image_dest))
        else:
            # Convert the disk format
            disk_cmd = ("qemu-img convert -f %s -O %s %s %s" %
                        (image_info["format"], disk_format,
                         image_source, image_dest))

        # Mount the gluster disk and create the image.
        process.run("mount -t glusterfs %s:%s /mnt && "
                    "%s && chmod a+rw /mnt/%s && umount /mnt"
                    % (host_ip, vol_name, disk_cmd, disk_img),
                    shell=True)

        return host_ip

    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Try to rebuild disk xml
        """
        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu",
                       "type": disk_format,
                       "cache": "none"}
        if driver_iothread:
            driver_dict.update({"iothread": driver_iothread})
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vdb", "bus": "virtio"}
        if default_pool:
            utils_misc.mount("%s:%s" % (host_ip, vol_name),
                             default_pool, "glusterfs")
            process.run("setsebool virt_use_fusefs on", shell=True)
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {"protocol": "gluster",
                           "name": "%s/%s" % (vol_name, disk_img)}
            host_dict = [{"name": host_ip, "port": "24007"}]
            # If multiple_hosts is True, attempt to add multiple hosts.
            if multiple_hosts:
                host_dict.append({"name": params.get("dummy_host1"), "port": "24007"})
                host_dict.append({"name": params.get("dummy_host2"), "port": "24007"})
            if transport:
                host_dict[0]['transport'] = transport
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict, "hosts": host_dict})
        return disk_xml

    def test_pmsuspend(vm_name):
        """
        Test pmsuspend command.
        """
        if vm.is_dead():
            vm.start()
            vm.wait_for_login()
        # Create swap partition if necessary.
        if not vm.has_swap():
            swap_path = os.path.join(data_dir.get_tmp_dir(), 'swap.img')
            vm.create_swap_partition(swap_path)
        ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
        libvirt.check_exit_status(ret)
        # Wait for the vm to shut down.
        if not utils_misc.wait_for(lambda: vm.state() == "shut off", 60):
            test.fail("vm is still alive after S4 operation")

        # Wait for vm and qemu-ga service to start
        vm.start()
        # Prepare guest agent and start guest
        try:
            vm.prepare_guest_agent()
        except (remote.LoginError, virt_vm.VMError) as detail:
            test.fail("failed to prepare agent:\n%s" % detail)

        # TODO: This step may hang for a rhel6 guest.
        ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Check vm state
        if not utils_misc.wait_for(lambda: vm.state() == "pmsuspended", 60):
            test.fail("vm isn't suspended after S3 operation")

        ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if not vm.is_alive():
            test.fail("vm is not alive after dompmwakeup")

    # Disk specific attributes.
    pm_enabled = "yes" == params.get("pm_enabled", "no")
    gluster_disk = "yes" == params.get("gluster_disk", "no")
    disk_format = params.get("disk_format", "qcow2")
    vol_name = params.get("vol_name")
    transport = params.get("transport", "")
    default_pool = params.get("default_pool", "")
    pool_name = params.get("pool_name")
    driver_iothread = params.get("driver_iothread")
    dom_iothreads = params.get("dom_iothreads")
    brick_path = os.path.join(test.virtdir, pool_name)
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")

    # Gluster server multiple hosts flag.
    multiple_hosts = "yes" == params.get("multiple_hosts", "no")

    pre_vm_state = params.get("pre_vm_state", "running")

    # Destroy VM first.
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml = vmxml_backup.copy()
    mnt_src = ""
    try:
        # Build new vm xml.
        if pm_enabled:
            vm_xml.VMXML.set_pm_suspend(vm_name)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            logging.debug("Attempting to set guest agent channel")
            vmxml.set_agent_channel()
            vmxml.sync()

        if gluster_disk:
            # Setup glusterfs and disk xml.
            disk_img = "gluster.%s" % disk_format
            host_ip = prepare_gluster_disk(disk_img, disk_format)
            mnt_src = "%s:%s" % (host_ip, vol_name)
            global custom_disk
            custom_disk = build_disk_xml(disk_img, disk_format, host_ip)

        start_vm = "yes" == params.get("start_vm", "yes")

        # set domain options
        if dom_iothreads:
            try:
                vmxml.iothreads = int(dom_iothreads)
                vmxml.sync()
            except ValueError:
                # 'iothreads' may not be a valid number in negative tests
                logging.debug("Can't convert '%s' to integer type",
                              dom_iothreads)
        if default_pool:
            disks_dev = vmxml.get_devices(device_type="disk")
            for disk in disks_dev:
                vmxml.del_device(disk)
            vmxml.sync()

        # If hot plug, start VM first, otherwise stop VM if running.
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()

        # If gluster_disk is True, use attach_device.
        attach_option = params.get("attach_option", "")
        if gluster_disk:
            cmd_result = virsh.attach_device(domainarg=vm_name, filearg=custom_disk.xml,
                                             flagstr=attach_option,
                                             dargs=virsh_dargs, debug=True)
            libvirt.check_exit_status(cmd_result)

        # Turn VM into certain state.
        if pre_vm_state == "running":
            logging.info("Starting %s...", vm_name)
            if vm.is_dead():
                vm.start()
        elif pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.skip("can't create the domain")

        # Run the tests.
        if pm_enabled:
            # Make sure the guest agent is started
            try:
                vm.prepare_guest_agent()
            except (remote.LoginError, virt_vm.VMError) as detail:
                test.fail("failed to prepare agent: %s" % detail)
            # Run dompmsuspend command.
            test_pmsuspend(vm_name)
        if test_qemu_cmd:
            # Check qemu-kvm command line
            cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
            if transport == "rdma":
                cmd += " | grep gluster+%s.*format=%s" % (transport, disk_format)
            else:
                cmd += " | grep gluster.*format=%s" % disk_format
            if driver_iothread:
                cmd += " | grep iothread=iothread%s" % driver_iothread
            if process.run(cmd, ignore_status=True, shell=True).exit_status:
                test.fail("Can't see gluster option '%s' "
                          "in command line" % cmd)
        # Detach hot plugged device.
        if start_vm and not default_pool:
            if gluster_disk:
                ret = virsh.detach_device(vm_name, custom_disk.xml,
                                          flagstr=attach_option, dargs=virsh_dargs)
                libvirt.check_exit_status(ret)

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if utils_misc.is_mounted(mnt_src, default_pool, 'glusterfs'):
            process.run("umount %s" % default_pool,
                        ignore_status=True, shell=True)

        if gluster_disk:
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
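For reference, in the network (non-default_pool) case, build_disk_xml above produces a disk element roughly like the following; the host and volume values are illustrative:

    <disk type='network' device='disk'>
      <driver name='qemu' type='qcow2' cache='none'/>
      <source protocol='gluster' name='vol_name/gluster.qcow2'>
        <host name='192.168.122.1' port='24007'/>
      </source>
      <target dev='vdb' bus='virtio'/>
    </disk>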
Example #6
                if devices[i] == "cdrom":
                    dt_options = "--config"
                ret = virsh.detach_disk(vm_name, device_targets[i], dt_options)
                libvirt.check_exit_status(ret)
            # Check disks in VM after hotunplug.
            if check_patitions_hotunplug:
                if not check_vm_partitions(devices, device_targets, False):
                    raise error.TestFail("See device in VM after hotunplug")

        elif hotplug:
            for i in range(len(disks_xml)):
                if len(device_attach_error) > i:
                    if device_attach_error[i] == "yes":
                        continue
                ret = virsh.detach_device(vm_name,
                                          disks_xml[i].xml,
                                          flagstr=attach_option)
                os.remove(disks_xml[i].xml)
                libvirt.check_exit_status(ret)

            # Check disks in VM after hotunplug.
            if check_patitions_hotunplug:
                if not check_vm_partitions(devices, device_targets, False):
                    raise error.TestFail("See device in VM after hotunplug")

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
Example #8
def run(test, params, env):
    """
    Test memory management of nvdimm
    """
    vm_name = params.get('main_vm')

    nvdimm_file = params.get('nvdimm_file')
    check = params.get('check', '')
    qemu_checks = params.get('qemu_checks', '').split('`')
    test_str = 'This is a test!'

    def check_boot_config(session):
        """
        Check /boot/config-$KVER file
        """
        check_list = [
            'CONFIG_LIBNVDIMM=m',
            'CONFIG_BLK_DEV_PMEM=m',
            'CONFIG_ACPI_NFIT=m'
        ]
        current_boot = session.cmd('uname -r').strip()
        content = session.cmd('cat /boot/config-%s' % current_boot).strip()
        for item in check_list:
            if item in content:
                logging.info(item)
            else:
                logging.error(item)
                test.fail('/boot/config content not correct')

    def check_file_in_vm(session, path, expect=True):
        """
        Check whether the existence of the file meets expectation
        """
        exist = session.cmd_status('ls %s' % path)
        logging.debug(exist)
        exist = (exist == 0)
        status = 'does' if exist else 'does NOT'
        logging.info('File %s %s exist', path, status)
        if exist != expect:
            err_msg = 'Existence doesn\'t meet expectation: %s ' % path
            if expect:
                err_msg += 'should exist.'
            else:
                err_msg += 'should not exist.'
            test.fail(err_msg)

    def create_cpuxml():
        """
        Create cpu xml for test
        """
        cpu_params = {k: v for k, v in params.items() if k.startswith('cpuxml_')}
        logging.debug(cpu_params)
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.xml = "<cpu><numa/></cpu>"
        for attr_key in cpu_params:
            val = cpu_params[attr_key]
            logging.debug('Set cpu params')
            setattr(cpu_xml, attr_key.replace('cpuxml_', ''),
                    eval(val) if ':' in val else val)
        logging.debug(cpu_xml)
        return cpu_xml.copy()

    def create_nvdimm_xml(**mem_param):
        """
        Create xml of nvdimm memory device
        """
        mem_xml = utils_hotplug.create_mem_xml(
            tg_size=mem_param['target_size'],
            mem_addr={'slot': mem_param['address_slot']},
            tg_sizeunit=mem_param['target_size_unit'],
            tg_node=mem_param['target_node'],
            mem_model="nvdimm",
            lb_size=mem_param.get('label_size'),
            lb_sizeunit=mem_param.get('label_size_unit'),
            mem_access=mem_param['mem_access']
        )

        source_xml = memory.Memory.Source()
        source_xml.path = mem_param['source_path']
        mem_xml.source = source_xml
        logging.debug(mem_xml)

        return mem_xml.copy()

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        vm = env.get_vm(vm_name)
        # Create nvdimm file on the host
        process.run('truncate -s 512M %s' % nvdimm_file, verbose=True)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Set cpu according to params
        cpu_xml = create_cpuxml()
        vmxml.cpu = cpu_xml

        # Update other vcpu, memory info according to params
        update_vm_args = {k: params[k] for k in params
                          if k.startswith('setvm_')}
        logging.debug(update_vm_args)
        for key, value in list(update_vm_args.items()):
            attr = key.replace('setvm_', '')
            logging.debug('Set %s = %s', attr, value)
            setattr(vmxml, attr, int(value) if value.isdigit() else value)
        logging.debug(virsh.dumpxml(vm_name))

        # Add an nvdimm mem device to vm xml
        nvdimm_params = {k.replace('nvdimmxml_', ''): v
                         for k, v in params.items() if k.startswith('nvdimmxml_')}
        nvdimm_xml = create_nvdimm_xml(**nvdimm_params)
        vmxml.add_device(nvdimm_xml)
        vmxml.sync()
        logging.debug(virsh.dumpxml(vm_name))

        virsh.start(vm_name, debug=True, ignore_status=False)

        # Check qemu command line one by one.
        # Note: map() is lazy in Python 3, so iterate explicitly.
        for qemu_check in qemu_checks:
            libvirt.check_qemu_cmd_line(qemu_check)

        alive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        # Check if the guest support NVDIMM:
        # check /boot/config-$KVER file
        vm_session = vm.wait_for_login()
        check_boot_config(vm_session)

        # check /dev/pmem0 existed inside guest
        check_file_in_vm(vm_session, '/dev/pmem0')

        if check == 'back_file':
            # Create an ext4 file system on /dev/pmem0
            cmd_list = [
                "mkfs.ext4 /dev/pmem0",
                "mount -o dax /dev/pmem0 /mnt",
                "echo '%s' >/mnt/foo" % test_str,
                "umount /mnt"
            ]
            # map() is lazy in Python 3; run each command explicitly.
            for cmd in cmd_list:
                vm_session.cmd(cmd)
            vm_session.close()

            # Shutdown the guest, then start it, remount /dev/pmem0,
            # check if the test file is still on the file system
            vm.destroy()
            vm.start()
            vm_session = vm.wait_for_login()

            vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
            if test_str not in vm_session.cmd('cat /mnt/foo'):
                test.fail('"%s" should be in /mnt/foo')

            # From the host, check the file has changed:
            host_output = process.run('hexdump -C %s' % nvdimm_file,
                                      shell=True, verbose=True).stdout_text
            if test_str not in host_output:
                test.fail('"%s" should be in output' % test_str)

            # Shutdown the guest, and edit the xml,
            # include: access='private'
            vm_session.close()
            vm.destroy()
            vm_devices = vmxml.devices
            nvdimm_device = vm_devices.by_device_tag('memory')[0]
            nvdimm_index = vm_devices.index(nvdimm_device)
            vm_devices[nvdimm_index].mem_access = 'private'
            vmxml.devices = vm_devices
            vmxml.sync()

            # Log in to the guest, mount /dev/pmem0,
            # and create a file: foo-private
            vm.start()
            vm_session = vm.wait_for_login()

            libvirt.check_qemu_cmd_line('mem-path=%s,share=no' % nvdimm_file)

            private_str = 'This is a test for foo-private!'
            vm_session.cmd('mount -o dax /dev/pmem0 /mnt/')

            file_private = 'foo-private'
            vm_session.cmd("echo '%s' >/mnt/%s" % (private_str, file_private))
            if private_str not in vm_session.cmd('cat /mnt/%s' % file_private):
                test.fail('"%s" should be in output' % private_str)

            # Shutdown the guest, then start it,
            # check that the file foo-private no longer exists
            vm_session.close()
            vm.destroy()

            vm.start()
            vm_session = vm.wait_for_login()
            vm_session.cmd('mount -o dax /dev/pmem0 /mnt/')
            if file_private in vm_session.cmd('ls /mnt/'):
                test.fail('%s should not exist, for it was '
                          'created when access=private' % file_private)

        if check == 'label_back_file':
            # Create an xfs file system on /dev/pmem0
            vm_session.cmd('mkfs.xfs -f -b size=4096 /dev/pmem0')
            # Mount the file system with DAX enabled for page cache bypass
            output = vm_session.cmd_output('mount -o dax /dev/pmem0 /mnt/')
            logging.info(output)

            # Create a file on the nvdimm device.
            test_str = 'This is a test with label'
            vm_session.cmd('echo "%s" >/mnt/foo-label' % test_str)
            if test_str not in vm_session.cmd('cat /mnt/foo-label '):
                test.fail('"%s" should be in output' % test_str)

            # Reboot the guest, and remount the nvdimm device in the guest.
            # Check that the file foo-label still exists
            vm_session.close()
            virsh.reboot(vm_name, debug=True)
            vm_session = vm.wait_for_login()

            vm_session.cmd('mount -o dax /dev/pmem0  /mnt')
            if test_str not in vm_session.cmd('cat /mnt/foo-label '):
                test.fail('"%s" should be in output' % test_str)

        if check == 'hot_plug':
            # Create file for 2nd nvdimm device
            nvdimm_file_2 = params.get('nvdimm_file_2')
            process.run('truncate -s 512M %s' % nvdimm_file_2)

            # Add 2nd nvdimm device to vm xml
            nvdimm2_params = {k.replace('nvdimmxml2_', ''): v
                              for k, v in params.items() if k.startswith('nvdimmxml2_')}
            nvdimm2_xml = create_nvdimm_xml(**nvdimm2_params)

            ori_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices('memory')
            logging.debug('Starts with %d memory devices', len(ori_devices))

            result = virsh.attach_device(vm_name, nvdimm2_xml.xml, debug=True)
            libvirt.check_exit_status(result)

            # After attach, there should be an extra memory device
            devices_after_attach = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices('memory')
            logging.debug('After attach, vm has %d memory devices',
                          len(devices_after_attach))
            if len(ori_devices) != len(devices_after_attach) - 1:
                test.fail('Number of memory devices after attach is %d, should be %d'
                          % (len(devices_after_attach), len(ori_devices) + 1))

            time.sleep(5)
            check_file_in_vm(vm_session, '/dev/pmem1')

            nvdimm_detach = alive_vmxml.get_devices('memory')[-1]
            logging.debug(nvdimm_detach)

            # Hot-unplug nvdimm device
            result = virsh.detach_device(vm_name, nvdimm_detach.xml, debug=True)
            libvirt.check_exit_status(result)

            vm_session.close()
            vm_session = vm.wait_for_login()

            virsh.dumpxml(vm_name, debug=True)

            left_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices('memory')
            logging.debug(left_devices)

            if len(left_devices) != len(ori_devices):
                test.fail('Number of memory devices after detach is %d, should be %d'
                          % (len(left_devices), len(ori_devices)))

            time.sleep(5)
            check_file_in_vm(vm_session, '/dev/pmem1', expect=False)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        bkxml.sync()
        os.remove(nvdimm_file)
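For reference, the nvdimm device that create_nvdimm_xml above assembles (and that the test later flips to access='private') corresponds to XML along these lines; the path, sizes, and slot are illustrative:

    <memory model='nvdimm' access='shared'>
      <source>
        <path>/tmp/nvdimm</path>
      </source>
      <target>
        <size unit='KiB'>524288</size>
        <node>0</node>
        <label>
          <size unit='KiB'>128</size>
        </label>
      </target>
      <address type='dimm' slot='0'/>
    </memory>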
Example #9
def run(test, params, env):
    """
    Test disk encryption option.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare pool, volume.
    3. Edit disks xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def create_pool(p_name, p_type, p_target):
        """
        Define and start a pool.

        :param p_name: Pool name.
        :param p_type: Pool type.
        :param p_target: Pool target path.
        """
        p_xml = pool_xml.PoolXML(pool_type=p_type)
        p_xml.name = p_name
        p_xml.target_path = p_target

        if not os.path.exists(p_target):
            os.mkdir(p_target)
        p_xml.xmltreefile.write()
        ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_build(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_start(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, target_encrypt_params, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param target_encrypt_params: Encrypt parameters in dict.
        :param vol_params: Volume parameters dict.
        :return: True if created successfully.
        """
        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.encryption = volxml.new_encryption(**target_encrypt_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_secret(vol_path):
        """
        Create secret.

        :param vol_path: Volume path.
        :return: Secret uuid if created successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        libvirt.check_exit_status(ret)
        # Get secret uuid.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError:
            test.error("Failed to get newly created secret uuid")
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(secret_password_no_encoded.encode(encoding)).decode(encoding)
        ret = virsh.secret_set_value(encryption_uuid, secret_string,
                                     **virsh_dargs)
        libvirt.check_exit_status(ret)
        return encryption_uuid

    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param vm: VM guest.
        :param target: Disk dev in VM.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status("rpm -q parted || "
                                          "yum install -y parted", 300)
            if rpm_stat != 0:
                test.fail("Failed to query/install parted, make sure"
                          " that you have usable repo in guest")

            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            device_source = os.path.join(os.sep, 'dev', added_part)
            libvirt.mk_label(device_source, session=session)
            libvirt.mk_part(device_source, size="10M", session=session)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % added_part)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            session.close()
            if s != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    encryption_in_source = "yes" == params.get("encryption_in_source")
    encryption_out_source = "yes" == params.get("encryption_out_source")
    if encryption_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Cannot put <encryption> inside disk <source> in "
                    "this libvirt version.")
    # Pool/Volume options.
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    volume_name = params.get("vol_name")
    volume_alloc = params.get("vol_alloc")
    volume_cap_unit = params.get("vol_cap_unit")
    volume_cap = params.get("vol_cap")
    volume_target_path = params.get("target_path")
    volume_target_format = params.get("target_format")
    volume_target_encypt = params.get("target_encypt", "")
    volume_target_label = params.get("target_label")

    hotplug = "yes" == params.get("virt_disk_device_hotplug")
    status_error = "yes" == params.get("status_error")
    secret_type = params.get("secret_type", "passphrase")
    secret_password_no_encoded = params.get("secret_password_no_encoded", "redhat")
    virt_disk_qcow2_format = "yes" == params.get("virt_disk_qcow2_format")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    sec_encryption_uuid = None
    try:
        # Prepare the disk.
        sec_uuids = []
        create_pool(pool_name, pool_type, pool_target)
        vol_params = {"name": volume_name, "capacity": int(volume_cap),
                      "allocation": int(volume_alloc), "format":
                      volume_target_format, "path": volume_target_path,
                      "label": volume_target_label,
                      "capacity_unit": volume_cap_unit}
        vol_encryption_params = {}
        vol_encryption_params.update({"format": volume_target_encypt})
        # For any disk format other than qcow2, a secret needs to be created first.
        if not virt_disk_qcow2_format:
            # create secret.
            sec_encryption_uuid = create_secret(volume_target_path)
            sec_uuids.append(sec_encryption_uuid)
            vol_encryption_params.update({"secret": {"type": secret_type, "uuid": sec_encryption_uuid}})
        try:
            # If the libvirt version is lower than 2.5.0, creating a luks
            # encryption volume is not supported, so skip it.
            create_vol(pool_name, vol_encryption_params, vol_params)
        except AssertionError as info:
            err_msgs = ("create: invalid option")
            if str(info).count(err_msgs):
                test.error("Creating luks encryption volume "
                           "is not supported on this libvirt version")
            else:
                test.error("Failed to create volume."
                           "Error: %s" % str(info))
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        if device_type == "file":
            dev_attrs = "file"
        elif device_type == "dir":
            dev_attrs = "dir"
        else:
            dev_attrs = "dev"
        disk_source = disk_xml.new_disk_source(
                **{"attrs": {dev_attrs: volume_target_path}})
        disk_xml.driver = {"name": "qemu", "type": volume_target_format,
                           "cache": "none"}
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuids.append(v_xml.encryption.secret["uuid"])
        if not status_error:
            logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                          v_xml.encryption.format,
                          v_xml.encryption.secret["type"],
                          v_xml.encryption.secret["uuid"])
            encryption_dict = {"encryption": v_xml.encryption.format,
                               "secret": {"type": v_xml.encryption.secret["type"],
                                          "uuid": v_xml.encryption.secret["uuid"]}}
            if encryption_in_source:
                disk_source.encryption = disk_xml.new_encryption(
                        **encryption_dict)
            if encryption_out_source:
                disk_xml.encryption = disk_xml.new_encryption(
                        **encryption_dict)
        disk_xml.source = disk_source
        logging.debug("disk xml is:\n%s" % disk_xml)
        if not hotplug:
            # Sync VM xml.
            vmxml.add_device(disk_xml)
            vmxml.sync()

        try:
            # Start the VM and do disk hotplug if required,
            # then check disk status in vm.
            # Note that a LUKS encrypted virtual disk without <encryption>
            # can be normally started or attached, since qemu will just treat
            # it as RAW, so we don't test LUKS with status_error=TRUE.
            vm.start()
            vm.wait_for_login()
            if status_error:
                if hotplug:
                    logging.debug("attaching disk, expecting error...")
                    result = virsh.attach_device(vm_name, disk_xml.xml)
                    libvirt.check_exit_status(result, status_error)
                else:
                    test.fail("VM started unexpectedly.")
            else:
                if hotplug:
                    result = virsh.attach_device(vm_name, disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
                    result = virsh.detach_device(vm_name, disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                else:
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                if hotplug:
                    test.fail("In hotplug scenario, VM should "
                              "start successfully but not."
                              "Error: %s", str(e))
                else:
                    logging.debug("VM failed to start as expected."
                                  "Error: %s", str(e))
            else:
                # From libvirt 2.5.0 onward, AES-CBC encrypted qcow2 images
                # are no longer supported.
                err_msgs = ("AES-CBC encrypted qcow2 images is"
                            " no longer supported in system emulators")
                if str(e).count(err_msgs):
                    test.cancel(err_msgs)
                else:
                    test.fail("VM failed to start."
                              "Error: %s" % str(e))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, **virsh_dargs)
            virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
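For reference, the encryption_dict assembled above renders as an <encryption> element like this, placed either inside <source> or directly under <disk>; the uuid is illustrative:

    <encryption format='luks'>
      <secret type='passphrase' uuid='11111111-2222-3333-4444-555555555555'/>
    </encryption>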
Example #10
def run(test, params, env):
    """
    Test SCSI3 Persistent Reservation functions.

    1. Prepare iscsi backend storage.
    2. Prepare disk xml.
    3. Hot/cold plug the disk to vm.
    4. Check if SCSI3 Persistent Reservation commands can be issued to that disk.
    5. Recover test environment.
    6. Confirm the test result.
    """
    def get_delta_parts(vm, old_parts):
        """
        Get the newly added partitions/blockdevs in vm.
        :param vm: The vm to be operated.
        :param old_parts: The original partitions/blockdevs in vm.
        :return: Newly added partitions/blockdevs.
        """
        session = vm.wait_for_login()
        new_parts = utils_disk.get_parts_list(session)
        new_parts = list(set(new_parts).difference(set(old_parts)))
        session.close()
        return new_parts

    def check_pr_cmds(vm, blk_dev):
        """
        Check if SCSI3 Persistent Reservation commands can be used in vm.
        :param vm: The vm to be checked.
        :param blk_dev: The block device in vm to be checked.
        """
        session = vm.wait_for_login()
        cmd = ("sg_persist --no-inquiry -v --out --register-ignore --param-sark 123aaa /dev/{0} &&"
               "sg_persist --no-inquiry --in -k /dev/{0} &&"
               "sg_persist --no-inquiry -v --out --reserve --param-rk 123aaa --prout-type 5 /dev/{0} &&"
               "sg_persist --no-inquiry --in -r /dev/{0} &&"
               "sg_persist --no-inquiry -v --out --release --param-rk 123aaa --prout-type 5 /dev/{0} &&"
               "sg_persist --no-inquiry --in -r /dev/{0} &&"
               "sg_persist --no-inquiry -v --out --register --param-rk 123aaa --prout-type 5 /dev/{0} &&"
               "sg_persist --no-inquiry --in -k /dev/{0}"
               .format(blk_dev))
        cmd_status, cmd_output = session.cmd_status_output(cmd)
        session.close()
        if cmd_status == 127:
            test.error("sg3_utils not installed in test image")
        elif cmd_status != 0:
            test.fail("persistent reservation failed for /dev/%s" % blk_dev)
        else:
            logging.info("persistent reservation successful for /dev/%s" % blk_dev)

    def start_or_stop_qemu_pr_helper(is_start=True, path_to_sock="/var/run/qemu-pr-helper.sock"):
        """
        Start or stop qemu-pr-helper daemon
        :param is_start: Set True to start, False to stop.
        :param path_to_sock: Path to the qemu-pr-helper socket file.
        """
        service_mgr = service.ServiceManager()
        if is_start:
            service_mgr.start('qemu-pr-helper')
            time.sleep(2)
            shutil.chown(path_to_sock, "qemu", "qemu")
        else:
            service_mgr.stop('qemu-pr-helper')

    # Check if SCSI3 Persistent Reservations supported by
    # current libvirt versions.
    if not libvirt_version.version_compare(4, 4, 0):
        test.cancel("The <reservations> tag supported by libvirt from version "
                    "4.4.0")

    # This part is a temporary workaround for bz 1658988 and should be
    # removed when that bug is fixed.
    #===== BEGIN =====
    target_pr_dir = "/etc/target/pr"
    if not os.path.isdir(target_pr_dir):
        os.mkdir(target_pr_dir)
    #===== END =====

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    # Disk specific attributes
    device = params.get("virt_disk_device", "lun")
    device_target = params.get("virt_disk_device_target", "sdb")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "block")
    device_bus = params.get("virt_disk_device_bus", "scsi")
    # Iscsi options
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1G")
    auth_uuid = "yes" == params.get("auth_uuid")
    auth_usage = "yes" == params.get("auth_usage")
    # SCSI3 PR options
    reservations_managed = "yes" == params.get("reservations_managed", "yes")
    reservations_source_type = params.get("reservations_source_type", "unix")
    reservations_source_path = params.get("reservations_source_path",
                                          "/var/run/qemu-pr-helper.sock")
    reservations_source_mode = params.get("reservations_source_mode", "client")
    secret_uuid = ""
    # Case step options
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")

    # Start vm and get all partitions in vm
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_in_source = "yes" == params.get("auth_in_source", "no")
            if auth_in_source and not libvirt_version.version_compare(3, 9, 0):
                test.cancel("place auth in source is not supported in "
                            "current libvirt version.")
            auth_type = params.get("auth_type", "chap")
            secret_usage_target = params.get("secret_usage_target",
                                             "libvirtiscsi")
            secret_usage_type = params.get("secret_usage_type", "iscsi")
            chap_user = params.get("iscsi_user", "redhat")
            chap_passwd = params.get("iscsi_password", "redhat")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(str(chap_passwd).encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

        # Setup iscsi target
        blk_dev = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                 is_login=True,
                                                 image_size=emulated_size,
                                                 chap_user=chap_user,
                                                 chap_passwd=chap_passwd,
                                                 portal_ip=iscsi_host)

        # Add disk xml
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        auth_dict = {}
        if auth_uuid:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_uuid": secret_uuid}
        elif auth_usage:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_usage": secret_usage_target}
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {"dev": blk_dev}})
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if auth_in_source:
                disk_source.auth = disk_auth
            else:
                disk_xml.auth = disk_auth
        if reservations_managed:
            reservations_dict = {"reservations_managed": "yes"}
        else:
            start_or_stop_qemu_pr_helper(path_to_sock=reservations_source_path)
            reservations_dict = {"reservations_managed": "no",
                                 "reservations_source_type": reservations_source_type,
                                 "reservations_source_path": reservations_source_path,
                                 "reservations_source_mode": reservations_source_mode}
        disk_source.reservations = disk_xml.new_reservations(**reservations_dict)
        disk_xml.source = disk_source

        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        try:
            # Start the VM and check status
            vmxml.sync()
            vm.start()
            vm.wait_for_login().close()
            if hotplug_disk:
                result = virsh.attach_device(vm_name, disk_xml.xml,
                                             ignore_status=True, debug=True)
                libvirt.check_exit_status(result)
            new_parts = get_delta_parts(vm, old_parts)
            if len(new_parts) != 1:
                logging.error("Expected 1 dev added but has %s" % len(new_parts))
            new_part = new_parts[0]
            check_pr_cmds(vm, new_part)
            result = virsh.detach_device(vm_name, disk_xml.xml,
                                         ignore_status=True, debug=True)
            libvirt.check_exit_status(result)
        except virt_vm.VMStartError as e:
            test.fail("VM failed to start."
                      "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            test.fail("Failed to define VM:\n%s" % xml_error)

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")
        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
        # Stop qemu-pr-helper daemon
        start_or_stop_qemu_pr_helper(is_start=False)
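
The start_or_stop_qemu_pr_helper helper used above is defined elsewhere in
this test module. A minimal sketch of such a helper, assuming qemu-pr-helper
is launched directly and accepts the -d (daemonize) and -k (socket path)
options, could look like this:

from avocado.utils import process

def start_or_stop_qemu_pr_helper(is_start=True, path_to_sock=None):
    """Start or stop a qemu-pr-helper daemon (sketch, not the original)."""
    if is_start:
        # -d daemonizes; -k sets the unix socket that the <reservations>
        # element of the disk XML points at (assumed flags)
        process.run("qemu-pr-helper -d -k %s" % path_to_sock, shell=True)
    else:
        # Stop any running helper; ignore failure if none is running
        process.run("pkill -f qemu-pr-helper",
                    ignore_status=True, shell=True)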
Example No. 11
def run(test, params, env):
    """
    Test interface xml options.

    1. Prepare test environment, destroy or suspend a VM.
    2. Edit xml and start the domain.
    3. Perform test operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    host_arch = platform.machine()
    virsh_dargs = {'debug': True, 'ignore_status': False}

    if not utils_package.package_install(["lsof"]):
        test.cancel("Failed to install dependency package lsof" " on host")

    def create_iface_xml(iface_mac):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        source = ast.literal_eval(iface_source)
        if source:
            iface.source = source
        iface.model = iface_model if iface_model else "virtio"
        iface.mac_address = iface_mac
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if test_target:
            iface.target = {"dev": target_dev}
        logging.debug("Create new interface xml: %s", iface)
        return iface

    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check source device is valid or not,
            # if it's not in host interface list, try to set
            # source device to first active interface of host
            if (iface.type_name == "direct" and 'dev' in source
                    and source['dev'] not in net_ifs):
                logging.warning(
                    "Source device %s is not an interface"
                    " of host, reset to %s", source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if test_target:
            logging.debug("iface.target is %s" % target_dev)
            iface.target = {"dev": target_dev}
        if iface.address:
            del iface.address
        if set_ip:
            iface.ips = [ast.literal_eval(x) for x in set_ips]
        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            process.run(cmd, shell=True)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid of permission problem
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)
            logging.info("Unprivileged users can't use 'dac' security driver,"
                         " removing from domain xml if present...")
            vmxml.del_seclabel([('model', 'dac')])

            # Set vm memory to 2G if it's larger than 2G
            if vmxml.memory > 2097152:
                vmxml.memory = vmxml.current_mem = 2097152

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            process.run("chmod a+rw %s" % vmxml.xml, shell=True)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml, ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            try:
                vmxml.sync()
                if define_error:
                    test.fail("Defining the VM succeeded, but it should fail")
            except xcepts.LibvirtXMLError as e:
                if not define_error:
                    test.fail("Defining the VM failed: %s" % e)

    def check_offloads_option(if_name, driver_options, session=None):
        """
        Check interface offloads by ethtool output
        """
        offloads = {
            "csum": "tx-checksumming",
            "gso": "generic-segmentation-offload",
            "tso4": "tcp-segmentation-offload",
            "tso6": "tx-tcp6-segmentation",
            "ecn": "tx-tcp-ecn-segmentation",
            "ufo": "udp-fragmentation-offload"
        }
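        # Maps libvirt <driver> offload option names to the feature names
        # that appear in `ethtool -k` output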
        if session:
            ret, output = session.cmd_status_output("ethtool -k %s | head"
                                                    " -18" % if_name)
        else:
            out = process.run("ethtool -k %s | head -18" % if_name, shell=True)
            ret, output = out.exit_status, out.stdout_text
        if ret:
            test.fail("ethtool return error code")
        logging.debug("ethtool output: %s", output)
        for offload in list(driver_options.keys()):
            if offload in offloads:
                if (output.count(offloads[offload]) and not output.count(
                        "%s: %s" %
                    (offloads[offload], driver_options[offload]))):
                    test.fail("offloads option %s: %s isn't"
                              " correct in ethtool output" %
                              (offloads[offload], driver_options[offload]))

    def run_xml_test(iface_mac):
        """
        Test for interface options in vm xml
        """
        # Get the interface object according the mac address
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_devices = vmxml.get_devices(device_type="interface")
        iface = None
        for iface_dev in iface_devices:
            if iface_dev.mac_address == iface_mac:
                iface = iface_dev
        if not iface:
            test.fail("Can't find interface with mac"
                      " '%s' in vm xml" % iface_mac)
        driver_dict = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        for driver_opt in list(driver_dict.keys()):
            if not driver_dict[driver_opt] == iface.driver.driver_attr[
                    driver_opt]:
                test.fail("Can't see driver option %s=%s in vm xml" %
                          (driver_opt, driver_dict[driver_opt]))
            else:
                logging.info("Found %s=%s in vm xml" %
                             (driver_opt, driver_dict[driver_opt]))
        if iface_target:
            if ("dev" not in iface.target
                    or not iface.target["dev"].startswith(iface_target)):
                test.fail("Can't see device target dev in vm xml")
            # Check macvtap mode by ip link command
            if iface_target == "macvtap" and "mode" in iface.source:
                cmd = "ip -d link show %s" % iface.target["dev"]
                output = process.run(cmd, shell=True).stdout_text
                logging.debug("ip link output: %s", output)
                mode = iface.source["mode"]
                if mode == "passthrough":
                    mode = "passthru"
                if not re.search(r"macvtap\s+mode %s" % mode, output):
                    test.fail("Failed to verify macvtap mode")
        # Check if the "target dev" is set successfully
        # 1. Target dev name with prefix as "vnet" will always be override;
        # 2. Target dev name with prefix as "macvtap" or "macvlan" with direct
        # type interface will be override;
        # 3. Other scenarios, the target dev should be set successfully.
        if test_target:
            if target_dev != iface.target["dev"]:
                if target_dev.startswith("vnet") or \
                        (iface_type == "direct" and
                         (target_dev.startswith("macvtap") or
                          target_dev.startswith("macvlan"))):
                    logging.debug("target dev %s is override" % target_dev)
                else:
                    test.fail("Failed to set target dev to %s", target_dev)
            else:
                logging.debug("target dev set successfully to %s",
                              iface.target["dev"])

    def run_cmdline_test(iface_mac, host_arch):
        """
        Test qemu command line
        :param iface_mac: expected MAC
        :param host_arch: host architecture, e.g. x86_64
        :raise avocado.core.exceptions.TestError: if preconditions are not met
        :raise avocado.core.exceptions.TestFail: if commandline doesn't match
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        ret = process.run(cmd, shell=True)
        logging.debug("Command line %s", ret.stdout_text)
        if test_vhost_net:
            if not ret.stdout_text.count("vhost=on") and not rm_vhost_driver:
                test.fail("Can't see vhost options in"
                          " qemu-kvm command line")

        if iface_model == "virtio":
            if host_arch == 's390x':
                model_option = "device virtio-net-ccw"
            else:
                model_option = "device virtio-net-pci"
        elif iface_model == 'rtl8139':
            model_option = "device rtl8139"
        else:
            test.error(
                "Don't know which device driver to expect on qemu cmdline"
                " for iface_model %s" % iface_model)
        iface_cmdline = re.findall(
            r"%s,(.+),mac=%s" % (model_option, iface_mac), ret.stdout_text)
        if not iface_cmdline:
            test.fail("Can't see %s with mac %s in command"
                      " line" % (model_option, iface_mac))

        cmd_opt = {}
        for opt in iface_cmdline[0].split(','):
            tmp = opt.rsplit("=")
            cmd_opt[tmp[0]] = tmp[1]
        logging.debug("Command line options %s", cmd_opt)

        driver_dict = {}
        # Test <driver> xml options.
        if iface_driver:
            iface_driver_dict = ast.literal_eval(iface_driver)
            for driver_opt in list(iface_driver_dict.keys()):
                if driver_opt == "name":
                    continue
                elif driver_opt == "txmode":
                    if iface_driver_dict["txmode"] == "iothread":
                        driver_dict["tx"] = "bh"
                    else:
                        driver_dict["tx"] = iface_driver_dict["txmode"]
                elif driver_opt == "queues":
                    driver_dict["mq"] = "on"
                    if "pci" in model_option:
                        driver_dict["vectors"] = str(
                            int(iface_driver_dict["queues"]) * 2 + 2)
                else:
                    driver_dict[driver_opt] = iface_driver_dict[driver_opt]
        # Test <driver><host/><driver> xml options.
        if iface_driver_host:
            driver_dict.update(ast.literal_eval(iface_driver_host))
        # Test <driver><guest/><driver> xml options.
        if iface_driver_guest:
            driver_dict.update(ast.literal_eval(iface_driver_guest))

        for driver_opt in list(driver_dict.keys()):
            if (driver_opt not in cmd_opt
                    or not cmd_opt[driver_opt] == driver_dict[driver_opt]):
                test.fail("Can't see option '%s=%s' in qemu-kvm "
                          " command line" %
                          (driver_opt, driver_dict[driver_opt]))
            logging.info("Find %s=%s in qemu-kvm command line" %
                         (driver_opt, driver_dict[driver_opt]))
        if test_backend:
            guest_pid = ret.stdout_text.rsplit()[1]
            cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["tap"])
            cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["vhost"])

    def get_guest_ip(session, mac):
        """
        Wrapper function to get guest ip address
        """
        utils_net.restart_guest_network(session, mac)
        # Wait for the IP address to be ready
        utils_misc.wait_for(lambda: utils_net.get_guest_ip_addr(session, mac),
                            10)
        return utils_net.get_guest_ip_addr(session, mac)

    def check_user_network(session):
        """
        Check user network ip address on guest
        """
        vm_ips = []
        vm_ips.append(get_guest_ip(session, iface_mac_old))
        if attach_device:
            vm_ips.append(get_guest_ip(session, iface_mac))
        logging.debug("IP address on guest: %s", vm_ips)
        if len(vm_ips) != len(set(vm_ips)):
            logging.debug(
                "Duplicated IP address on guest. Check bug: "
                "https://bugzilla.redhat.com/show_bug.cgi?id=1147238")
        for vm_ip in vm_ips:
            if not vm_ip or vm_ip != expect_ip:
                logging.debug("vm_ip is %s, expect_ip is %s", vm_ip, expect_ip)
                test.fail("Found wrong IP address" " on guest")
        # Check gateway address
        gateway = str(utils_net.get_default_gateway(False, session))
        if expect_gw not in gateway:
            test.fail("The gateway on guest is %s, while expect is %s" %
                      (gateway, expect_gw))
        # Check dns server address
        ns_list = utils_net.get_guest_nameserver(session)
        if expect_ns not in ns_list:
            test.fail("The dns found is %s, which expect is %s" %
                      (ns_list, expect_ns))

    def check_mcast_network(session, add_session):
        """
        Check multicast ip address on guests

        :param session: vm session
        :param add_session: additional vm session
        """
        src_addr = ast.literal_eval(iface_source)['address']
        vms_sess_dict = {vm_name: session, additional_vm.name: add_session}

        # Check mcast address on host
        cmd = "netstat -g | grep %s" % src_addr
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Can't find multicast ip address" " on host")
        vms_ip_dict = {}
        # Get ip address on each guest
        for vms in list(vms_sess_dict.keys()):
            vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
            vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
            if not vm_ip:
                test.fail("Can't get multicast ip" " address on guest")
            vms_ip_dict.update({vms: vm_ip})
        if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
            test.fail("Got duplicated multicast ip address")
        logging.debug("Found ips on guest: %s", vms_ip_dict)

        # Run omping server on host
        if not utils_package.package_install(["omping"]):
            test.error("Failed to install omping" " on host")
        cmd = ("iptables -F;omping -m %s %s" %
               (src_addr,
                "192.168.122.1 %s" % ' '.join(list(vms_ip_dict.values()))))
        # Run a background job waiting for client connections
        bgjob = utils_misc.AsyncJob(cmd)

        # Run omping client on guests
        for vms in list(vms_sess_dict.keys()):
            # omping should be installed first
            if not utils_package.package_install(["omping"],
                                                 vms_sess_dict[vms]):
                test.error("Failed to install omping" " on guest")
            cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
                   (src_addr, "192.168.122.1 %s" % vms_ip_dict[vms]))
            ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
            logging.debug("omping ret: %s, output: %s", ret, output)
            if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%')
                    or not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
                test.fail("omping failed on guest")
        # Kill the background job
        bgjob.kill_func()

    def get_iface_model(iface_model, host_arch):
        """
        Get iface_model. On s390x use default model 'virtio' if non-virtio given
        :param iface_model: value as by test configuration or default
        :param host_arch: host architecture, e.g. x86_64
        :return: iface_model
        """
        if 's390x' == host_arch and 'virtio' not in iface_model:
            return "virtio"
        else:
            return iface_model

    def check_vhostuser_guests(session1, session2):
        """
        Check the vhostuser interface in guests

        :param session1: Session of the original guest
        :param session2: Session of the additional guest
        """
        logging.debug("iface details is %s" %
                      libvirt.get_interface_details(vm_name))
        vm1_mac = str(libvirt.get_interface_details(vm_name)[0]['mac'])
        vm2_mac = str(libvirt.get_interface_details(add_vm_name)[0]['mac'])

        utils_net.set_guest_ip_addr(session1, vm1_mac, guest1_ip)
        utils_net.set_guest_ip_addr(session2, vm2_mac, guest2_ip)
        ping_status, ping_output = utils_net.ping(dest=guest2_ip,
                                                  count='3',
                                                  timeout=5,
                                                  session=session1)
        logging.info("output:%s" % ping_output)
        if ping_status != 0:
            if ping_expect_fail:
                logging.info("Can not ping guest2 as expected")
            else:
                test.fail("Can not ping guest2 from guest1")
        else:
            if ping_expect_fail:
                test.fail("Ping guest2 successfully not expected")
            else:
                logging.info("Can ping guest2 from guest1")

    def get_ovs_statis(ovs):
        """
        Get ovs-vsctl interface statistics and format in dict

        :param ovs: openvswitch instance
        """
        ovs_statis_dict = {}
        ovs_iface_info = ovs.ovs_vsctl(["list",
                                        "interface"]).stdout_text.strip()
        ovs_iface_list = re.findall(
            r'name\s+: (\S+)\n.*?statistics\s+: {(.*?)}\n', ovs_iface_info,
            re.S)
        logging.info("ovs iface list is %s", ovs_iface_list)
        # Dict of iface name and statistics
        for iface_name in vhostuser_names.split():
            for ovs_iface in ovs_iface_list:
                if iface_name == ast.literal_eval(ovs_iface[0]):
                    format_statis = dict(
                        re.findall(r'(\S*?)=(\d*?),', ovs_iface[1]))
                    ovs_statis_dict[iface_name] = format_statis
                    break
        return ovs_statis_dict

    status_error = "yes" == params.get("status_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    unprivileged_user = params.get("unprivileged_user")

    # Interface specific attributes.
    iface_type = params.get("iface_type", "network")
    iface_source = params.get("iface_source", "{}")
    iface_driver = params.get("iface_driver")
    iface_model = get_iface_model(params.get("iface_model", "virtio"),
                                  host_arch)
    iface_target = params.get("iface_target")
    iface_backend = params.get("iface_backend", "{}")
    iface_driver_host = params.get("iface_driver_host")
    iface_driver_guest = params.get("iface_driver_guest")
    ovs_br_name = params.get("ovs_br_name")
    vhostuser_names = params.get("vhostuser_names")
    attach_device = params.get("attach_iface_device")
    expect_tx_size = params.get("expect_tx_size")
    guest1_ip = params.get("vhostuser_guest1_ip", "192.168.100.1")
    guest2_ip = params.get("vhostuser_guest2_ip", "192.168.100.2")
    change_option = "yes" == params.get("change_iface_options", "no")
    update_device = "yes" == params.get("update_iface_device", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    serial_login = "******" == params.get("serial_login", "no")
    rm_vhost_driver = "yes" == params.get("rm_vhost_driver", "no")
    test_option_cmd = "yes" == params.get("test_iface_option_cmd", "no")
    test_option_xml = "yes" == params.get("test_iface_option_xml", "no")
    test_vhost_net = "yes" == params.get("test_vhost_net", "no")
    test_option_offloads = "yes" == params.get("test_option_offloads", "no")
    test_iface_user = "******" == params.get("test_iface_user", "no")
    test_iface_mcast = "yes" == params.get("test_iface_mcast", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    restart_vm = "yes" == params.get("restart_vm", "no")
    test_guest_ip = "yes" == params.get("test_guest_ip", "no")
    test_backend = "yes" == params.get("test_backend", "no")
    check_guest_trans = "yes" == params.get("check_guest_trans", "no")
    set_ip = "yes" == params.get("set_user_ip", "no")
    set_ips = params.get("set_ips", "").split()
    expect_ip = params.get("expect_ip")
    expect_gw = params.get("expect_gw")
    expect_ns = params.get("expect_ns")
    test_target = "yes" == params.get("test_target", "no")
    target_dev = params.get("target_dev", None)

    # test params for vhostuser test
    huge_page = ast.literal_eval(params.get("huge_page", "{}"))
    numa_cell = ast.literal_eval(params.get("numa_cell", "{}"))
    additional_iface_source = ast.literal_eval(
        params.get("additional_iface_source", "{}"))
    vcpu_num = params.get("vcpu_num")
    cpu_mode = params.get("cpu_mode")
    hugepage_num = params.get("hugepage_num")
    log_pattern = params.get("log_pattern")

    # judgment params for vhostuser test
    need_vhostuser_env = "yes" == params.get("need_vhostuser_env", "no")
    ping_expect_fail = "yes" == params.get("ping_expect_fail", "no")
    check_libvirtd_log = "yes" == params.get("check_libvirtd_log", "no")
    check_statistics = "yes" == params.get("check_statistics", "no")
    enable_multiqueue = "yes" == params.get("enable_multiqueue", "no")

    queue_size = None
    if iface_driver:
        driver_dict = ast.literal_eval(iface_driver)
        if "queues" in driver_dict:
            queue_size = int(driver_dict.get("queues"))

    if iface_driver_host or iface_driver_guest or test_backend:
        if not libvirt_version.version_compare(1, 2, 8):
            test.cancel("Offloading/backend options not "
                        "supported in this libvirt version")
    if iface_driver and "queues" in ast.literal_eval(iface_driver):
        if not libvirt_version.version_compare(1, 0, 6):
            test.cancel("Queues options not supported"
                        " in this libvirt version")

    if unprivileged_user:
        if not libvirt_version.version_compare(1, 1, 1):
            test.cancel("qemu-bridge-helper not supported" " on this host")
        virsh_dargs["unprivileged_user"] = unprivileged_user
        # Create unprivileged user if needed
        cmd = ("grep {0} /etc/passwd || "
               "useradd {0}".format(unprivileged_user))
        process.run(cmd, shell=True)
        # Need another disk image for unprivileged user to access
        dst_disk = "/tmp/%s.img" % unprivileged_user

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    # iface_mac will update if attach a new interface
    iface_mac = iface_mac_old
    # Additional vm for test
    additional_vm = None
    libvirtd = utils_libvirtd.Libvirtd()

    libvirtd_log_path = None
    libvirtd_conf = None
    if check_libvirtd_log:
        libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
        libvirtd.restart()

    # Prepare vhostuser
    ovs = None
    if need_vhostuser_env:
        # Preserve the selinux status
        selinux_mode = utils_selinux.get_status()
        # Preserve the original huge page count
        orig_size = utils_memory.get_num_huge_pages()
        ovs_dir = data_dir.get_tmp_dir()
        ovs = utils_net.setup_ovs_vhostuser(hugepage_num, ovs_dir, ovs_br_name,
                                            vhostuser_names, queue_size)

    try:
        # Build the xml and run test.
        try:
            # Prepare interface backend files
            if test_backend:
                if not os.path.exists("/dev/vhost-net"):
                    process.run("modprobe vhost-net", shell=True)
                backend = ast.literal_eval(iface_backend)
                backend_tap = "/dev/net/tun"
                backend_vhost = "/dev/vhost-net"
                if not backend:
                    backend["tap"] = backend_tap
                    backend["vhost"] = backend_vhost
                if not start_error:
                    # Create backend files for normal test
                    if not os.path.exists(backend["tap"]):
                        os.rename(backend_tap, backend["tap"])
                    if not os.path.exists(backend["vhost"]):
                        os.rename(backend_vhost, backend["vhost"])
            # Edit the interface xml.
            if change_option:
                modify_iface_xml(update=False)
                if define_error:
                    return

            if test_target:
                logging.debug("Setting target device name to %s", target_dev)
                modify_iface_xml(update=False)

            if rm_vhost_driver:
                # remove vhost driver on host and
                # the character file /dev/vhost-net
                cmd = ("modprobe -r {0}; "
                       "rm -f /dev/vhost-net".format("vhost_net"))
                if process.system(cmd, ignore_status=True, shell=True):
                    test.error("Failed to remove vhost_net driver")
            else:
                # Load vhost_net driver by default
                cmd = "modprobe vhost_net"
                process.system(cmd, shell=True)

            # Attach an interface while the vm is shut off
            if attach_device == 'config':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="--config",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)

            # Add hugepage and update cpu for vhostuser testing
            if huge_page:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                membacking = vm_xml.VMMemBackingXML()
                hugepages = vm_xml.VMHugepagesXML()
                pagexml = hugepages.PageXML()
                pagexml.update(huge_page)
                hugepages.pages = [pagexml]
                membacking.hugepages = hugepages
                vmxml.mb = membacking

                vmxml.vcpu = int(vcpu_num)
                cpu_xml = vm_xml.VMCPUXML()
                cpu_xml.xml = "<cpu><numa/></cpu>"
                cpu_xml.numa_cell = [numa_cell]
                cpu_xml.mode = cpu_mode
                if cpu_mode == "custom":
                    vm_capability = capability_xml.CapabilityXML()
                    cpu_xml.model = vm_capability.model
                vmxml.cpu = cpu_xml

                vmxml.sync()
                logging.debug("xmltreefile:%s", vmxml.xmltreefile)

            # Clone additional vm
            if additional_guest:
                add_vm_name = "%s_%s" % (vm_name, '1')
                # Clone additional guest
                timeout = params.get("clone_timeout", 360)
                utils_libguestfs.virt_clone_cmd(vm_name,
                                                add_vm_name,
                                                True,
                                                timeout=timeout)
                additional_vm = vm.clone(add_vm_name)
                # Update iface source if needed
                if additional_iface_source:
                    add_vmxml = vm_xml.VMXML.new_from_dumpxml(add_vm_name)
                    add_xml_devices = add_vmxml.devices
                    add_iface_index = add_xml_devices.index(
                        add_xml_devices.by_device_tag("interface")[0])
                    add_iface = add_xml_devices[add_iface_index]
                    add_iface.source = additional_iface_source
                    add_vmxml.devices = add_xml_devices
                    add_vmxml.xmltreefile.write()
                    add_vmxml.sync()

                    logging.debug("add vm xmltreefile:%s",
                                  add_vmxml.xmltreefile)
                additional_vm.start()
                # additional_vm.wait_for_login()
                username = params.get("username")
                password = params.get("password")
                add_session = additional_vm.wait_for_serial_login(
                    username=username, password=password)

            # Start the VM.
            if unprivileged_user:
                virsh.start(vm_name, **virsh_dargs)
                cmd = ("su - %s -c 'virsh console %s'" %
                       (unprivileged_user, vm_name))
                session = aexpect.ShellSession(cmd)
                session.sendline()
                remote.handle_prompts(session, params.get("username"),
                                      params.get("password"), r"[\#\$]\s*$",
                                      60)
                # Get ip address on guest
                if not get_guest_ip(session, iface_mac):
                    test.error("Can't get ip address on guest")
            else:
                # Will raise VMStartError exception if start fails
                vm.start()
                if serial_login:
                    session = vm.wait_for_serial_login()
                else:
                    session = vm.wait_for_login()
            if start_error:
                test.fail("VM started unexpectedly")

            # Attach an interface while the vm is running
            if attach_device == 'live':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="--live",
                                          ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret, status_error)
                # Sleep here so the attachment takes effect
                time.sleep(5)

            # Update interface options
            if update_device:
                modify_iface_xml(update=True, status_error=status_error)

            # Run tests for qemu-kvm command line options
            if test_option_cmd:
                run_cmdline_test(iface_mac, host_arch)
            # Run tests for vm xml
            if test_option_xml:
                run_xml_test(iface_mac)
            # Run tests for offloads options
            if test_option_offloads:
                if iface_driver_host:
                    ifname_guest = utils_net.get_linux_ifname(
                        session, iface_mac)
                    check_offloads_option(ifname_guest,
                                          ast.literal_eval(iface_driver_host),
                                          session)
                if iface_driver_guest:
                    ifname_host = libvirt.get_ifname_host(vm_name, iface_mac)
                    check_offloads_option(ifname_host,
                                          ast.literal_eval(iface_driver_guest))

            if test_iface_user:
                # Test user type network
                check_user_network(session)
            if test_iface_mcast:
                # Test mcast type network
                check_mcast_network(session, add_session)
            # Check guest ip address
            if test_guest_ip:
                if not get_guest_ip(session, iface_mac):
                    test.fail("Guest can't get a" " valid ip address")
            # Check guest RX/TX ring
            if check_guest_trans:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                ret, outp = session.cmd_status_output("ethtool -g %s" %
                                                      ifname_guest)
                if ret:
                    test.fail("ethtool return error code")
                logging.info("ethtool output is %s", outp)
                driver_dict = ast.literal_eval(iface_driver)
                if expect_tx_size:
                    driver_dict['tx_queue_size'] = expect_tx_size
                for outp_p in outp.split("Current hardware"):
                    if 'rx_queue_size' in driver_dict:
                        if re.search(
                                r"RX:\s*%s" % driver_dict['rx_queue_size'],
                                outp_p):
                            logging.info("Find RX setting RX:%s by ethtool",
                                         driver_dict['rx_queue_size'])
                        else:
                            test.fail("Cannot find matching rx setting")
                    if 'tx_queue_size' in driver_dict:
                        if re.search(
                                r"TX:\s*%s" % driver_dict['tx_queue_size'],
                                outp_p):
                            logging.info("Find TX settint TX:%s by ethtool",
                                         driver_dict['tx_queue_size'])
                        else:
                            test.fail("Cannot find matching tx setting")
            if test_target:
                logging.debug("Check if the target dev is set")
                run_xml_test(iface_mac)

            # Check vhostuser guest
            if additional_iface_source:
                check_vhostuser_guests(session, add_session)

            # Check libvirtd log
            if check_libvirtd_log:
                with open(libvirtd_log_path) as f:
                    lines = f.read()
                    if log_pattern in lines:
                        logging.info("Finding msg<%s> in libvirtd log",
                                     log_pattern)
                    else:
                        test.fail("Can not find msg:<%s> in libvirtd.log" %
                                  log_pattern)

            # Check statistics
            if check_statistics:
                session.sendline("ping %s" % guest2_ip)
                add_session.sendline("ping %s" % guest1_ip)
                time.sleep(5)
                vhost_name = vhostuser_names.split()[0]
                ovs_statis_dict = get_ovs_statis(ovs)[vhost_name]
                domif_info = {}
                domif_info = libvirt.get_interface_details(vm_name)
                virsh.domiflist(vm_name, debug=True)
                domif_stat_result = virsh.domifstat(vm_name, vhost_name)
                if domif_stat_result.exit_status != 0:
                    test.fail("domifstat cmd fail with msg:%s" %
                              domif_stat_result.stderr)
                else:
                    domif_stat = domif_stat_result.stdout.strip()
                logging.debug("vhost_name is %s, domif_stat is %s", vhost_name,
                              domif_stat)
                domif_stat_dict = dict(
                    re.findall("%s (\S*) (\d*)" % vhost_name, domif_stat))
                logging.debug("ovs_statis is %s, domif_stat is %s",
                              ovs_statis_dict, domif_stat_dict)
                ovs_cmp_dict = {
                    'tx_bytes': ovs_statis_dict['rx_bytes'],
                    'tx_drop': ovs_statis_dict['rx_dropped'],
                    'tx_errs': ovs_statis_dict['rx_errors'],
                    'tx_packets': ovs_statis_dict['rx_packets'],
                    'rx_bytes': ovs_statis_dict['tx_bytes'],
                    'rx_drop': ovs_statis_dict['tx_dropped']
                }
                logging.debug("ovs_cmp_dict is %s", ovs_cmp_dict)
                for dict_key in ovs_cmp_dict.keys():
                    if domif_stat_dict[dict_key] != ovs_cmp_dict[dict_key]:
                        test.fail(
                            "Found ovs %s result (%s) different from domifstat result (%s)"
                            % (dict_key, ovs_cmp_dict[dict_key],
                               domif_stat_dict[dict_key]))
                    else:
                        logging.info("ovs %s value %s matches domifstat",
                                     dict_key, domif_stat_dict[dict_key])

            # Check multi_queue
            if enable_multiqueue:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                for comb_size in (queue_size, queue_size - 1):
                    logging.info("Setting multiqueue size to %s" % comb_size)
                    session.cmd_status("ethtool -L %s combined %s" %
                                       (ifname_guest, comb_size))
                    ret, outp = session.cmd_status_output("ethtool -l %s" %
                                                          ifname_guest)
                    logging.debug("ethtool cmd output:%s" % outp)
                    if not ret:
                        pre_comb = re.search(
                            r"Pre-set maximums:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        cur_comb = re.search(
                            r"Current hardware settings:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        if int(pre_comb) != queue_size or int(cur_comb) != int(
                                comb_size):
                            test.fail(
                                "Failed to check the combined size: setting: %s,"
                                " Pre-set: %s, Current-set: %s, queue_size: %s"
                                % (comb_size, pre_comb, cur_comb, queue_size))
                        else:
                            logging.info("Pre-set and Current hardware "
                                         "settings values are correct")
                    else:
                        test.error("ethtool list fail: %s" % outp)

            session.close()
            if additional_guest:
                add_session.close()

            # Restart libvirtd and guest, then test again
            if restart_libvirtd:
                libvirtd.restart()

            if restart_vm:
                vm.destroy(gracefully=True)
                vm.start()
                if test_option_xml:
                    run_xml_test(iface_mac)

            # Detach hot/cold-plugged interface at last
            if attach_device and not status_error:
                ret = virsh.detach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="",
                                          ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret)

        except virt_vm.VMStartError as e:
            logging.info(str(e))
            if not start_error:
                test.fail('VM failed to start\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        # Restore interface backend files
        if test_backend:
            if not os.path.exists(backend_tap):
                os.rename(backend["tap"], backend_tap)
            if not os.path.exists(backend_vhost):
                os.rename(backend["vhost"], backend_vhost)
        if rm_vhost_driver:
            # Restore vhost_net driver
            process.system("modprobe vhost_net", shell=True)
        if unprivileged_user:
            virsh.remove_domain(vm_name, **virsh_dargs)
            process.run('rm -f %s' % dst_disk, shell=True)
        if additional_vm:
            virsh.remove_domain(additional_vm.name, "--remove-all-storage")
            # Kill all omping server process on host
            process.system("pidof omping && killall omping",
                           ignore_status=True,
                           shell=True)
        if vm.is_alive():
            vm.destroy(gracefully=True)
        vmxml_backup.sync()

        if need_vhostuser_env:
            utils_net.clean_ovs_env(selinux_mode=selinux_mode,
                                    page_size=orig_size,
                                    clean_ovs=True)

        if libvirtd_conf:
            libvirtd_conf.restore()
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
Example No. 12
        if test_qemu_cmd:
            # Check qemu-kvm command line
            cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
            if transport == "rdma":
                cmd += " | grep gluster+%s.*format=%s" % (transport, disk_format)
            else:
                cmd += " | grep gluster.*format=%s" % disk_format
            if driver_iothread:
                cmd += " | grep iothread=iothread%s" % driver_iothread
            if process.run(cmd, ignore_status=True, shell=True).exit_status:
                test.fail("Can't see gluster option '%s' "
                          "in command line" % cmd)
        # Detach hot plugged device.
        if start_vm and not default_pool:
            if gluster_disk:
                ret = virsh.detach_device(vm_name, custom_disk.xml,
                                          flagstr=attach_option, dargs=virsh_dargs)
                libvirt.check_exit_status(ret)

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if utils_misc.is_mounted(mnt_src, default_pool, 'glusterfs'):
            process.run("umount %s" % default_pool,
                        ignore_status=True, shell=True)

        if gluster_disk:
            libvirt.setup_or_cleanup_gluster(False, vol_name, brick_path)
def run(test, params, env):
    """
    Test virsh {at|de}tach-device command.

    The command can attach a new disk or detach an existing device.
    1. Prepare test environment, destroy or suspend a VM.
    2. Perform virsh attach/detach-device operation.
    3. Recover test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("at_dt_device_pre_vm_state", "running")
    virsh_dargs = {"debug": True, "ignore_status": True}

    def is_attached(vmxml_devices, disk_type, source_file, target_dev):
        """
        Check attached device and disk exist or not.

        :param vmxml_devices: VMXMLDevices instance
        :param disk_type: disk's device type: cdrom or floppy
        :param source_file : disk's source file to check
        :param target_dev : target device name
        :return: True/False if backing file and device found
        """
        disks = vmxml_devices.by_device_tag("disk")
        for disk in disks:
            if disk.device != disk_type:
                continue
            if disk.target["dev"] != target_dev:
                continue
            if disk.xmltreefile.find("source") is not None:
                if disk.source.attrs["file"] != source_file:
                    continue
            else:
                continue
            # All three conditions met
            logging.debug("Find %s in given disk XML", source_file)
            return True
        logging.debug("Not find %s in gievn disk XML", source_file)
        return False

    def check_result(disk_source, disk_type, disk_target, flags, attach=True):
        """
        Check the test result of attach/detach-device command.
        """
        vm_state = pre_vm_state
        active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        active_attached = is_attached(active_vmxml.devices, disk_type, disk_source, disk_target)
        if vm_state != "transient":
            inactive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name, options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices, disk_type, disk_source, disk_target)

        if flags.count("config") and not flags.count("live"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated" " when --config options used for" " attachment"
                        )
                    if vm_state != "shutoff":
                        if active_attached:
                            raise exceptions.TestFail(
                                "Active domain XML updated " "when --config options used " "for attachment"
                            )
                else:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated" " when --config options used for" " detachment"
                        )
        elif flags.count("live") and not flags.count("config"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated" " when --live options used for" " attachment"
                        )
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated " "when --live options used for" " attachment"
                        )
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated" " when --live options used for" " detachment"
                        )
        elif flags.count("live") and flags.count("config"):
            if attach:
                if vm_state in ["paused", "running"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated" " when --live --config options" " used for attachment"
                        )
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated" " when --live --config options " "used for attachment"
                        )
            else:
                if vm_state in ["paused", "running"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated " "when --live --config options " "used for detachment"
                        )
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated " "when --live --config options" " used for detachment"
                        )
        elif flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated" " when --current options used " "for attachment"
                        )
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated " "when --current options used " "for live attachment"
                        )
                if vm_state == "shutoff" and not inactive_attached:
                    raise exceptions.TestFail(
                        "Inactive domain XML not updated" " when --current options used for" " attachment"
                    )
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated" " when --current options used " "for detachment"
                        )
                if vm_state == "shutoff" and inactive_attached:
                    raise exceptions.TestFail(
                        "Inactive domain XML not updated" " when --current options used for" " detachment"
                    )

    vm_ref = params.get("at_dt_device_vm_ref", "name")
    at_status_error = "yes" == params.get("at_status_error", "no")
    dt_status_error = "yes" == params.get("dt_status_error", "no")

    # Disk specific attributes.
    at_options = params.get("at_dt_device_at_options", "")
    dt_options = params.get("at_dt_device_dt_options", "")
    device = params.get("at_dt_device_device", "disk")
    device_source_name = params.get("at_dt_device_source", "attach.img")
    device_target = params.get("at_dt_device_target", "vdd")

    if vm.is_alive():
        vm.destroy(gracefully=False)
    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Turn VM into certain state.
    if pre_vm_state == "running":
        logging.info("Starting %s..." % vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
    elif pre_vm_state == "shutoff":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)
    elif pre_vm_state == "paused":
        logging.info("Pausing %s..." % vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
        if not vm.pause():
            raise exceptions.TestSkipError("Cann't pause the domain")
    elif pre_vm_state == "transient":
        logging.info("Creating %s..." % vm_name)
        vm.undefine()
        if virsh.create(backup_xml.xml, **virsh_dargs).exit_status:
            backup_xml.define()
            raise exceptions.TestSkipError("Cann't create the domain")
        vm.wait_for_login().close()

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    try:
        device_source = os.path.join(data_dir.get_tmp_dir(), device_source_name)
        libvirt.create_local_disk("file", device_source, "1")

        # Get disk xml file.
        disk_params = {
            "type_name": "file",
            "device_type": device,
            "target_dev": device_target,
            "target_bus": "virtio",
            "source_file": device_source,
            "driver_name": "qemu",
            "driver_type": "raw",
        }
        disk_xml = libvirt.create_disk_xml(disk_params)

        # Attach the disk.
        ret = virsh.attach_device(vm_ref, disk_xml, flagstr=at_options, debug=True)
        libvirt.check_exit_status(ret, at_status_error)

        # Check if the command took effect in the config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()

        # Sleep a while so the vm is stable
        time.sleep(3)
        if not ret.exit_status:
            check_result(device_source, device, device_target, at_options)

        # Detach the disk.
        if pre_vm_state == "paused":
            if not vm.pause():
                raise exceptions.TestFail("Cann't pause the domain")
        disk_xml = libvirt.create_disk_xml(disk_params)
        ret = virsh.detach_device(vm_ref, disk_xml, flagstr=dt_options, debug=True)
        libvirt.check_exit_status(ret, dt_status_error)

        # Check if the command took effect in the config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()

        # Sleep a while to let the vm stabilize
        time.sleep(3)
        if not ret.exit_status:
            check_result(device_source, device, device_target, dt_options, False)

        # Try to start vm at last.
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()

        if os.path.exists(device_source):
            os.remove(device_source)
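
# A condensed sketch of the attach/detach round-trip the example above
# exercises, assuming the avocado-vt "virsh" and "libvirt" helpers used
# throughout (the VM name, image path and target below are illustrative):
def _attach_detach_sketch(vm_name="avocado-vt-vm1"):
    disk_xml = libvirt.create_disk_xml({
        "type_name": "file", "device_type": "disk",
        "target_dev": "vdd", "target_bus": "virtio",
        "source_file": "/var/tmp/attach.img",
        "driver_name": "qemu", "driver_type": "raw"})
    # Hot-plug the disk into the live guest, then hot-unplug it again.
    ret = virsh.attach_device(vm_name, disk_xml, flagstr="--live", debug=True)
    libvirt.check_exit_status(ret)
    ret = virsh.detach_device(vm_name, disk_xml, flagstr="--live", debug=True)
    libvirt.check_exit_status(ret)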
Example No. 14
def run(test, params, env):
    """
    Test interface xml options.

    1. Prepare test environment, destroy or suspend a VM.
    2. Edit xml and start the domain.
    3. Perform test operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.wait_for_login()

    def create_iface_xml(mac):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        iface.source = iface_source
        iface.model = iface_model if iface_model else "virtio"
        if iface_target:
            iface.target = {'dev': iface_target}
        iface.mac_address = mac
        if iface_rom:
            iface.rom = eval(iface_rom)
        logging.debug("Create new interface xml: %s", iface)
        return iface
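
    # For a default parameter set, create_iface_xml() yields XML along these
    # lines (illustrative):
    #   <interface type='network'>
    #     <source network='default'/>
    #     <model type='virtio'/>
    #     <mac address='52:54:00:xx:xx:xx'/>
    #   </interface>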

    # Interface specific attributes.
    iface_num = params.get("iface_num", '1')
    iface_type = params.get("iface_type", "network")
    iface_source = eval(params.get("iface_source",
                                   "{'network':'default'}"))
    iface_model = params.get("iface_model")
    iface_target = params.get("iface_target")
    iface_mac = params.get("iface_mac")
    iface_rom = params.get("iface_rom")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_iface = "yes" == params.get("attach_iface", "no")
    attach_option = params.get("attach_option", "")
    detach_device = "yes" == params.get("detach_device")
    stress_test = "yes" == params.get("stress_test")
    restart_libvirtd = "yes" == params.get("restart_libvirtd",
                                           "no")
    start_vm = "yes" == params.get("start_vm", "yes")
    options_test = "yes" == params.get("options_test", "no")
    username = params.get("username")
    password = params.get("password")
    poll_timeout = int(params.get("poll_timeout", 10))
    err_msgs1 = params.get("err_msgs1")
    err_msgs2 = params.get("err_msgs2")
    err_msg_rom = params.get("err_msg_rom")

    # stress_test requires a detach operation
    stress_test_detach_device = False
    stress_test_detach_interface = False
    if stress_test:
        if attach_device:
            stress_test_detach_device = True
        if attach_iface:
            stress_test_detach_interface = True

    # The following detach-device steps also use the attach option
    detach_option = attach_option

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    #iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()

    # Check virsh command option
    check_cmds = []
    sep_options = attach_option.split()
    logging.debug("sep_options: %s" % sep_options)
    for sep_option in sep_options:
        if attach_device and sep_option:
            check_cmds.append(('attach-device', sep_option))
        if attach_iface and sep_option:
            check_cmds.append(('attach-interface', sep_option))
        if (detach_device or stress_test_detach_device) and sep_option:
            check_cmds.append(('detach-device', sep_option))
        if stress_test_detach_interface and sep_option:
            check_cmds.append(('detach-interface', sep_option))

    for cmd, option in check_cmds:
        libvirt.virsh_cmd_has_option(cmd, option)
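    # virsh_cmd_has_option() probes the installed virsh binary for each
    # command/option pair, so unsupported combinations are skipped up front
    # rather than surfacing as failures later.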

    try:
        try:
            # Attach an interface when vm is running
            iface_list = []
            err_msgs = ("No more available PCI slots",
                        "No more available PCI addresses")
            if not start_vm:
                virsh.destroy(vm_name)
            for i in range(int(iface_num)):
                if attach_device:
                    logging.info("Try to attach device loop %s" % i)
                    if iface_mac:
                        mac = iface_mac
                    else:
                        mac = utils_net.generate_mac_address_simple()
                    iface_xml_obj = create_iface_xml(mac)
                    iface_xml_obj.xmltreefile.write()
                    ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                              flagstr=attach_option,
                                              ignore_status=True,
                                              debug=True)
                elif attach_iface:
                    logging.info("Try to attach interface loop %s" % i)
                    mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (iface_type, iface_source['network'],
                                iface_model, mac, attach_option))
                    ret = virsh.attach_interface(vm_name, options,
                                                 ignore_status=True)
                if ret.exit_status:
                    if any([msg in ret.stderr for msg in err_msgs]):
                        logging.debug("No more pci slots, can't attach more devices")
                        break
                    elif (ret.stderr.count("doesn't support option %s" % attach_option)):
                        test.cancel(ret.stderr)
                    elif err_msgs1 in ret.stderr:
                        logging.debug("option %s is not supported when the "
                                      "domain running state is %s"
                                      % (attach_option, vm.is_alive()))
                        if start_vm or ("--live" not in sep_options and attach_option):
                            test.fail("got 'not supported', which is unexpected")
                    elif err_msgs2 in ret.stderr:
                        logging.debug("options %s are mutually exclusive" % attach_option)
                        if not ("--current" in sep_options and len(sep_options) > 1):
                            test.fail("got 'mutually exclusive', which is unexpected")
                    elif err_msg_rom in ret.stderr:
                        logging.debug("Attach failed with expect err msg: %s" % err_msg_rom)
                    else:
                        test.fail("Failed to attach-interface: %s" % ret.stderr.strip())
                elif stress_test:
                    if attach_device:
                        # Detach the device immediately for stress test
                        ret = virsh.detach_device(vm_name, iface_xml_obj.xml,
                                                  flagstr=detach_option,
                                                  ignore_status=True)
                    elif attach_iface:
                        # Detach the device immediately for stress test
                        options = ("--type %s --mac %s %s" %
                                   (iface_type, mac, detach_option))
                        ret = virsh.detach_interface(vm_name, options,
                                                     ignore_status=True)
                    libvirt.check_exit_status(ret)
                else:
                    if attach_device:
                        iface_list.append({'mac': mac,
                                           'iface_xml': iface_xml_obj})
                    elif attach_iface:
                        iface_list.append({'mac': mac})
            # Restart libvirtd service
            if restart_libvirtd:
                libvirtd.restart()
                # After restarting libvirtd, the old console is invalidated,
                # so we need to create a new serial console
                vm.cleanup_serial_console()
                vm.create_serial_console()
            # In the options test, check whether the interface is attached
            # in the current state when the attach command succeeds

            def check_iface_exist():
                try:
                    session = vm.wait_for_serial_login(username=username,
                                                       password=password)
                    if utils_net.get_linux_ifname(session, iface['mac']):
                        return True
                    else:
                        logging.debug("can not find interface in vm")
                except Exception:
                    return False
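            # Note: when the interface is missing and no exception is raised,
            # check_iface_exist() implicitly returns None, which
            # utils_misc.wait_for() treats the same as False.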
            if options_test:
                for iface in iface_list:
                    if 'mac' in iface:
                        # Check interface in dumpxml output
                        if_attr = vm_xml.VMXML.get_iface_by_mac(vm_name,
                                                                iface['mac'])
                        if vm.is_alive() and attach_option == "--config":
                            if if_attr:
                                test.fail("interface should not exists "
                                          "in current live vm while "
                                          "attached by --config")
                        else:
                            if if_attr:
                                logging.debug("interface %s found current "
                                              "state in xml" % if_attr['mac'])
                            else:
                                test.fail("no interface found in "
                                          "current state in xml")

                        if if_attr:
                            if if_attr['type'] != iface_type or \
                                    if_attr['source'] != \
                                    iface_source['network']:
                                test.fail("Interface attribute doesn't "
                                          "match attachment options")
                        # check interface in vm only when vm is active
                        if vm.is_alive():
                            logging.debug("check interface in current state "
                                          "in vm")

                            if not utils_misc.wait_for(check_iface_exist, timeout=20):
                                if attach_option != "--config":
                                    test.fail("Can't see interface "
                                              "in current state in vm")
                                else:
                                    logging.debug("interface not found in "
                                                  "current state in vm, as "
                                                  "expected for --config")
                            else:
                                logging.debug("found interface in "
                                              "current state in vm")
                            # In the options test, if the attach was performed
                            # while the vm was running, destroy and start it
                            # again to check the next state
                            vm.destroy()

            # Start the domain if needed
            if vm.is_dead():
                vm.start()
            session = vm.wait_for_serial_login(username=username,
                                               password=password)

            # check if interface is attached
            for iface in iface_list:
                if 'mac' in iface:
                    logging.debug("check interface in xml")
                    # Check interface in dumpxml output
                    if_attr = vm_xml.VMXML.get_iface_by_mac(vm_name,
                                                            iface['mac'])
                    logging.debug(if_attr)
                    if if_attr:
                        logging.debug("interface {} is found in xml".
                                      format(if_attr['mac']))
                        if (if_attr['type'] != iface_type or
                                if_attr['source'] != iface_source['network']):
                            test.fail("Interface attribute doesn't "
                                      "match attachment options")
                        if options_test and start_vm and attach_option \
                                in ("--current", "--live", ""):
                            test.fail("interface should not exist after "
                                      "restarting the vm in options_test")
                    else:
                        logging.debug("no interface found in xml")
                        if options_test and start_vm and attach_option in \
                                ("--current", "--live", ""):
                            logging.debug("interface not exists next state "
                                          "in xml with %s" % attach_option)
                        else:
                            test.fail("Can't see interface in dumpxml")

                    # Check interface on guest
                    if not utils_misc.wait_for(check_iface_exist, timeout=20):
                        logging.debug("can't see interface next state in vm")
                        if start_vm and attach_option in \
                                ("--current", "--live", ""):
                            logging.debug("it is expected")
                        else:
                            test.fail("should find interface "
                                      "but no seen in next state in vm")

            # Detach hot/cold-plugged interface at last
            if detach_device:
                for iface in iface_list:
                    if 'iface_xml' in iface:
                        ret = virsh.detach_device(vm_name,
                                                  iface['iface_xml'].xml,
                                                  flagstr="",
                                                  ignore_status=True)
                        libvirt.check_exit_status(ret)
                    else:
                        options = ("%s --mac %s" %
                                   (iface_type, iface['mac']))
                        ret = virsh.detach_interface(vm_name, options,
                                                     ignore_status=True)
                        libvirt.check_exit_status(ret)

                # Check if interface was detached
                for iface in iface_list:
                    if 'mac' in iface:
                        polltime = time.time() + poll_timeout
                        while True:
                            # Check interface in dumpxml output
                            if not vm_xml.VMXML.get_iface_by_mac(vm_name,
                                                                 iface['mac']):
                                break
                            else:
                                time.sleep(2)
                                if time.time() > polltime:
                                    test.fail("Interface still "
                                              "exists after detachment")
            session.close()
        except virt_vm.VMStartError as e:
            logging.info(str(e))
            test.fail('VM failed to start:\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
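
# The two attachment styles exercised above differ only in how the interface
# is described; a minimal sketch (network name and MAC are illustrative):
#
#   virsh.attach_device(vm_name, iface.xml, flagstr="--live")    # XML-based
#   virsh.attach_interface(vm_name, "network default --model virtio"
#                          " --mac 52:54:00:11:22:33 --live")    # option-based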
Example No. 15
def run(test, params, env):
    """
    Test rbd disk device.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare disk image.
    3. Edit disks xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml")

    def config_ceph():
        """
        Write the ceph monitor hosts to the config file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')
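        # The resulting file holds a single line, e.g. (hosts illustrative):
        #   mon_host = 10.0.0.1:6789,10.0.0.2:6789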

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(
                    disk_src_host.split(), disk_src_port.split()):
                source_host.append({'name': host_name,
                                    'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'"
                            % (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(pool_name, mon_host,
                                      disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: Volume parameters dict.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = '\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)
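        # The assembled pipeline looks roughly like (illustrative):
        #   ps -ef | grep <vm_name> | grep -v grep \
        #     | grep 'file=rbd:<pool/image>:' | grep 'mon_host=<hosts>'
        # process.run() raises on a non-zero exit status, so a missing match
        # surfaces as a test error here.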

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(),
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev]
        xml_dom_exp = ["source file='%s'" % snap_disk,
                       "backingStore type='network' index='1'",
                       "source protocol='rbd' name='%s'" % disk_src_name]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'"
                                % snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")
        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
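        # After a successful --pivot, the copy (blk_file) becomes the domain's
        # active disk source, which the dumpxml check below verifies.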
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.
        :param vm_obj: VM guest object.
        :param target: Disk dev in VM.
        :param old_parts: Partition list collected before attaching the disk.
        :param read_only: Whether the disk is expected to be read-only.
        :return: True if the check succeeds.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)"
                   .format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Readonly fs, check the error messages.
            # The command may return True, read-only
            # messges can be found from the command output
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots of the rbd_vol.img volume, unprotect them, and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text
        snap_names = []
        if snaps_out:
            for line in snaps_out.rsplit("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.rsplit()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect snapshots first, otherwise purging the volume will fail
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume, and then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)

    def make_snapshot():
        """
        Make external snapshots.

        :return: external snapshot path list
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2")
        snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3")
        snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4")
        snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4")
        snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5")
        snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {"s1": "s1 --disk-only --no-metadata",
                                "s2": "s2 --memspec %s --no-metadata" % snapshot2_file,
                                "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file,
                                "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file),
                                "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file)}
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(first_disk_source.replace('qcow2', snapshot_name))
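                # Libvirt's default external snapshot file name is the original
                # path with its suffix replaced by the snapshot name (e.g.
                # disk.qcow2 -> disk.s1), which the replace() above rebuilds.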
        return snapshot_path_list

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = results_stdout_52lts(secret_list_result).strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # If secret list is empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace and take the first column (the UUID)
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get("test_disk_internal_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no")
    disk_snapshot_with_sanlock = "yes" == params.get("disk_internal_with_sanlock", "no")

    # Create /etc/ceph/ceph.conf file to suppress a spurious warning message.
    process.run("mkdir -p /etc/ceph", ignore_status=True, shell=True)
    cmd = ("echo 'mon_host = {0}' >/etc/ceph/ceph.conf"
           .format(mon_host))
    process.run(cmd, ignore_status=True, shell=True)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                        True, timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(data_dir.get_tmp_dir(),
                                      "%s_frontend_test.img" % vm_name)
    # Construct an unsupported error message list to skip these kinds of tests
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append('unsupported configuration: external snapshot ' +
                                   'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append('unsupported configuration: internal snapshot for disk ' +
                               'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up any leftover secrets in the test environment.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("fail to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("fail to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = 'sanlock'  # assumed 'sanlock'; value redacted in the source
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
            sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict["sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict["chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict["restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict["sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                             "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict["chown sanlock:sanlock " +
                             "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc"
            sanlock_cmd_dict["sanlock client add_lockspace -s TEST_LS:1:" +
                             "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'}
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()
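            # The added device serializes to a <lease> element like this
            # (illustrative):
            #   <lease>
            #     <lockspace>TEST_LS</lockspace>
            #     <key>test-disk-resource-lock</key>
            #     <target path='/var/lib/libvirt/sanlock/test-disk-resource-lock'/>
            #   </lease>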

        # Install ceph-common package which includes the rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

                secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
                logging.debug("Secret uuid %s", secret_uuid)
                if secret_uuid is None:
                    test.error("Failed to get secret uuid")

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
        disk_path = ("rbd:%s:mon_host=%s" %
                     (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" %
                          (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port})
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt,
                                          disk_src_name, disk_format,
                                          blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {"name": vol_name, "capacity": int(vol_cap),
                          "capacity_unit": vol_cap_unit, "format": disk_format}

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make a filesystem on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                        " %s %s %s" % (mon_host, key_opt, disk_src_name,
                                       disk_format, img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}'
                            % (disk_src_name, mon_host))
                # pass different json string according to the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key))
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # For hot plug, start the VM first and wait for the OS to boot.
        # Otherwise stop the VM if it is running.
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # No auth options are needed for volume
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            xml_file = libvirt.create_disk_xml(params)
            if additional_guest:
                # Copy xml_file for additional guest VM.
                shutil.copyfile(xml_file, additional_xml_file)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                    additional_vm.wait_for_login().close()
                ret = virsh.attach_device(guest_name, additional_xml_file,
                                          "", debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path,
                                    targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait until the vm is running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(vm, targetdev, old_parts,
                               read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm"
                          " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
        for msg in unsupported_err:
            if msg in str(details):
                test.cancel(str(details))
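        # for-else: test.cancel() raises and leaves the loop, so this else
        # branch only runs when no unsupported message matched.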
        else:
            test.fail("VM failed to start."
                      "Error: %s" % str(details))
    finally:
        # Remove /etc/ceph/ceph.conf file if exists.
        if os.path.exists('/etc/ceph/ceph.conf'):
            os.remove('/etc/ceph/ceph.conf')
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
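
The cleanup above relies on a get_secret_list() helper defined elsewhere in the test module. A minimal sketch of such a helper, assuming the usual two-header-line `virsh secret-list` output format (the parsing is an assumption, not the test's actual code):

def get_secret_list():
    """Return the UUIDs of all currently defined libvirt secrets."""
    # `virsh secret-list` prints two header lines followed by
    # "<uuid>  <usage>" rows; skip the headers and keep column one.
    output = virsh.secret_list(debug=True).stdout.strip().splitlines()
    return [line.split()[0] for line in output[2:] if line.strip()]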
Exemplo n.º 16
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create an image to attach to VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    disk_seclabel = params.get("disk_seclabel", "no")
    # Get variables about pool vol
    with_pool_vol = 'yes' == params.get("with_pool_vol", "no")
    check_cap_rawio = "yes" == params.get("check_cap_rawio", "no")
    virt_use_nfs = params.get("virt_use_nfs", "off")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format", "qcow2")
    device_target = params.get("disk_target")
    device_bus = params.get("disk_target_bus")
    device_type = params.get("device_type", "file")
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about image.
    img_label = params.get('svirt_attach_disk_disk_label')
    sec_disk_dict = {'model': sec_model, 'label': img_label, 'relabel': sec_relabel}
    enable_namespace = 'yes' == params.get('enable_namespace', 'no')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in list(disks.values()):
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    disk_xml = Disk(type_name=device_type)
    disk_xml.device = "disk"
    try:
        # set qemu conf
        if check_cap_rawio:
            qemu_conf.user = 'root'  # run qemu as root so cap_sys_rawio can be kept
            qemu_conf.group = 'root'
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        if with_pool_vol:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            logging.debug("pool_type %s" % pool_type)
            pvt.pre_pool(pool_name, pool_type, pool_target,
                         emulated_image, image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pool did not support create volume in libvirt,
                # logical pool could use libvirt to create volume but volume
                # format is not supported and will be 'raw' as default.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                vol_format = "raw"
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                vol_arg = {'name': vol_name, 'format': vol_format,
                           'capacity': 1073741824,
                           'allocation': 1048576, }
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" % newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["iscsi", "disk"]:
                source_type = "dev"
                if pool_type == "iscsi":
                    disk_xml.device = "lun"
                    disk_xml.rawio = "yes"
                else:
                    if not enable_namespace:
                        qemu_conf.namespaces = ''
                        logging.debug("the qemu.conf content is: %s" % qemu_conf)
                        libvirtd.restart()
            else:
                source_type = "file"

            # set host_sestatus as nfs pool will reset it
            utils_selinux.set_status(host_sestatus)
            # set virt_use_nfs
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                                 shell=True)
            if result.exit_status:
                test.cancel("Failed to set virt_use_nfs value")
        else:
            source_type = "file"
            # Init a QemuImg instance.
            params['image_name'] = img_name
            tmp_dir = data_dir.get_tmp_dir()
            image = qemu_storage.QemuImg(params, tmp_dir, img_name)
            # Create a image.
            img_path, result = image.create(params)
            # Set the context of the image.
            if sec_relabel == "no":
                utils_selinux.set_context_of_file(filename=img_path, context=img_label)

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        disk_xml.driver = {"name": "qemu", "type": vol_format}
        if disk_seclabel == "yes":
            source_seclabel = []
            sec_xml = seclabel.Seclabel()
            sec_xml.update(sec_disk_dict)
            source_seclabel.append(sec_xml)
            disk_source = disk_xml.new_disk_source(**{"attrs": {source_type: img_path},
                                                      "seclabels": source_seclabel})
        else:
            disk_source = disk_xml.new_disk_source(**{"attrs": {source_type: img_path}})
            # Set the context of the VM.
            vmxml.set_seclabel([sec_dict])
            vmxml.sync()

        disk_xml.source = disk_source
        logging.debug(disk_xml)

        # Do the attach action.
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml.xml, flagstr='--persistent')
        libvirt.check_exit_status(cmd_result, expect_error=False)
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                test.fail('Test succeeded in negative case.')

            if check_cap_rawio:
                cap_list = ['CapPrm', 'CapEff', 'CapBnd']
                cap_dict = {}
                pid = vm.get_pid()
                pid_status_path = "/proc/%s/status" % pid
                with open(pid_status_path) as f:
                    for line in f:
                        val_list = line.split(":")
                        if val_list[0] in cap_list:
                            cap_dict[val_list[0]] = int(val_list[1].strip(), 16)

                # Bitwise AND with the rawio capability mask to check that
                # cap_sys_rawio is set
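                # CAP_SYS_RAWIO is capability number 17, so its mask is
                # 1 << 17 == 0x20000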
                cap_rawio_val = 0x0000000000020000
                for i in cap_list:
                    if not cap_rawio_val & cap_dict[i]:
                        err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        err_msg += " lack cap_sys_rawio capabilities"
                        test.fail(err_msg)
                    else:
                        inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        inf_msg += " have cap_sys_rawio capabilities"
                        logging.debug(inf_msg)
            if pool_type == "disk":
                if libvirt_version.version_compare(3, 1, 0) and enable_namespace:
                    vm_pid = vm.get_pid()
                    output = process.system_output(
                        "nsenter -t %d -m -- ls -Z %s" % (vm_pid, img_path))
                else:
                    output = process.system_output('ls -Z %s' % img_path)
                logging.debug("The default label is %s", default_label)
                logging.debug("The label after guest started is %s", to_text(output.strip().split()[-2]))
                if default_label not in to_text(output.strip().split()[-2]):
                    test.fail("The label is wrong after guest started\n")
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                test.fail("Test failed in positive case."
                          "error: %s" % e)

        cmd_result = virsh.detach_device(domainarg=vm_name, filearg=disk_xml.xml)
        libvirt.check_exit_status(cmd_result, status_error)
    finally:
        # clean up
        vm.destroy()
        if not with_pool_vol:
            image.remove()
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except exceptions.TestFail as detail:
                logging.error(str(detail))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if check_cap_rawio:
            qemu_conf.restore()
            libvirtd.restart()
Exemplo n.º 17
def run(test, params, env):
    """
    Test for vhba hostdev passthrough.
    1. create a vhba
    2. prepare hostdev xml for lun device of the newly created vhba
    3.1 If hot attach, attach-device the hostdev xml to vm
    3.2 If cold attach, add the hostdev to vm and start it
    4. login the vm and check the attached disk
    5. detach-device the hostdev xml
    6. login the vm to check the partitions
    """

    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param vm: VM guest.
        :param target: Disk dev in VM.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(added_part))
            try:
                cmd_status, cmd_output = session.cmd_status_output(cmd)
            except Exception as detail:
                test.error("Error occurred when run cmd: fdisk, %s" % detail)
            logging.info("Check disk operation in VM:\n%s", cmd_output)
            session.close()
            if cmd_status != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as detail:
            logging.error(str(detail))
            return False

    try:
        status_error = "yes" == params.get("status_error", "no")
        vm_name = params.get("main_vm", "avocado-vt-vm1")
        device_target = params.get("hostdev_disk_target", "hdb")
        scsi_wwnn = params.get("scsi_wwnn", "ENTER.YOUR.WWNN")
        scsi_wwpn = params.get("scsi_wwpn", "ENTER.YOUR.WWPN")
        attach_method = params.get('attach_method', 'hot')
        vm = env.get_vm(vm_name)
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        virsh_dargs = {'debug': True, 'ignore_status': True}
        new_vhbas = []

        if scsi_wwnn.count("ENTER.YOUR.WWNN") or \
                scsi_wwpn.count("ENTER.YOUR.WWPN"):
            test.cancel("You didn't provide proper wwpn/wwnn")
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()
        old_parts = libvirt.get_parts_list(session)
        # find first online hba
        online_hbas = []
        online_hbas = utils_npiv.find_hbas("hba")
        if not online_hbas:
            test.cancel("NO ONLINE HBAs!")
        first_online_hba = online_hbas[0]
        # create vhba based on the first online hba
        old_vhbas = utils_npiv.find_hbas("vhba")
        logging.debug("Original online vHBAs: %s", old_vhbas)
        new_vhba = utils_npiv.nodedev_create_from_xml(
                {"nodedev_parent": first_online_hba,
                 "scsi_wwnn": scsi_wwnn,
                 "scsi_wwpn": scsi_wwpn})
        if not utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                   timeout=_TIMEOUT):
            test.fail("vhba not successfully created")
        new_vhbas.append(new_vhba)
        # find first available lun of the newly created vhba
        lun_dicts = []
        first_lun = {}
        if not utils_misc.wait_for(lambda: utils_npiv.find_scsi_luns(new_vhba),
                                   timeout=_TIMEOUT):
            test.fail("There is no available lun storage for "
                      "wwpn: %s, please check your wwns or "
                      "contact IT admins" % scsi_wwpn)
        lun_dicts = utils_npiv.find_scsi_luns(new_vhba)
        logging.debug("The luns discovered are: %s", lun_dicts)
        first_lun = lun_dicts[0]
        # prepare hostdev xml for the first lun
        kwargs = {'addr_bus': first_lun['bus'],
                  'addr_target': first_lun['target'],
                  'addr_unit': first_lun['unit']}

        new_hostdev_xml = utils_npiv.create_hostdev_xml(
                   adapter_name="scsi_host"+first_lun['scsi'],
                   **kwargs)
        logging.info("New hostdev xml as follow:")
        logging.info(new_hostdev_xml)
        new_hostdev_xml.xmltreefile.write()
        if attach_method == "hot":
            # attach-device the lun's hostdev xml to guest vm
            result = virsh.attach_device(vm_name, new_hostdev_xml.xml)
            libvirt.check_exit_status(result, status_error)
        elif attach_method == "cold":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            new_devices = vmxml.devices
            new_devices.append(new_hostdev_xml)
            vmxml.devices = new_devices
            vmxml.sync()
            vm.start()
            session = vm.wait_for_login()
            logging.debug("The new vm's xml is: \n%s", vmxml)

        # login vm and check the disk
        check_result = check_in_vm(vm, device_target, old_parts)
        if not check_result:
            test.fail("check disk in vm failed")
        result = virsh.detach_device(vm_name, new_hostdev_xml.xml)
        libvirt.check_exit_status(result, status_error)
        # login vm and check disk actually removed
        parts_after_detach = libvirt.get_parts_list(session)
        old_parts.sort()
        parts_after_detach.sort()
        if parts_after_detach == old_parts:
            logging.info("hostdev successfully detached.")
        else:
            test.fail("Device not successfully detached. "
                      "Still existing in vm's /proc/partitions")
    finally:
        utils_npiv.vhbas_cleanup(new_vhbas)
        # recover vm
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        process.system('service multipathd restart', verbose=True)
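
The _TIMEOUT constant used with utils_misc.wait_for above is defined at module level in the original test and is not shown in this excerpt; a plausible value for slow NPIV scans would be, for example:

_TIMEOUT = 300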
Exemplo n.º 18
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm"
                          " after detachment")
Exemplo n.º 19
def run(test, params, env):
    """
    Test usb virtual disk plug/unplug.

    1.Prepare a vm with usb controllers
    2.Prepare a local image
    3.Prepare a virtual disk xml
    4.Attach the virtual disk to the vm
    5.Check the disk in vm
    6.Unplug the disk from vm
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def get_new_disks(old_partitions):
        """
        Get new virtual disks in vm after disk plug.

        :param old_partitions: Already existing partitions in vm.
        :return: New disks/partitions in vm.
        """
        session = None
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_partitions = libvirt.get_parts_list(session)
            added_partitions = list(set(new_partitions).difference(set(old_partitions)))
            if not added_partitions:
                logging.debug("No new partitions found in vm.")
            else:
                logging.debug("Newly added partition(s) is: %s", added_partitions)
            return added_partitions
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as err:
            test.fail("Error happens when get new disk: %s", str(err))
        finally:
            if session:
                session.close()

    def check_disk_type(partition):
        """
        Check if a disk partition is a usb disk in vm.

        :param partition: The disk partition in vm to be checked.
        :return: If the disk is a usb device, return True.
        """
        session = None
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            cmd = "ls -l /dev/disk/by-id/ | grep %s | grep -i usb" % partition
            status = session.cmd_status(cmd)
            return status == 0
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as err:
            logging.debug("Error happens when check if new disk is usb device: %s",
                          str(err))
            return False
        finally:
            if session:
                session.close()

    def check_disk_io(partition):
        """
        Check if the disk partition in vm can be normally used.

        :param partition: The disk partition in vm to be checked.
        :return: If the disk can be used, return True.
        """
        session = None
        try:
            session = vm.wait_for_login()
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(partition))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as err:
            logging.debug("Error happens when check disk io in vm: %s", str(err))
            return False
        finally:
            if session:
                session.close()

    def remove_usbs(vmxml):
        """
        Remove all USB devices and controllers from a vm's xml.

        :param vmxml: The vm's xml.
        """
        try:
            for xml in vmxml.xmltreefile.findall('/devices/*'):
                if (xml.get('bus') == 'usb') or (xml.get('type') == 'usb'):
                    vmxml.xmltreefile.remove(xml)
        except (AttributeError, TypeError):
            pass  # Element doesn't exist at first
        vmxml.xmltreefile.write()

    def prepare_usb_controller(vmxml, usb_models):
        """
        Add usb controllers into vm's xml.

        :param vmxml: The vm's xml.
        :param usb_models: The usb models will be used in usb controller(s).
        """
        if not usb_models:
            test.error("No usb model provided.")
        # Add disk usb controller(s)
        for usb_model in usb_models:
            usb_controller = Controller("controller")
            usb_controller.type = "usb"
            usb_controller.index = "0"
            usb_controller.model = usb_model
            vmxml.add_device(usb_controller)
        # Redefine domain
        vmxml.sync()

    def prepare_local_image():
        """
        Prepare a local image.

        :return: The path to the image file.
        """
        image_filename = params.get("image_filename", "raw.img")
        image_format = params.get("image_format", "raw")
        image_size = params.get("image_size", "1G")
        image_path = os.path.join(test.tmpdir, image_filename)
        try:
            if image_format in ["raw", "qcow2"]:
                image_path = libvirt.create_local_disk("file",
                                                       image_path, image_size,
                                                       disk_format=image_format)
            else:
                test.cancel("We only test raw & qcow2 format for now.")
        except Exception as err:
            test.error("Error happens when prepare local image: %s", err)
        return image_path

    def prepare_virt_disk_xml(image_path):
        """
        Prepare the virtual disk xml to be attached/detached.

        :param image_path: The path to the local image.
        :return: The virtual disk xml.
        """
        virt_disk_device = params.get("virt_disk_device", "disk")
        virt_disk_device_type = params.get("virt_disk_device_type", "file")
        virt_disk_device_format = params.get("virt_disk_device_format", "raw")
        virt_disk_device_target = params.get("virt_disk_device_target", "sdb")
        virt_disk_device_bus = params.get("virt_disk_device_bus", "usb")
        disk_xml = Disk(type_name=virt_disk_device_type)
        disk_xml.device = virt_disk_device
        disk_src_dict = {'attrs': {'file': image_path, 'type_name': 'file'}}
        disk_xml.source = disk_xml.new_disk_source(**disk_src_dict)
        driver_dict = {"name": "qemu", "type": virt_disk_device_format}
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": virt_disk_device_target,
                           "bus": virt_disk_device_bus}
        return disk_xml

    usb_models = params.get("usb_model").split()
    coldplug = "yes" == params.get("coldplug")
    status_error = "yes" == params.get("status_error")
    new_disks = []
    new_disk = ""
    attach_options = ""

    # Get disk partitions info before hot/cold plug virtual disk
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_partitions = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Backup vm xml
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml = vmxml_backup.copy()

    try:
        remove_usbs(vmxml)
        prepare_usb_controller(vmxml, usb_models)
        vm.start()
        session = vm.wait_for_login()
        disk_xml = prepare_virt_disk_xml(prepare_local_image())
        session.close()
        if coldplug:
            attach_options = "--config"

        # Attach virtual disk to vm
        result = virsh.attach_device(vm_name, disk_xml.xml,
                                     flagstr=attach_options,
                                     ignore_status=True, debug=True)
        libvirt.check_exit_status(result, status_error)

        # Check the attached disk in vm
        if coldplug:
            vm.destroy(gracefully=False)
            vm.start()
            vm.wait_for_login().close()
        utils_misc.wait_for(lambda: get_new_disks(old_partitions), 20)
        new_disks = get_new_disks(old_partitions)
        if len(new_disks) != 1:
            test.fail("Attached 1 virtual disk but got %s." % len(new_disk))
        new_disk = new_disks[0]
        if not check_disk_type(new_disk):
            test.fail("The newly attached disk is not a usb one.")
        if not check_disk_io(new_disk):
            test.fail("Cannot operate the newly added disk in vm.")

        # Detach the disk from vm
        result = virsh.detach_device(vm_name, disk_xml.xml,
                                     flagstr=attach_options,
                                     ignore_status=True, debug=True)
        libvirt.check_exit_status(result, status_error)

        # Check the detached disk in vm
        if coldplug:
            vm.destroy(gracefully=False)
            vm.start()
            vm.wait_for_login().close()
        utils_misc.wait_for(lambda: not get_new_disks(old_partitions), 20)
        new_disks = get_new_disks(old_partitions)
        if len(new_disks) != 0:
            test.fail("Unplug virtual disk failed.")

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Restoring vm
        vmxml_backup.sync()
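
Note: utils_misc.wait_for returns the first truthy result of its function argument (or None on timeout), so the plug check above could capture the new disks in one call instead of probing twice. An equivalent rewrite under that assumption:

        new_disks = utils_misc.wait_for(
            lambda: get_new_disks(old_partitions), 20) or []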
Exemplo n.º 20
def run(test, params, env):
    """
    Test virtiofs filesystem device:

    1.Start guest with 1/2 virtiofs filesystem devices.
    2.Start 2 guest with the same virtiofs filesystem device.
    3.Coldplug/Coldunplug virtiofs filesystem device
    4.Share data between guests and host.
    5.Lifecycle for guest with virtiofs filesystem device.
    """
    def generate_expected_process_option(expected_results):
        """
        Generate expected virtiofsd process option
        """
        if cache_mode != "auto":
            expected_results = "cache=%s" % cache_mode
        if xattr == "on":
            expected_results += ",xattr"
        elif xattr == "off":
            expected_results += ",no_xattr"
        if flock == "on":
            expected_results += ",flock"
        else:
            expected_results += ",no_flock"
        if lock_posix == "on":
            expected_results += ",posix_lock"
        else:
            expected_results += ",no_posix_lock"
        logging.debug(expected_results)
        return expected_results

    def shared_data(vm_names, fs_devs):
        """
        Shared data between guests and host:
        1.Mount dir in guest;
        2.Write a file in guest;
        3.Check that the md5sum values are the same in guests and host;
        """
        md5s = []
        for vm in vms:
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                logging.debug(fs_dev)
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=False)
                session.cmd('mkdir -p %s' % mount_dir)
                logging.debug("mount virtiofs dir in guest")
                cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'],
                                                   mount_dir)
                status, output = session.cmd_status_output(cmd, timeout=300)
                if status != 0:
                    session.close()
                    test.fail("mount virtiofs dir failed: %s" % output)
                if vm == vms[0]:
                    filename_guest = mount_dir + '/' + vm.name
                    cmd = "dd if=/dev/urandom of=%s bs=1M count=512 oflag=direct" % filename_guest
                    status, output = session.cmd_status_output(cmd,
                                                               timeout=300)
                    if status != 0:
                        session.close()
                        test.fail("Write data failed: %s" % output)
                md5_value = session.cmd_status_output(
                    "md5sum %s" % filename_guest)[1].strip().split()[0]
                md5s.append(md5_value)
                logging.debug(md5_value)
                md5_value = process.run(
                    "md5sum %s" %
                    filename_guest).stdout_text.strip().split()[0]
                logging.debug(md5_value)
                md5s.append(md5_value)
            session.close()
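        # md5s holds one guest value and one host value per filesystem
        # device; if every pair matches, the set collapses to exactly
        # one unique value per device.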
        if len(set(md5s)) != len(fs_devs):
            test.fail("The md5sum value are not the same in guests and host")

    def launch_externally_virtiofs(source_dir, source_socket):
        """
        Launch externally virtiofs

        :param source_dir:  the dir shared on host
        :param source_socket: the socket file listened on
        """
        process.run('chcon -t virtd_exec_t %s' % path,
                    ignore_status=False,
                    shell=True)
        cmd = "systemd-run %s --socket-path=%s -o source=%s" % (
            path, source_socket, source_dir)
        try:
            process.run(cmd, ignore_status=False, shell=True)
            # Make sure the socket is created
            utils_misc.wait_for(lambda: os.path.exists(source_socket),
                                timeout=3)
            process.run("chown qemu:qemu %s" % source_socket,
                        ignore_status=False)
            process.run('chcon -t svirt_image_t %s' % source_socket,
                        ignore_status=False,
                        shell=True)
        except Exception as err:
            cmd = "pkill virtiofsd"
            process.run(cmd, shell=True)
            test.fail("{}".format(err))

    def prepare_stress_script(script_path, script_content):
        """
        Refer to xfstest generic/531. Create stress test script to create a lot of unlinked files.

        :param script_path: The path of the script
        :param script_content: The content of the stress script
        """
        logging.debug("stress script path: %s content: %s" %
                      (script_path, script_content))
        script_lines = script_content.split(';')
        try:
            with open(script_path, 'w') as fd:
                fd.write('\n'.join(script_lines))
            os.chmod(script_path, 0o777)
        except Exception as e:
            test.error("Prepare the guest stress script failed %s" % e)

    def run_stress_script(session, script_path):
        """
        Run stress script in the guest

        :param session: guest session
        :param script_path: The path of script in the guest
        """
        # Set ULIMIT_NOFILE to increase the number of unlinked files
        session.cmd("ulimit -n 500000 && /usr/bin/python3 %s" % script_path,
                    timeout=120)

    def umount_fs(vm):
        """
        Unmount the filesystem in guest

        :param vm: filesystem in this vm that should be unmounted
        """
        if vm.is_alive():
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('umount -f %s' % mount_dir, ignore_all_errors=True)
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
            session.close()

    def check_detached_xml(vm):
        """
        Check whether there is xml about the filesystem device
        in the vm xml

        :param vm: the vm to be checked
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        filesystems = vmxml.devices.by_device_tag('filesystem')
        if filesystems:
            test.fail("There should be no filesystem devices in guest "
                      "xml after hotunplug")

    def check_filesystem_in_guest(vm, fs_dev):
        """
        Check whether there is virtiofs in vm

        :param vm: the vm to be checked
        :param fs_dev: the virtiofs device to be checked
        """
        session = vm.wait_for_login()
        mount_dir = '/var/tmp/' + fs_dev.target['dir']
        cmd = "mkdir %s; mount -t virtiofs %s %s" % (
            mount_dir, fs_dev.target['dir'], mount_dir)
        status, output = session.cmd_status_output(cmd, timeout=300)
        session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
        if not status:
            test.fail(
                "Mount virtiofs should failed after hotunplug device. %s" %
                output)
        session.close()

    start_vm = params.get("start_vm", "no")
    vm_names = params.get("vms", "avocado-vt-vm1").split()
    cache_mode = params.get("cache_mode", "none")
    xattr = params.get("xattr", "on")
    lock_posix = params.get("lock_posix", "on")
    flock = params.get("flock", "on")
    xattr = params.get("xattr", "on")
    path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd")
    queue_size = int(params.get("queue_size", "512"))
    driver_type = params.get("driver_type", "virtiofs")
    guest_num = int(params.get("guest_num", "1"))
    fs_num = int(params.get("fs_num", "1"))
    vcpus_per_cell = int(params.get("vcpus_per_cell", 2))
    dir_prefix = params.get("dir_prefix", "mount_tag")
    error_msg_start = params.get("error_msg_start", "")
    error_msg_save = params.get("error_msg_save", "")
    status_error = params.get("status_error", "no") == "yes"
    socket_file_checking = params.get("socket_file_checking", "no") == "yes"
    suspend_resume = params.get("suspend_resume", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    coldplug = params.get("coldplug", "no") == "yes"
    hotplug_unplug = params.get("hotplug_unplug", "no") == "yes"
    detach_device_alias = params.get("detach_device_alias", "no") == "yes"
    extra_hugepages = params.get_numeric("extra_hugepages")
    edit_start = params.get("edit_start", "no") == "yes"
    with_hugepages = params.get("with_hugepages", "yes") == "yes"
    with_numa = params.get("with_numa", "yes") == "yes"
    with_memfd = params.get("with_memfd", "no") == "yes"
    source_socket = params.get("source_socket", "/var/tmp/vm001.socket")
    launched_mode = params.get("launched_mode", "auto")
    destroy_start = params.get("destroy_start", "no") == "yes"
    bug_url = params.get("bug_url", "")
    script_content = params.get("stress_script", "")
    stdio_handler_file = "file" == params.get("stdio_handler")

    fs_devs = []
    vms = []
    vmxml_backups = []
    expected_fails_msg = []
    expected_results = ""
    host_hp_size = utils_memory.get_huge_page_size()
    backup_huge_pages_num = utils_memory.get_num_huge_pages()
    huge_pages_num = 0

    if len(vm_names) != guest_num:
        test.cancel("This test needs exactly %d vms." % guest_num)

    if not libvirt_version.version_compare(7, 0, 0) and not with_numa:
        test.cancel("Not supported without NUMA before 7.0.0")

    if not libvirt_version.version_compare(7, 6, 0) and destroy_start:
        test.cancel("Bug %s is not fixed on current build" % bug_url)

    try:
        # Define filesystem device xml
        for index in range(fs_num):
            driver = {'type': driver_type, 'queue': queue_size}
            source_dir = os.path.join('/var/tmp/',
                                      str(dir_prefix) + str(index))
            logging.debug(source_dir)
            if not os.path.isdir(source_dir):
                os.mkdir(source_dir)
            target_dir = dir_prefix + str(index)
            source = {'socket': source_socket}
            target = {'dir': target_dir}
            if launched_mode == "auto":
                binary_keys = [
                    'path', 'cache_mode', 'xattr', 'lock_posix', 'flock'
                ]
                binary_values = [path, cache_mode, xattr, lock_posix, flock]
                binary_dict = dict(zip(binary_keys, binary_values))
                source = {'dir': source_dir}
                accessmode = "passthrough"
                fsdev_keys = [
                    'accessmode', 'driver', 'source', 'target', 'binary'
                ]
                fsdev_values = [
                    accessmode, driver, source, target, binary_dict
                ]
            else:
                fsdev_keys = ['driver', 'source', 'target']
                fsdev_values = [driver, source, target]
            fsdev_dict = dict(zip(fsdev_keys, fsdev_values))
            logging.debug(fsdev_dict)
            fs_dev = libvirt_device_utils.create_fs_xml(
                fsdev_dict, launched_mode)
            logging.debug(fs_dev)
            fs_devs.append(fs_dev)

        # Start guest with virtiofs filesystem device
        for index in range(guest_num):
            logging.debug("prepare vm %s", vm_names[index])
            vm = env.get_vm(vm_names[index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            if vmxml.max_mem < 1024000:
                vmxml.max_mem = 1024000
            if with_hugepages:
                huge_pages_num += vmxml.max_mem // host_hp_size + extra_hugepages
                utils_memory.set_num_huge_pages(huge_pages_num)
            vmxml.remove_all_device_by_type('filesystem')
            vmxml.sync()
            numa_no = None
            if with_numa:
                numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1
            vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name,
                                      vmxml.vcpu,
                                      numa_number=numa_no)
            vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,
                                               access_mode="shared",
                                               hpgs=with_hugepages,
                                               memfd=with_memfd)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            logging.debug(vmxml)
            if launched_mode == "externally":
                launch_externally_virtiofs(source_dir, source_socket)
            if coldplug:
                ret = virsh.attach_device(vm_names[index],
                                          fs_devs[0].xml,
                                          flagstr='--config',
                                          debug=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
            else:
                if not hotplug_unplug:
                    for fs in fs_devs:
                        vmxml.add_device(fs)
                        vmxml.sync()
            logging.debug(vmxml)
            libvirt_pcicontr.reset_pci_num(vm_names[index])
            result = virsh.start(vm_names[index], debug=True)
            if hotplug_unplug:
                if stdio_handler_file:
                    qemu_config = LibvirtQemuConfig()
                    qemu_config.stdio_handler = "file"
                    utils_libvirtd.Libvirtd().restart()
                for fs_dev in fs_devs:
                    ret = virsh.attach_device(vm_names[index],
                                              fs_dev.xml,
                                              ignore_status=True,
                                              debug=True)
                    libvirt.check_exit_status(ret, status_error)

                if status_error:
                    return
            if status_error and not managedsave:
                expected_error = error_msg_start
                utils_test.libvirt.check_exit_status(result, expected_error)
                return
            else:
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            expected_results = generate_expected_process_option(
                expected_results)
            if launched_mode == "auto":
                cmd = 'ps aux | grep virtiofsd | head -n 1'
                utils_test.libvirt.check_cmd_output(cmd,
                                                    content=expected_results)

        if managedsave:
            expected_error = error_msg_save
            result = virsh.managedsave(vm_names[0],
                                       ignore_status=True,
                                       debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        else:
            shared_data(vm_names, fs_devs)
            if suspend_resume:
                virsh.suspend(vm_names[0], debug=True, ignore_status=False)
                time.sleep(30)
                virsh.resume(vm_names[0], debug=True, ignore_status=False)
            elif destroy_start:
                session = vm.wait_for_login(timeout=120)
                # Prepare the guest test script
                script_path = os.path.join(fs_devs[0].source["dir"], "test.py")
                script_content %= (fs_devs[0].source["dir"],
                                   fs_devs[0].source["dir"])
                prepare_stress_script(script_path, script_content)
                # Run guest stress script
                stress_script_thread = threading.Thread(
                    target=run_stress_script, args=(session, script_path))
                stress_script_thread.daemon = True
                stress_script_thread.start()
                # Create a lot of unlink files
                time.sleep(60)
                virsh.destroy(vm_names[0], debug=True, ignore_status=False)
                ret = virsh.start(vm_names[0], debug=True)
                libvirt.check_exit_status(ret)
            elif edit_start:
                vmxml_virtio_backup = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_names[0])
                if vm.is_alive():
                    virsh.destroy(vm_names[0])
                    cmd = "virt-xml %s --edit --qemu-commandline '\-foo'" % vm_names[
                        0]
                    cmd_result = process.run(cmd,
                                             ignore_status=True,
                                             shell=True)
                    logging.debug(virsh.dumpxml(vm_names[0]))
                    if cmd_result.exit_status:
                        test.error("virt-xml edit guest failed: %s" %
                                   cmd_result)
                    result = virsh.start(vm_names[0],
                                         ignore_status=True,
                                         debug=True)
                    if error_msg_start:
                        expected_fails_msg.append(error_msg_start)
                    utils_test.libvirt.check_result(
                        result, expected_fails=expected_fails_msg)
                    if not libvirt_version.version_compare(6, 10, 0):
                        # Because of bug #1897105, it was fixed in libvirt-6.10.0,
                        # before this version, need to recover the env manually.
                        cmd = "pkill virtiofsd"
                        process.run(cmd, shell=True)
                    if not vm.is_alive():
                        # Restoring vm and check if vm can start successfully
                        vmxml_virtio_backup.sync()
                        virsh.start(vm_names[0],
                                    ignore_status=False,
                                    shell=True)
            elif socket_file_checking:
                result = virsh.domid(vm_names[0])
                domid = result.stdout.strip()
                domain_dir = "var/lib/libvirt/qemu/domain-" + domid + '-' + vm_names[
                    0]
                if result.exit_status:
                    test.fail("Get domid failed.")
                for fs_dev in fs_devs:
                    alias = fs_dev.alias['name']
                    expected_pid = os.path.join(domain_dir,
                                                alias + '-fs.pid')
                    expected_sock = os.path.join(domain_dir,
                                                 alias + '-fs.sock')
                    status1 = process.run('ls -l %s' % expected_pid,
                                          shell=True).exit_status
                    status2 = process.run('ls -l %s' % expected_sock,
                                          shell=True).exit_status
                    if status1 or status2:
                        test.fail(
                            "The socket and pid file is not as expected")
            elif hotplug_unplug:
                for vm in vms:
                    umount_fs(vm)
                    for fs_dev in fs_devs:
                        if detach_device_alias:
                            alias = fs_dev.alias['name']
                            cmd = 'lsof /var/log/libvirt/qemu/%s-%s-virtiofsd.log' % (
                                vm.name, alias)
                            output = process.run(cmd).stdout_text.splitlines()
                            for item in output[1:]:
                                if stdio_handler_file:
                                    if item.split()[0] != "virtiofsd":
                                        test.fail(
                                            "When setting stdio_handler as file, the command"
                                            "to write log should be virtiofsd!"
                                        )
                                else:
                                    if item.split()[0] != "virtlogd":
                                        test.fail(
                                            "When setting stdio_handler as logd, the command"
                                            "to write log should be virtlogd!")
                            ret = virsh.detach_device_alias(
                                vm.name,
                                alias,
                                ignore_status=True,
                                debug=True,
                                wait_for_event=True,
                                event_timeout=10)
                        else:
                            ret = virsh.detach_device(vm.name,
                                                      fs_dev.xml,
                                                      ignore_status=True,
                                                      debug=True,
                                                      wait_for_event=True)
                        libvirt.check_exit_status(ret, status_error)
                        check_filesystem_in_guest(vm, fs_dev)
                    check_detached_xml(vm)
    finally:
        for vm in vms:
            if vm.is_alive():
                umount_fs(vm)
                vm.destroy(gracefully=False)
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
        for index in range(fs_num):
            process.run('rm -rf /var/tmp/%s%s' % (dir_prefix, index),
                        ignore_status=False)
        process.run('rm -rf %s' % source_socket,
                    ignore_status=False,
                    shell=True)
        if launched_mode == "externally":
            process.run('restorecon %s' % path,
                        ignore_status=False,
                        shell=True)
        utils_memory.set_num_huge_pages(backup_huge_pages_num)
        if stdio_handler_file:
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
Exemplo n.º 21
def run(test, params, env):
    """
    Test for hotplug usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    usb_type = params.get("usb_type", "kbd")
    attach_type = params.get("attach_type", "attach_device")
    attach_count = int(params.get("attach_count", "1"))
    if usb_type == "storage":
        model = params.get("model", "nec-xhci")
        index = params.get("index", "1")
    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status("permissive")

    if usb_type == "storage":
        controllers = vm_xml.get_devices(device_type="controller")
        devices = vm_xml.get_devices()
        for dev in controllers:
            if dev.type == "usb" and dev.index == "1":
                devices.remove(dev)
        controller = Controller("controller")
        controller.type = "usb"
        controller.index = index
        controller.model = model
        devices.append(controller)
        vm_xml.set_devices(devices)

    try:
        session = vm.wait_for_login()
    except (LoginError, VMError, ShellError) as e:
        test.fail("Test failed: %s" % str(e))

    def is_hotplug_ok():
        try:
            output = session.cmd_output("fdisk -l | grep -c '^Disk /dev/.* 1 M'")
            if int(output.strip()) != attach_count:
                return False
            else:
                return True
        except ShellTimeoutError as detail:
            test.fail("unhotplug failed: %s, " % detail)

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    try:
        result = None
        dev_xml = None
        opt = "--hmp"
        for i in range(attach_count):
            if usb_type == "storage":
                path = os.path.join(tmp_dir, "%s.img" % i)
                libvirt.create_local_disk("file", path, size="1M", disk_format="qcow2")
                os.chmod(path, 0o666)

            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd = "drive_add"
                    attach_cmd += (" 0 id=drive-usb-%s,if=none,file=%s" % (i, path))

                    result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                    if result.exit_status or (result.stdout.strip().find("OK") == -1):
                        raise process.CmdError(result.command, result)

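                    # usb-storage hotplug via HMP is a two-step process:
                    # drive_add creates the backend, then device_add
                    # attaches the usb-storage frontend to it.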
                    attach_cmd = "device_add usb-storage,"
                    attach_cmd += ("id=drive-usb-%s,bus=usb1.0,drive=drive-usb-%s" % (i, i))
                else:
                    attach_cmd = "device_add"
                    attach_cmd += " usb-%s,bus=usb1.0,id=%s%s" % (usb_type, usb_type, i)

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                attributes = {'type_name': "usb", 'bus': "1", 'port': "0"}
                if usb_type == "storage":
                    dev_xml = Disk(type_name="file")
                    dev_xml.device = "disk"
                    dev_xml.source = dev_xml.new_disk_source(**{"attrs": {'file': path}})
                    dev_xml.driver = {"name": "qemu", "type": 'qcow2', "cache": "none"}
                    dev_xml.target = {"dev": 'sdb', "bus": "usb"}
                    dev_xml.address = dev_xml.new_disk_address(**{"attrs": attributes})
                else:
                    if usb_type == "mouse":
                        dev_xml = Input("mouse")
                    elif usb_type == "tablet":
                        dev_xml = Input("tablet")
                    else:
                        dev_xml = Input("keyboard")

                    dev_xml.input_bus = "usb"
                    dev_xml.address = dev_xml.new_input_address(**{"attrs": attributes})

                result = virsh.attach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)

        if status_error and usb_type == "storage":
            if utils_misc.wait_for(is_hotplug_ok, timeout=30):
                # Sometimes we meet an error but the ret in $? is 0.
                test.fail("\nAttach device successfully in negative case."
                          "\nExcept it fail when attach count exceed maximum."
                          "\nDetail: %s" % result)

        for i in range(attach_count):
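            # device_del takes the id that was assigned when the device
            # was added.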
            attach_cmd = "device_del"
            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd += (" drive-usb-%s" % i)
                else:
                    if usb_type == "mouse":
                        attach_cmd += " mouse"
                    elif usb_type == "tablet":
                        attach_cmd += " tablet"
                    else:
                        attach_cmd += " keyboard"

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                result = virsh.detach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
    except process.CmdError as e:
        if not status_error:
            # live attach of device 'input' is not supported
            ret = result.stderr.find("Operation not supported")
            if usb_type != "storage" and ret > -1:
                pass
            else:
                test.fail("failed to attach device.\nDetail: %s." % result)
    finally:
        session.close()
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        utils_selinux.set_status(backup_sestatus)
        vm_xml_backup.sync()
Exemplo n.º 22
def run(test, params, env):
    """
    Test rbd disk device.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare disk image.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def config_ceph():
        """
        Write the configs to the file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')
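        # For example, hosts "10.0.0.1 10.0.0.2" with ports "6789 6789"
        # (hypothetical values) would yield the line:
        #     mon_host = 10.0.0.1:6789,10.0.0.2:6789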

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(
                    disk_src_host.split(), disk_src_port.split()):
                source_host.append({'name': host_name,
                                    'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'"
                            % (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(pool_name, mon_host,
                                      disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: Volume parameters dict.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout:
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout:
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout:
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout:
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
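            # In the qemu command line, monitor hosts are joined with the
            # escaped separator "\:6789\;" (":6789" is the port, ";" splits hosts)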
            hosts = r'\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(test.tmpdir,
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(test.tmpdir, "rbd.mem")
        snap_disk = os.path.join(test.tmpdir, "rbd.disk")
        xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev]
        xml_dom_exp = ["source file='%s'" % snap_disk,
                       "backingStore type='network' index='1'",
                       "source protocol='rbd' name='%s'" % disk_src_name]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'"
                                % snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(test.tmpdir, "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")
        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.

        :param vm_obj: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Partitions in the VM before the disk was attached.
        :param read_only: Whether the disk is expected to be read-only.
        :return: True if the check succeeds.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)"
                   .format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Readonly fs, check the error messages.
            # The command may return True, read-only
            # messges can be found from the command output
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots of the volume, unprotect them and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout
        snap_names = []
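            # The parser below assumes older-style `rbd snap list` output
            # (illustrative):
            #   SNAPID NAME     SIZE
            #        4 snap1 1024 MB
            # i.e. four fields per data line, with the name in the second one.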
        if snaps_out:
            for line in snaps_out.split("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.split()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect the snapshots first, otherwise purging the volume will fail
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume, and then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get("test_disk_internal_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no")

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                        True, timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    key_file = os.path.join(test.tmpdir, "ceph.key")
    img_file = os.path.join(test.tmpdir,
                            "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(test.tmpdir,
                                      "%s_frontend_test.img" % vm_name)
    # Construct a list of unsupported error messages so that such tests can be skipped
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        unsupported_err.append('Unable to read from monitor: Connection reset by peer')
        unsupported_err.append('Permission denied')
    if test_disk_internal_snapshot:
        unsupported_err.append('unsupported configuration: internal snapshot for disk ' +
                               'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Install the ceph-common package, which includes the rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

                # virsh secret-define reports "Secret <uuid> created" on success
                uuid_match = re.search(r"Secret (\S+) created", ret.stdout)
                if not uuid_match:
                    test.error("Failed to get secret uuid")
                secret_uuid = uuid_match.group(1)
                logging.debug("Secret uuid %s", secret_uuid)

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
        disk_path = ("rbd:%s:mon_host=%s" %
                     (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" %
                          (auth_user, auth_key))
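        # Illustrative result (hypothetical values):
        #   rbd:pool/image:mon_host=10.0.0.1:id=admin:key=AQAB...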
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port})
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
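            # "rbd info" succeeds if the image already exists, so the
            # conversion only runs when the image is not there yet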
            disk_cmd = ("rbd -m %s %s info %s || qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt,
                                          disk_src_name, disk_format,
                                          blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {"name": vol_name, "capacity": int(vol_cap),
                          "capacity_unit": vol_cap_unit, "format": disk_format}

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make a FS on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s || qemu-img convert -O"
                        " %s %s %s" % (mon_host, key_opt, disk_src_name,
                                       disk_format, img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}'
                            % (disk_src_name, mon_host))
                # pass different json string according to the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key))
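                # Illustrative final string (hypothetical values):
                #   json:{"file.driver":"rbd",
                #         "file.filename":"rbd:pool/img:mon_host=H:id=U:key=K"}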
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # If hot plug, start VM first, otherwise stop VM if running.
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # No need auth options for volume
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                ret = virsh.attach_device(guest_name, xml_file,
                                          "", debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path,
                                    targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait for vm is running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(vm, targetdev, old_parts,
                               read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm"
                          " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
        for msg in unsupported_err:
            if msg in str(details):
                test.skip(details)
        test.fail("VM failed to start. Error: %s" % str(details))
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()
Example #23
def run(test, params, env):
    """
    Test usb virtual disk plug/unplug.

    1. Prepare a vm with usb controllers
    2. Prepare a local image
    3. Prepare a virtual disk xml
    4. Attach the virtual disk to the vm
    5. Check the disk in vm
    6. Unplug the disk from vm
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def get_new_disks(old_partitions):
        """
        Get new virtual disks in vm after disk plug.

        :param old_partitions: Already existing partitions in vm.
        :return: New disks/partitions in vm.
        """
        session = None
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_partitions = libvirt.get_parts_list(session)
            added_partitions = list(set(new_partitions).difference(set(old_partitions)))
            if not added_partitions:
                logging.debug("No new partitions found in vm.")
            else:
                logging.debug("Newly added partition(s) is: %s", added_partitions)
            return added_partitions
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as err:
            test.fail("Error happens when get new disk: %s", str(err))
        finally:
            if session:
                session.close()

    def check_disk_type(partition):
        """
        Check if a disk partition is a usb disk in vm.

        :param partition: The disk partition in vm to be checked.
        :return: If the disk is a usb device, return True.
        """
        session = None
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            cmd = "ls -l /dev/disk/by-id/ | grep %s | grep -i usb" % partition
            status = session.cmd_status(cmd)
            return status == 0
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as err:
            logging.debug("Error happens when check if new disk is usb device: %s",
                          str(err))
            return False
        finally:
            if session:
                session.close()

    def check_disk_io(partition):
        """
        Check if the disk partition in vm can be normally used.

        :param partition: The disk partition in vm to be checked.
        :return: If the disk can be used, return True.
        """
        session = None
        try:
            session = vm.wait_for_login()
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(partition))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as err:
            logging.debug("Error happens when check disk io in vm: %s", str(err))
            return False
        finally:
            if session:
                session.close()

    def remove_usbs(vmxml):
        """
        Remove all USB devices and controllers from a vm's xml.

        :param vmxml: The vm's xml.
        """
        try:
            for xml in vmxml.xmltreefile.findall('/devices/*'):
                if (xml.get('bus') == 'usb') or (xml.get('type') == 'usb'):
                    vmxml.xmltreefile.remove(xml)
        except (AttributeError, TypeError):
            pass  # Element doesn't exist at first
        vmxml.xmltreefile.write()

    def prepare_usb_controller(vmxml, usb_models):
        """
        Add usb controllers into vm's xml.

        :param vmxml: The vm's xml.
        :param usb_models: The usb models will be used in usb controller(s).
        """
        if not usb_models:
            test.error("No usb model provided.")
        # Add disk usb controller(s)
        for usb_model in usb_models:
            usb_controller = Controller("controller")
            usb_controller.type = "usb"
            usb_controller.index = "0"
            usb_controller.model = usb_model
            vmxml.add_device(usb_controller)
        # Redefine domain
        vmxml.sync()

    def prepare_local_image():
        """
        Prepare a local image.

        :return: The path to the image file.
        """
        image_filename = params.get("image_filename", "raw.img")
        image_format = params.get("image_format", "raw")
        image_size = params.get("image_size", "1G")
        image_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
        try:
            if image_format in ["raw", "qcow2"]:
                image_path = libvirt.create_local_disk("file",
                                                       image_path, image_size,
                                                       disk_format=image_format)
            else:
                test.cancel("We only test raw & qcow2 format for now.")
        except Exception as err:
            test.error("Error happens when prepare local image: %s", err)
        return image_path

    def prepare_virt_disk_xml(image_path):
        """
        Prepare the virtual disk xml to be attached/detached.

        :param image_path: The path to the local image.
        :return: The virtual disk xml.
        """
        virt_disk_device = params.get("virt_disk_device", "disk")
        virt_disk_device_type = params.get("virt_disk_device_type", "file")
        virt_disk_device_format = params.get("virt_disk_device_format", "raw")
        virt_disk_device_target = params.get("virt_disk_device_target", "sdb")
        virt_disk_device_bus = params.get("virt_disk_device_bus", "usb")
        disk_xml = Disk(type_name=virt_disk_device_type)
        disk_xml.device = virt_disk_device
        disk_src_dict = {'attrs': {'file': image_path, 'type_name': 'file'}}
        disk_xml.source = disk_xml.new_disk_source(**disk_src_dict)
        driver_dict = {"name": "qemu", "type": virt_disk_device_format}
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": virt_disk_device_target,
                           "bus": virt_disk_device_bus}
        return disk_xml
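
    # The disk XML produced above (illustrative) looks like:
    #   <disk type='file' device='disk'>
    #     <driver name='qemu' type='raw'/>
    #     <source file='/path/to/raw.img'/>
    #     <target dev='sdb' bus='usb'/>
    #   </disk>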

    usb_models = params.get("usb_model").split()
    coldplug = "yes" == params.get("coldplug")
    status_error = "yes" == params.get("status_error")
    new_disks = []
    new_disk = ""
    attach_options = ""

    # Get disk partitions info before hot/cold plug virtual disk
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_partitions = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Backup vm xml
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml = vmxml_backup.copy()

    try:
        remove_usbs(vmxml)
        prepare_usb_controller(vmxml, usb_models)
        vm.start()
        session = vm.wait_for_login()
        disk_xml = prepare_virt_disk_xml(prepare_local_image())
        session.close()
        if coldplug:
            attach_options = "--config"

        # Attach virtual disk to vm
        result = virsh.attach_device(vm_name, disk_xml.xml,
                                     flagstr=attach_options,
                                     ignore_status=True, debug=True)
        libvirt.check_exit_status(result, status_error)

        # Check the attached disk in vm
        if coldplug:
            vm.destroy(gracefully=False)
            vm.start()
            vm.wait_for_login().close()
        utils_misc.wait_for(lambda: get_new_disks(old_partitions), 20)
        new_disks = get_new_disks(old_partitions)
        if len(new_disks) != 1:
            test.fail("Attached 1 virtual disk but got %s." % len(new_disk))
        new_disk = new_disks[0]
        if not check_disk_type(new_disk):
            test.fail("The newly attached disk is not a usb one.")
        if not check_disk_io(new_disk):
            test.fail("Cannot operate the newly added disk in vm.")

        # Detach the disk from vm
        result = virsh.detach_device(vm_name, disk_xml.xml,
                                     flagstr=attach_options,
                                     ignore_status=True, debug=True)
        libvirt.check_exit_status(result, status_error)

        # Check the detached disk in vm
        if coldplug:
            vm.destroy(gracefully=False)
            vm.start()
            vm.wait_for_login().close()
        utils_misc.wait_for(lambda: not get_new_disks(old_partitions), 20)
        new_disks = get_new_disks(old_partitions)
        if len(new_disks) != 0:
            test.fail("Unplug virtual disk failed.")

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Restoring vm
        vmxml_backup.sync()
Example #24
def run(test, params, env):
    """
    Test pci/pcie-to-pci bridge

    Hotplug interface to pci/pcie-to-pci bridge, then check xml and
    inside vm.
    Hotunplug interface, then check xml and inside vm
    Other test scenarios of pci/pcie-to-pci bridge
    """

    def create_pci_device(pci_model, pci_model_name, **kwargs):
        """
        Create a pci/pcie bridge

        :param pci_model: model of pci controller device
        :param pci_model_name: model name of pci controller device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created device object
        """
        pci_bridge = Controller('pci')
        pci_bridge.model = pci_model
        pci_bridge.model_name = {'name': pci_model_name}
        if 'index' in kwargs:
            pci_bridge.index = kwargs['index']
        if 'address' in kwargs:
            pci_bridge.address = pci_bridge.new_controller_address(
                attrs=eval(kwargs['address']))

        logging.debug('pci_bridge: %s', pci_bridge)
        return pci_bridge
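
    # Example usage (hypothetical values):
    #   br = create_pci_device('pcie-to-pci-bridge', 'pcie-pci-bridge',
    #                          index='5', address="{'bus': '0x04'}")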

    def create_iface(iface_model, iface_source, **kwargs):
        """
        Create an interface to be attached to vm

        :param iface_model: model of the interface device
        :param iface_source: source of the interface device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created interface object
        """
        iface = Interface('network')
        iface.model = iface_model
        iface.source = eval(iface_source)

        if 'mac' in kwargs:
            iface.mac_address = kwargs['mac']
        else:
            mac = utils_net.generate_mac_address_simple()
            iface.mac_address = mac

        if 'address' in kwargs:
            iface.address = iface.new_iface_address(attrs=eval(kwargs['address']))

        logging.debug('iface: %s', iface)
        return iface

    vm_name = params.get('main_vm')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '').split(';')
    case = params.get('case', '')
    hotplug = 'yes' == params.get('hotplug', 'no')

    need_pci_br = 'yes' == params.get('need_pci_br', 'no')
    pci_model = params.get('pci_model', 'pci')
    pci_model_name = params.get('pci_model_name')
    pci_br_kwargs = eval(params.get('pci_br_kwargs', '{}'))

    pci_br_has_device = 'yes' == params.get('pci_br_has_device', 'no')
    sound_dev_model_type = params.get('sound_dev_model_type', '')
    sound_dev_address = params.get('sound_dev_address', '')

    iface_model = params.get('iface_model', '')
    iface_source = params.get('iface_source', '')
    iface_kwargs = eval(params.get('iface_kwargs', '{}'))

    max_slots = int(params.get('max_slots', 31))
    pcie_br_count = int(params.get('pcie_br_count', 3))

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    vm = env.get_vm(vm_name)

    try:

        # Check if there is a pci/pcie-to-pci bridge, if so,
        # just use the existing pci/pcie-to-pci-bridge to test
        ori_pci_br = [dev for dev in vmxml.get_devices('controller')
                      if dev.type == 'pci' and dev.model == pci_model]

        if need_pci_br:
            # If there is not a pci/pcie-to-pci bridge to test,
            # create one and add to vm
            if not ori_pci_br:
                logging.info('No %s on vm, create one', pci_model)
                pci_bridge = create_pci_device(pci_model, pci_model_name)
                vmxml.add_device(pci_bridge)
                vmxml.sync()
                logging.debug(virsh.dumpxml(vm_name))

            # Check if pci/pcie-to-pci bridge is successfully added
            vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
            cur_pci_br = [dev for dev in vmxml.get_devices('controller')
                          if dev.type == 'pci' and dev.model == pci_model]
            if not cur_pci_br:
                test.error('Failed to add %s controller to vm xml' % pci_model)

            pci_br = cur_pci_br[0]
            logging.debug('pci_br: %s', pci_br)
            pci_br_index = pci_br.index

        # If test scenario requires another pci device on pci/pcie-to-pci
        # bridge before hotplug, add a sound device and make sure
        # the 'bus' is same with pci bridge index
        if need_pci_br and pci_br_has_device:
            sound_dev = Sound()
            sound_dev.model_type = sound_dev_model_type
            sound_dev.address = eval(sound_dev_address % pci_br_index)
            logging.debug('sound_dev.address: %s', sound_dev.address)
            vmxml.add_device(sound_dev)
            if case != 'vm_with_pcie_br_1_br':
                vmxml.sync()

        # Test hotplug scenario
        if hotplug:
            vm.start()
            vm.wait_for_login().close()

            # Create interface to be hotplugged
            logging.info('Create interface to be hotplugged')
            iface = create_iface(iface_model, iface_source)
            mac = iface.mac_address

            result = virsh.attach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            xml_after_attach = VMXML.new_from_dumpxml(vm_name)
            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address is successfully
            # attached with address bus equal to pcie/pci bridge's index
            iface_list = [
                iface for iface in xml_after_attach.get_devices('interface')
                if iface.mac_address == mac and
                int(iface.address['attrs']['bus'], 16) == int(pci_br_index, 16)
            ]

            logging.debug('iface list after attach: %s', iface_list)
            if not iface_list:
                test.error('Failed to attach interface %s' % iface)

            # Check inside vm
            def check_inside_vm(session, expect=True):
                ip_output = session.cmd('ip a')
                logging.debug('output of "ip a": %s', ip_output)

                return (mac in ip_output) == expect

            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, True),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed,'
                          'interface not successfully attached:'
                          'not found mac address %s' % mac)
            session.close()

            # Test hotunplug
            result = virsh.detach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address has been
            # successfully detached
            xml_after_detach = VMXML.new_from_dumpxml(vm_name)
            iface_list_after_detach = [
                iface for iface in xml_after_detach.get_devices('interface')
                if iface.mac_address == mac
            ]

            logging.debug('iface list after detach: %s', iface_list_after_detach)
            if iface_list_after_detach:
                test.fail('Failed to detach device: %s' % iface)

            # Check again inside vm
            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, False),
                                       timeout=60, step=5):
                test.fail('Check interface inside vm failed,'
                          'interface not successfully detached:'
                          'found mac address %s' % mac)
            session.close()

        # Other test scenarios of pci/pcie
        if case:
            logging.debug('iface_kwargs: %s', iface_kwargs)

            # Set the pcie-to-pci-bridge model name to something other than
            # pcie-pci-bridge, or use an invalid controller index for the
            # pcie-to-pci-bridge.
            if case in ('wrong_model_name', 'invalid_index'):
                pci_bridge = create_pci_device(pci_model, pci_model_name,
                                               **pci_br_kwargs)
                vmxml.add_device(pci_bridge)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Attach device with invalid slot to pcie-to-pci-bridge
            if case == 'attach_with_invalid_slot':
                iface = create_iface(iface_model, iface_source, **iface_kwargs)
                vmxml.add_device(iface)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test that pcie-to-pci-bridge has 31 available slots
            if case == 'max_slots':
                target_bus = cur_pci_br[0].index
                target_bus = hex(int(target_bus))
                logging.debug('target_bus: %s', target_bus)

                # Attach 32 interfaces
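                # iface_kwargs['address'] is assumed to be a template such as
                # "{'type': 'pci', 'bus': '%s', 'slot': '%s'}" (hypothetical),
                # filled with the bridge bus and an incrementing slot number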
                for i in range(max_slots + 1):
                    logging.debug('address: %s', iface_kwargs['address'])
                    new_iface_kwargs = {'address': iface_kwargs['address']
                                        % (target_bus, hex(i + 1))}
                    iface = create_iface(iface_model, iface_source,
                                         **new_iface_kwargs)
                    logging.info('Attaching the %d th interface', i + 1)
                    result_in_loop = virsh.attach_device(
                        vm_name, iface.xml, flagstr='--config', debug=True)

                    # Attaching the 32nd interface will fail
                    if i == max_slots:
                        status_error = True
                    libvirt.check_exit_status(result_in_loop,
                                              expect_error=status_error)
                logging.debug(virsh.dumpxml(vm_name))

                # Get all devices on pcie-to-pci-bridge from new xml
                # Test if it matches with value of max_slots
                new_xml = VMXML.new_from_dumpxml(vm_name)
                device_on_pci_br = [
                    dev for dev in new_xml.get_devices('interface')
                    if dev.address['type_name'] == 'pci' and
                    int(dev.address['attrs']['bus'], 16) == int(target_bus, 16)
                ]

                logging.info('Number of devices on the pcie-to-pci-bridge is %d',
                             len(device_on_pci_br))
                if len(device_on_pci_br) != max_slots:
                    test.fail('Max slots is %d instead of %d' %
                              (len(device_on_pci_br), max_slots))

            # Define a guest with pcie-to-pci-bridge controller's index <=bus
            if case.startswith('index_v_bus'):
                last_pci_index = max([
                    int(dev.index) for dev in vmxml.get_devices('controller')
                    if dev.type == 'pci'])

                # The new pcie-bridge's index should be the last pci index + 1
                new_index = last_pci_index + 1
                if case.endswith('less_than'):
                    new_bus = new_index + 1
                elif case.endswith('equal_to'):
                    new_bus = new_index
                else:
                    test.error('Invalid test: %s' % case)

                pci_br_kwargs['index'] = new_index
                pci_br_kwargs['address'] = pci_br_kwargs['address'] % (new_bus)
                logging.debug('pci_br_kwargs: %s', pci_br_kwargs)

                pcie_br = create_pci_device(pci_model, pci_model_name, **pci_br_kwargs)
                vmxml.add_device(pcie_br)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test define & start an VM with/without pcie-to-pci-bridge
            if case.startswith('vm_with_pcie_br'):
                if case.endswith('1_br'):
                    pass
                elif case.endswith('multi_br'):
                    # Add $pcie_br_count pcie-root-port to vm
                    for i in range(pcie_br_count):
                        temp_xml = VMXML.new_from_inactive_dumpxml(vm_name)
                        port = create_pci_device('pcie-root-port', 'pcie-root-port')
                        temp_xml.add_device(port)
                        temp_xml.sync()

                    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
                    pci_root_ports = [dev for dev in vmxml.get_devices('controller')
                                      if dev.type == 'pci' and
                                      dev.model == 'pcie-root-port']
                    indexes = sorted([hex(int(dev.index)) for dev in pci_root_ports])
                    logging.debug('indexes: %s', indexes)

                    # Add $pcie_br_count pcie-to-pci-bridge to vm,
                    # on separated pcie-root-port
                    for i in range(pcie_br_count):
                        new_kwargs = {
                            'address': pci_br_kwargs['address'] % indexes[-i - 1]}
                        pcie_br = create_pci_device(pci_model, pci_model_name,
                                                    **new_kwargs)
                        vmxml.add_device(pcie_br)

                elif case.endswith('no_br'):
                    # Delete all pcie-to-pci-bridge
                    for dev in ori_pci_br:
                        vmxml.del_device(dev)
                    vmxml.sync()

                    # Add an pci device(rtl8139 nic)
                    iface = create_iface(iface_model, iface_source)
                    vmxml.add_device(iface)
                else:
                    test.error('Invalid test: %s' % case)

                # Test define and start vm with new xml settings
                result_define_vm = virsh.define(vmxml.xml, debug=True)
                libvirt.check_exit_status(result_define_vm)
                result_start_vm = virsh.start(vm_name, debug=True)
                libvirt.check_exit_status(result_start_vm)

                # Login to make sure vm is actually started
                vm.create_serial_console()
                vm.wait_for_serial_login().close()

                logging.debug(virsh.dumpxml(vm_name))

                # Get all pcie-to-pci-bridge after test operations
                new_vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
                cur_pci_br = [dev for dev in new_vmxml.get_devices('controller')
                              if dev.type == 'pci' and dev.model == pci_model]
                logging.debug('cur_pci_br: %s', cur_pci_br)

                if case.endswith('multi_br'):
                    # Check whether all new pcie-to-pci-br are successfully added
                    if len(cur_pci_br) - len(ori_pci_br) != pcie_br_count:
                        test.fail('Not all pcie-to-pci-br successfully added.'
                                  'Should be %d, actually is %d' %
                                  (pcie_br_count, len(cur_pci_br) - len(ori_pci_br)))

                elif case.endswith('no_br'):
                    # Check if a pcie-to-pci-br will be automatically created
                    if len(cur_pci_br) == 0:
                        test.fail('No new pcie-to-pci-bridge automatically created')

            # Check command result if there is a result to be checked
            if 'result_to_check' in locals():
                libvirt.check_exit_status(result_to_check, expect_error=status_error)
                libvirt.check_result(result_to_check, expected_fails=err_msg)

    finally:
        bkxml.sync()
Example #25
def run(test, params, env):
    """
    Test command: virsh pool-define-as; pool-build; pool-start; vol-create-as;
    vol-list; attach-device; login; mount and dd; reboot; check persistence;
    detach-device; pool-destroy; pool-undefine; clear lv, vg and pv.
    Create a libvirt npiv pool from a vHBA's device mapper device, create
    a volume out of the newly created pool, attach it to a guest, mount it,
    then reboot and check persistence after reboot.

    Pre-requisite:
    Host should have a vHBA associated with an mpath device
    """

    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pool_type = params.get("pool_type", "dir")
    scsi_wwpn = params.get("scsi_wwpn", "WWPN_EXAMPLE")
    scsi_wwnn = params.get("scsi_wwnn", "WWNN_EXAMPLE")
    pool_target = params.get("pool_target", "pool_target")
    target_device = params.get("disk_target_dev", "vda")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    frmt = params.get("volume_format", 'qcow2')
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_disk = None
    test_unit = None

    if 'EXAMPLE' in scsi_wwnn or 'EXAMPLE' in scsi_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    if not vm.is_alive():
        vm.start()
    pool_extra_args = ""
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    process.run("service multipathd restart", shell=True)
    online_hbas_list = nodedev.find_hbas("hba")
    # Skip the case early if the host has no online hba cards, before indexing
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    first_online_hba = online_hbas_list[0]
    old_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the old mpath devs are: %s" % old_mpath_devs)
    new_vhbas = nodedev.nodedev_create_from_xml(
        {"nodedev_parent": first_online_hba,
         "scsi_wwnn": scsi_wwnn,
         "scsi_wwpn": scsi_wwpn})
    logging.info("Newly created vHBA %s" % new_vhbas)
    process.run("service multipathd restart", shell=True)

    utils_misc.wait_for(
        lambda: nodedev.is_mpath_devs_added(old_mpath_devs), timeout=5)

    cur_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the current mpath devs are: %s" % cur_mpath_devs)
    new_mpath_devs = list(set(cur_mpath_devs).difference(
        set(old_mpath_devs)))

    logging.debug("newly added mpath devs are: %s" % new_mpath_devs)
    if not new_mpath_devs:
        raise exceptions.TestFail("No newly added mpath devices found, "
                                  "please check your FC settings")
    source_dev = os.path.join('/dev/mapper/', new_mpath_devs[0])
    logging.debug("We are going to use \"%s\" as our source device"
                  " to create a logical pool" % source_dev)

    cmd = "parted %s mklabel msdos -s" % source_dev
    cmd_result = process.run(cmd, shell=True)
    utlv.check_exit_status(cmd_result)

    if source_dev:
        pool_extra_args = ' --source-dev %s' % source_dev
    else:
        raise exceptions.TestFail(
            "The vHBA %s does not have any associated mpath device" % new_vhbas)

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    try:
        cmd_result = virsh.pool_define_as(
            pool_name, pool_type, pool_target, pool_extra_args, ignore_status=True,
            debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_build(pool_name)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)

        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        cmd_result = virsh.vol_create_as(
            volume_name, pool_name, volume_capacity, allocation, frmt, "", debug=True)
        utlv.check_exit_status(cmd_result)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        logging.debug('Volume list %s', vol_list)
        for unit in vol_list:
            test_unit = vol_list[unit]
            logging.debug(unit)

        disk_params = {'type_name': "file", 'target_dev': target_device,
                       'target_bus': "virtio", 'source_file': test_unit,
                       'driver_name': "qemu", 'driver_type': "raw"}
        disk_xml = utlv.create_disk_xml(disk_params)
        session = vm.wait_for_login()

        bf_disks = libvirt_vm.get_disks()

        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()

        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
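        # set symmetric difference isolates the newly appeared disk, e.g.
        # {'vda'} ^ {'vda', 'vdb'} -> {'vdb'}; note the join would mangle
        # the result if more than one disk appeared between the snapshots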
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        output = session.cmd_status_output('lsblk', timeout=15)
        logging.debug("%s", output[1])

        session.cmd_status_output('mkfs.ext4 %s' % mount_disk)
        if mount_disk:
            logging.info("%s", mount_disk)
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Can not find mounted device")
        session.close()

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("Mount output: %s", output[1])
        if '/mnt' in output[1]:
            logging.debug("Mount successful across reboot")
        session.close()

        status = virsh.detach_device(vm_name, disk_xml,
                                     debug=True)
        utlv.check_exit_status(status)

    finally:
        vm.destroy(gracefully=False)
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if test_unit:
            process.system('lvremove -f %s' % test_unit, verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            nodedev.vhbas_cleanup(new_vhbas.split())
        process.run("service multipathd restart", shell=True)
Example #26
def run(test, params, env):
    """
    1. prepare a vHBA
    2. find the nodedev's lun name
    3. prepare the lun dev's xml
    4. start vm
    5. attach disk xml to vm
    6. login vm and check the disk
    7. detach the virtual disk
    8. check the blkdev gone
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    device_type = params.get("device_type", "file")
    device_target = params.get("device_target", "vdb")
    lun_dir_method = params.get("lun_dir_method", "by-path")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    readonly = params.get("readonly", "no")
    new_vhbas = []
    blk_dev = ""
    lun_dev = ""
    lun_dev_path = ""
    lun_sl = []
    new_disk = ""
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)
    vm = env.get_vm(vm_name)
    try:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        old_disk_count = vmxml.get_disk_count(vm_name)
        # Prepare vHBA
        online_hbas = utils_npiv.find_hbas("hba")
        old_vhbas = utils_npiv.find_hbas("vhba")
        if not online_hbas:
            raise exceptions.TestSkipError("Host doesn't have online hba!")
        old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                           replace_existing=True)
        first_online_hba = online_hbas[0]
        new_vhba = utils_npiv.nodedev_create_from_xml(
                {"nodedev_parent": first_online_hba,
                 "scsi_wwnn": wwnn,
                 "scsi_wwpn": wwpn})
        utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                            timeout=_TIMEOUT)
        if not utils_npiv.is_vhbas_added(old_vhbas):
            raise exceptions.TestFail("vHBA is not successfully created.")
        new_vhbas.append(new_vhba)
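        # Strip non-digits to get the scsi host number of the new vhba,
        # e.g. "scsi_host12" -> "12"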
        new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
        # Get the new block device generated by the new vHBA
        utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                            timeout=_TIMEOUT)
        blk_devs = get_blks_by_scsi(new_vhba_scsibus)
        if not blk_devs:
            raise exceptions.TestFail("block device not found with scsi_%s" %
                                      new_vhba_scsibus)
        first_blk_dev = blk_devs[0]
        # Get the symbolic link of the device in /dev/disk/by-[path|uuid|id]
        logging.debug("first_blk_dev = %s, lun_dir_method = %s"
                      % (first_blk_dev, lun_dir_method))
        utils_misc.wait_for(
            lambda: get_symbols_by_blk(first_blk_dev, lun_dir_method),
            timeout=_TIMEOUT)
        lun_sl = get_symbols_by_blk(first_blk_dev, lun_dir_method)
        if not lun_sl:
            raise exceptions.TestFail("lun symbolic links not found under "
                                      "/dev/disk/%s/ for block device %s." %
                                      (lun_dir_method, first_blk_dev))
        lun_dev = lun_sl[0]
        lun_dev_path = "/dev/disk/" + lun_dir_method + "/" + lun_dev
        # Prepare xml of virtual disk
        disk_params = {'type_name': device_type, 'device': disk_device,
                       'driver_name': driver_name, 'driver_type': driver_type,
                       'source_file': lun_dev_path,
                       'target_dev': device_target, 'target_bus': target_bus,
                       'readonly': readonly}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                               vm.address_cache)
        old_disks = libvirt_vm.get_disks()
        # Attach disk
        dev_attach_status = virsh.attach_device(
                    vm_name, disk_xml, debug=True)
        utlv.check_exit_status(dev_attach_status)

        cur_disk_count = vmxml.get_disk_count(vm_name)
        cur_disks = libvirt_vm.get_disks()
        if cur_disk_count <= old_disk_count:
            raise exceptions.TestFail(
                    "Failed to attach disk: %s" % lun_disk_xml)
        new_disk = "".join(list(set(old_disks) ^ set(cur_disks)))
        logging.debug("Attached device in vm:%s", new_disk)
        # Check disk in VM
        output = session.cmd_status_output('mkfs.ext4 -F %s' % new_disk)
        logging.debug("mkfs.ext4 the disk in vm, result: %s", output[1])
        if not check_vm_disk(session, new_disk, readonly):
            raise exceptions.TestFail("Failed check the disk in vm.")
        session.cmd_status_output('umount %s' % new_disk)
        # Detach disk
        dev_detach_status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(dev_detach_status)
        cur_disks = libvirt_vm.get_disks()
        if cur_disks != old_disks:
            raise exceptions.TestFail("Detach disk failed.")
        session.close()

    finally:
        utils_npiv.vhbas_cleanup(new_vhbas)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        process.system('service multipathd restart', verbose=True)
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
Example #27
def run(test, params, env):
    """
    Test for basic controller device function.

    1) Define the VM with specified controller device and check result meets
       expectation.
    2) Start the guest and check if start result meets expectation
    3) Test the function of started controller device
    4) Shutdown the VM and clean up environment
    """
    def remove_all_addresses(vm_xml):
        """
        Remove the address element from every device that has one.
        """
        try:
            for elem in vm_xml.xmltreefile.findall('/devices/*/address'):
                vm_xml.xmltreefile.remove(elem)
        except (AttributeError, TypeError):
            pass  # Element already doesn't exist
        vm_xml.xmltreefile.write()

    def remove_usb_devices(vm_xml):
        """
        Remove all USB devices.
        """
        try:
            for xml in vm_xml.xmltreefile.findall('/devices/*'):
                if xml.get('bus') == 'usb':
                    vm_xml.xmltreefile.remove(xml)
        except (AttributeError, TypeError):
            pass  # Element already doesn't exist
        vm_xml.xmltreefile.write()

    def prepare_local_image(image_filename):
        """
        Prepare a local image.

        :param image_filename: The name to the local image.
        :return: The path to the image file.
        """
        image_format = 'qcow2'
        image_size = '10M'
        image_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
        try:
            image_path = libvirt.create_local_disk("file",
                                                   image_path,
                                                   image_size,
                                                   disk_format=image_format)
        except Exception as err:
            test.error("Error happened when preparing local image: %s" % err)
        disks_img.append(image_path)
        return image_path

    def prepare_usb_controller(vmxml, index, addr):
        """
        Add usb controllers into vm's xml.

        :param vmxml: The vm's xml.
        :param index: The index of the usb controller.
        :param addr: Dict with the pci 'bus' and 'slot' of the controller.
        """
        # Add disk usb controller(s)
        usb_controller = Controller("controller")
        usb_controller.type = "usb"
        usb_controller.index = str(index)
        usb_controller.model = 'qemu-xhci'
        addr_dict = {
            "domain": '0x0000',
            'function': '0x0',
            'bus': addr['bus'],
            'slot': addr['slot']
        }
        usb_controller.address = usb_controller.new_controller_address(
            **{"attrs": addr_dict})
        vmxml.add_device(usb_controller)
        # Redefine domain
        vmxml.sync()

    def prepare_virt_disk_xml(virt_disk_device_target,
                              virt_disk_device_bus,
                              usb_bus=None,
                              virt_disk_bus=None,
                              virt_disk_slot=None):
        """
        Prepare the virt disk xml to be attached/detached.

        :param virt_disk_device_target: The target dev of the disk.
        :param virt_disk_device_bus: The target bus of the disk.
        :param usb_bus: The usb controller index, used when the bus is usb.
        :param virt_disk_bus: The pci bus of the disk address.
        :param virt_disk_slot: The pci slot of the disk address.
        :return: The virtual disk xml.
        """
        image_filename = ''.join(
            random.choice(string.ascii_lowercase) for _ in range(8)) + ".qcow2"
        virt_disk_device = 'disk'
        virt_disk_device_type = 'file'
        virt_disk_device_format = 'qcow2'
        disk_xml = Disk(type_name=virt_disk_device_type)
        disk_xml.device = virt_disk_device
        disk_src_dict = {
            'attrs': {
                'file': prepare_local_image(image_filename),
                'type_name': 'file'
            }
        }
        disk_xml.source = disk_xml.new_disk_source(**disk_src_dict)
        driver_dict = {"name": "qemu", "type": virt_disk_device_format}
        disk_xml.driver = driver_dict
        disk_xml.target = {
            "dev": virt_disk_device_target,
            "bus": virt_disk_device_bus
        }
        if virt_disk_device_bus == 'usb':
            disk_addr_dict = {'bus': str(usb_bus), 'port': '1'}
            disk_xml.address = disk_xml.new_disk_address(
                type_name='usb', **{"attrs": disk_addr_dict})
        elif virt_disk_device_bus == 'virtio':
            disk_addr_dict = {
                'bus': virt_disk_bus,
                'slot': virt_disk_slot,
                'domain': '0x0000',
                'function': '0x0'
            }
            disk_xml.address = disk_xml.new_disk_address(
                type_name='pci', **{"attrs": disk_addr_dict})
        return disk_xml

    def prepare_iface_xml(iface_bus, iface_slot):
        """
        Create interface xml file
        """
        iface_xml = Interface(type_name='bridge')
        iface_xml.source = {'bridge': 'virbr0'}
        iface_xml.model = "virtio"
        addr_dict = {
            'bus': iface_bus,
            'slot': iface_slot,
            'domain': '0x0000',
            'function': '0x0'
        }
        iface_xml.address = iface_xml.new_iface_address(type_name='pci',
                                                        **{"attrs": addr_dict})
        return iface_xml

    if 'ppc' not in platform.machine():
        test.cancel('This case only supports the ppc platform')

    # Additional disk images.
    disks_img = []
    devices_xml = []

    prepare_cntlr = "yes" == params.get('prepare_controller', "no")
    cntlr_type = params.get('controller_type')
    cntlr_model = params.get('controller_model', '')
    with_index = 'yes' == params.get('controller_index', 'yes')
    cntlr_index = params.get('controller_index')
    cntlr_node = params.get('controller_node')
    target_index = params.get('target_index')
    cntlr_num = int(params.get('controller_num', '0'))
    cntlr_cur = int(params.get('controller_current', '0'))
    special_num = params.get('special_num')
    addr_str = params.get('address')
    if addr_str:
        addr_str = eval(addr_str)
    device_num = int(params.get('device_num', '0'))
    device_list = params.get('device_list', '')
    if device_list:
        device_list = eval(device_list)
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    qemu_cmd_check = "yes" == params.get("qemu_cmd_check", "no")
    status_error = "yes" == params.get("status_error", "no")
    numa = "yes" == params.get("numa", "no")
    with_define = 'yes' == params.get("with_define", "no")
    coldplug = "yes" == params.get("coldplug", "no")
    hotplug = "yes" == params.get("hotplug", "no")
    hotunplug = "yes" == params.get("hotunplug", "no")

    def check_index_in_xml(xml):
        """
        Check the used target in guest's xml
        :param xml:  The guest's xml
        :return:  A dict of result
        """
        result = {'sd': 'a', 'vd': 'a', 'index': 1}
        disk_list = xml.xmltreefile.findall("devices/disk/target")
        for disk_target in disk_list:
            dev = disk_target.attrib['dev']
            if dev[-1] >= result[dev[0:-1]]:
                result[dev[0:-1]] = chr(ord(dev[-1]) + 1)
        controller_list = xml.xmltreefile.findall("devices/controller")
        for controller in controller_list:
            if int(controller.get('index')) >= result['index']:
                result['index'] = int(controller.get('index')) + 1
        return result
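    # Worked example (illustrative): with targets vda and vdb plus
    # controllers indexed 0..2 in the xml, check_index_in_xml() returns
    # {'sd': 'a', 'vd': 'c', 'index': 3}, i.e. the next free target suffix
    # per prefix and the next free controller index.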

    def enumerate_index(index_dict, index_key):
        index = index_dict[index_key]
        result = index_key + index if index_key in ['sd', 'vd'] else str(index)
        if index_key in ['sd', 'vd'] and index == 'z':
            index = 'aa'
        elif index_key in ['sd', 'vd']:
            if len(index) > 1:
                index = index[0] + chr(ord(index[-1]) + 1)
            else:
                index = chr(ord(index) + 1)
        else:
            index += 1
        index_dict[index_key] = index
        return result
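    # e.g. enumerate_index({'vd': 'a'}, 'vd') returns 'vda' and advances the
    # stored suffix to 'b'; at 'z' the suffix rolls over to 'aa' (vdaa, ...).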

    def match_new_addr(address):
        """
        Match any device address.
        """
        logging.info("The address is:%s" % address)
        match = re.match(
            r"(?P<bus>[0-9a-f]*):(?P<slot>[0-9a-f]*)\.(?P<function>[0-9a-f])",
            address)
        if match:
            addr_dict = match.groupdict()
            addr_dict['bus'] = hex(int(addr_dict['bus'], 16))
            addr_dict['slot'] = hex(int(addr_dict['slot'], 16))
            addr_dict['function'] = hex(int(addr_dict['function'], 16))
            addr_dict['domain'] = '0x0000'
            return addr_dict
        return None
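    # e.g. match_new_addr('00:01.0') returns
    # {'bus': '0x0', 'slot': '0x1', 'function': '0x0', 'domain': '0x0000'}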

    def add_device(type="usb", index="0", model="qemu-xhci"):
        """
        Add new device.
        """
        newcontroller = Controller("controller")
        newcontroller.type = type
        newcontroller.index = index
        newcontroller.model = model
        logging.debug("New controller is:%s", newcontroller)
        return newcontroller

    def setup_controller_xml():
        """
        Prepare controller devices of VM XML according to params.
        """

        if cntlr_type is None:
            type = 'pci'
        else:
            type = cntlr_type
        curcntlr = cntlr_cur
        while curcntlr < cntlr_num:
            ctrl = Controller(type_name=type)
            if cntlr_node:
                ctrl.node = cntlr_node
            if cntlr_model:
                ctrl.model = cntlr_model
                if cntlr_model == 'pci-bridge':
                    ctrl.model_name = {'name': 'pci-bridge'}
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            elif with_index:
                if cntlr_model == 'pci-bridge':
                    for i in range(
                            1,
                            int(match_new_addr(addr_str[curcntlr])['bus'], 16)
                            + 1):
                        vm_xml.add_device(add_device('pci', str(i),
                                                     'pci-root'))
                    ctrl.index = str(
                        int(match_new_addr(addr_str[curcntlr])['bus'], 16) + 1)
                else:
                    ctrl.index = str(curcntlr)
            if target_index is not None:
                ctrl.target = {'index': target_index}
            elif with_index:
                if cntlr_model == 'pci-bridge':
                    ctrl.target = {
                        'chassisNr':
                        str(
                            int(match_new_addr(addr_str[curcntlr])['bus'], 16)
                            + 1)
                    }
                else:
                    ctrl.target = {'index': str(curcntlr)}
            if addr_str is not None:
                for address in addr_str:
                    ctrl.address = ctrl.new_controller_address(
                        attrs=match_new_addr(address))

            logging.debug("Controller XML is:%s", ctrl)
            vm_xml.add_device(ctrl)
            curcntlr += 1
        if special_num:
            spe_num = int(special_num)
            ctrl = Controller(type_name=type)

            if cntlr_model:
                ctrl.model = cntlr_model
            ctrl.index = spe_num
            ctrl.target = {'index': spe_num}
            if addr_str is not None and cntlr_model != 'pci-root':
                for address in addr_str:
                    ctrl.address = ctrl.new_controller_address(
                        attrs=match_new_addr(address))

            logging.debug("Controller XML is:%s", ctrl)
            vm_xml.add_device(ctrl)

    def define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing XML.
        """
        fail_patts = []
        known_models = {
            'pci': ['pci-root', 'pci-bridge'],
            'virtio-serial': [],
            'usb': ['qemu-xhci'],
            'scsi': ['virtio-scsi'],
        }
        if status_error:
            if cntlr_type == 'pci' and cntlr_model:
                fail_patts.append(r"Invalid PCI controller model")
            if cntlr_type and cntlr_model not in known_models[cntlr_type]:
                fail_patts.append(r"Unknown model type")
            if cntlr_model == 'pcie-root':
                fail_patts.append(r"Device requires a standard PCI slot")
            if addr_str and '02:00.0' in addr_str:
                fail_patts.append(r"slot must be >= 1")
            elif addr_str and '02:32.0' in addr_str:
                fail_patts.append(r"must be <= 0x1F")
            if cntlr_num > 31 and cntlr_type == 'pci':
                fail_patts.append(r"out of range - must be 0-30")
            if cntlr_index and target_index:
                if (int(cntlr_index) != 0) ^ (int(target_index) != 0):
                    fail_patts.append(
                        r"Only the PCI controller with index 0 can have target index 0"
                    )

            # isdigit will return false on negative number, which just meet the
            # requirement of this test.
            if cntlr_index is not None and not cntlr_index.isdigit():
                fail_patts.append(r"Cannot parse controller index")

        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def plug_the_devices(attach_options, dev_index):
        for index, dev in enumerate(device_list):
            if addr_str:
                new_addr = match_new_addr(addr_str[index])
            if dev == 'disk':
                disk_xml = prepare_virt_disk_xml(
                    enumerate_index(dev_index, 'vd'),
                    'virtio',
                    virt_disk_bus=new_addr['bus'],
                    virt_disk_slot=new_addr['slot'])
                logging.debug("The disk xml is: %s" % disk_xml)
                result = virsh.attach_device(vm_name,
                                             disk_xml.xml,
                                             flagstr=attach_options,
                                             ignore_status=True,
                                             debug=True)
                libvirt.check_exit_status(result, status_error)
                devices_xml.append(disk_xml)
            elif dev == 'usb':
                disk_xml = prepare_virt_disk_xml(
                    enumerate_index(dev_index, 'sd'),
                    'usb',
                    usb_bus=enumerate_index(dev_index, 'index'))
                logging.debug("The disk xml is: %s" % disk_xml)
                result = virsh.attach_device(vm_name,
                                             disk_xml.xml,
                                             flagstr=attach_options,
                                             ignore_status=True,
                                             debug=True)
                libvirt.check_exit_status(result, status_error)
                devices_xml.append(disk_xml)
            elif dev == 'interface':
                iface_xml = prepare_iface_xml(iface_bus=new_addr['bus'],
                                              iface_slot=new_addr['slot'])
                logging.debug("The nic xml is: %s" % iface_xml)
                result = virsh.attach_device(vm_name,
                                             iface_xml.xml,
                                             flagstr=attach_options,
                                             ignore_status=True,
                                             debug=True)
                libvirt.check_exit_status(result, status_error)
                devices_xml.append(iface_xml)

    def start_and_check():
        """
        Predict the error message when starting and try to start the guest.
        """
        fail_patts = []
        res = virsh.start(vm_name)
        libvirt.check_result(res, expected_fails=fail_patts)
        vm.wait_for_login().close()
        return not res.exit_status

    def check_qemu_cmdline():
        """
        Check domain qemu command line against expectation.
        """
        cmdline = open('/proc/%s/cmdline' % vm.get_pid()).read()
        logging.debug('Qemu command line: %s', cmdline)
        cmdline_list = cmdline.split('\x00')
        # TODO
        checknum = 1
        if cntlr_num > checknum:
            checknum = cntlr_num
        if special_num and int(special_num) > checknum:
            checknum = int(special_num)
        if addr_str:
            for address in addr_str:
                bus = int(match_new_addr(address)['bus'], 16)
                if bus > checknum:
                    checknum = bus
        if device_num:
            if (device_num + 6) / 31 > checknum:
                checknum = int((device_num + 6) / 31) + 1
        if checknum == 1 and cntlr_num != -1:
            test.fail('Multiple controllers are not in use')
        else:
            for i in range(1, checknum):
                restr = r'spapr-pci-host-bridge,index=%s' % i
                if restr not in cmdline:
                    test.fail('The pci root with index %s was not created' % i)

    def check_in_guest_pci_with_addr(check_flag=True):
        def generate_match_line(index):
            match_dict = {
                'disk': 'SCSI storage controller',
                'memballoon': 'Unclassified device',
                'interface': 'Ethernet controller',
                'usb': 'USB controller',
                'pci-bridge': 'PCI bridge',
                'serial': 'Communication controller'
            }
            new_addr = match_new_addr(addr_str[index])
            match_line = '00(0[1-9a-f]|1[0-9a-f]):00:%s.0 ' % new_addr[
                'slot'].split('x')[1].zfill(2)
            if device_list[index] in match_dict.keys():
                match_line += match_dict[device_list[index]]
            else:
                test.fail('Unknown device(%s) in case config' %
                          device_list[index])
            return match_line

        session = vm.wait_for_login()
        cmd = 'lspci'
        try:
            guest_out = str(session.cmd_output_safe(cmd))
            logging.debug(guest_out)
            for i in range(len(addr_str)):
                match_line = generate_match_line(i)
                times = 0
                while True:
                    if not re.search(match_line, guest_out) and check_flag:
                        if times < 5:
                            time.sleep(5)
                            times += 1
                            guest_out = str(session.cmd_output_safe(cmd))
                            logging.debug(guest_out)
                        else:
                            test.fail('Could not find pci device in guest')
                    elif re.search(
                            match_line, guest_out
                    ) and not check_flag and device_list[i] != 'usb':
                        if times < 5:
                            time.sleep(5)
                            times += 1
                            guest_out = str(session.cmd_output_safe(cmd))
                            logging.debug(guest_out)
                        else:
                            test.fail('PCI device still found in guest after detach')
                    else:
                        break
        except Exception as e:
            session.close()
            test.fail(e)
        if cntlr_node and check_flag:
            cmd = "lspci -vv -s %s | grep 'NUMA node:' | grep -o [0-9]*"
            for addr in addr_str:
                guest_out = str(session.cmd_output_safe(cmd % addr)).strip()
                if str(guest_out) != cntlr_node:
                    test.fail('Device not plugged into the expected NUMA node')
        session.close()

    def check_in_guest_pci_with_num():
        if cntlr_type == 'scsi':
            check_line = 'SCSI storage controller'
        else:
            return

        session = vm.wait_for_login()
        cmd = "lspci | grep '%s' | wc -l" % check_line
        try:
            guest_out = str(session.cmd_output_safe(cmd))
            logging.debug(guest_out)
            if int(guest_out) != int(cntlr_num) + int(default_pci):
                test.fail('The number of controllers is incorrect')
        except Exception as e:
            test.fail(e)
        finally:
            session.close()

    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        vm_xml.remove_all_device_by_type('controller')
        remove_usb_devices(vm_xml)
        if int(cntlr_num) > 10:
            virsh.start(vm_name)
            session = vm.wait_for_login()
            cmd = "lspci | grep 'SCSI storage controller' | wc -l"
            try:
                default_pci = str(session.cmd_output_safe(cmd))
            except Exception as e:
                test.fail(e)
            finally:
                session.close()
                virsh.destroy(vm_name)
        spe_device = False
        if numa:
            vmcpuxml = vm_xml.xmltreefile.find('/cpu')
            vmcpuxml.numa_cell = [{
                'id': '0',
                'cpus': '0',
                'memory': '1048576'
            }, {
                'id': '1',
                'cpus': '1',
                'memory': '1048576'
            }]
            vm_xml.xmltreefile.write()
        if with_define:
            if addr_str:
                for i in range(len(addr_str)):
                    if device_list[i] in ['disk', 'memballoon', 'interface']:
                        spe_device = True
                        spe_ele = vm_xml.xmltreefile.find(
                            '/devices/%s/address' % device_list[i])
                        new_addr = match_new_addr(addr_str[i])
                        spe_ele.attrib['slot'] = str(new_addr['slot'])
                        spe_ele.attrib['bus'] = str(new_addr['bus'])
                        vm_xml.xmltreefile.write()
            if not spe_device:
                remove_all_addresses(vm_xml)

            if cntlr_num or special_num:
                setup_controller_xml()

            if device_num:
                newdev_list = []
                for i in range(1, device_num + 1):
                    newdev_list.append(add_device(index=str(i)))

                dev_list = vm_xml.get_devices()
                dev_list.extend(newdev_list)
                vm_xml.set_devices(dev_list)

        if prepare_cntlr:
            setup_controller_xml()

        if hotplug or coldplug:
            if 'usb' in device_list:
                vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
                for index, dev in enumerate(device_list):
                    if dev == 'usb':
                        new_addr = match_new_addr(addr_str[index])
                        dev_index = check_index_in_xml(vm_xml)
                        prepare_usb_controller(vm_xml, dev_index['index'],
                                               new_addr)
                        vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)

        logging.debug("Test VM XML is %s" % vm_xml)

        if not define_and_check() and status_error:
            logging.debug("VM define failed as expected, exiting.")
        else:
            incxml = virsh.dumpxml(vm_name).stdout
            logging.debug("The inactive xml:%s" % incxml)
            if coldplug:
                attach_options = "--config"
                dev_index = check_index_in_xml(vm_xml)
                plug_the_devices(attach_options, dev_index)
                incxml = virsh.dumpxml(vm_name)
                logging.debug("The xml after cold plug:%s" % incxml)
            try:

                if not start_and_check() and status_error:
                    logging.debug("VM start failed as expected, exiting.")
                else:
                    if hotplug:
                        attach_options = "--live"
                        dev_index = check_index_in_xml(vm_xml)
                        plug_the_devices(attach_options, dev_index)
                        incxml = virsh.dumpxml(vm_name)
                        logging.debug("The xml after hot plug:%s" % incxml)
                    if qemu_cmd_check:
                        check_qemu_cmdline()
                    if addr_str:
                        check_in_guest_pci_with_addr()
                    if int(cntlr_num) > 10:
                        check_in_guest_pci_with_num()
            except virt_vm.VMStartError as detail:
                test.error(detail)

        if hotunplug:
            logging.debug("Try to hot unplug")
            detach_options = "--live"
            for xml in devices_xml:
                result = virsh.detach_device(vm_name,
                                             xml.xml,
                                             flagstr=detach_options,
                                             ignore_status=True,
                                             debug=True)
                libvirt.check_exit_status(result, status_error)
            if addr_str:
                check_in_guest_pci_with_addr(False)

    finally:
        vm_xml_backup.sync()

        for img in disks_img:
            os.remove(img)
Example #28
def run(test, params, env):
    """
    Test command: virsh pool-define; pool-define-as; pool-start;
    vol-list pool; attach-device LUN to guest; mount the device;
    dd to the mounted device; unmount; pool-destroy; pool-undefine;

    Pre-requisite:
    Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped to
    a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = params.get("pre_def_pool", "no")
    define_pool = params.get("define_pool", "no")
    define_pool_as = params.get("define_pool_as", "no")
    pool_create_as = params.get("pool_create_as", "no")
    need_pool_build = params.get("need_pool_build", "no")
    need_vol_create = params.get("need_vol_create", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("disk_target_dev", "sdc")
    pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE")
    vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE")
    vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    vol_format = params.get("volume_format", 'raw')
    attach_method = params.get("attach_method", "hot")
    test_unit = None
    mount_disk = None
    pool_kwargs = {}
    pool_extra_args = ""
    emulated_image = "emulated-image"
    disk_xml = ""
    new_vhbas = []
    source_dev = ""
    mpath_vol_path = ""
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    if pool_type == "scsi":
        if ('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn):
            raise exceptions.TestSkipError(
                    "No wwpn and wwnn provided for npiv scsi pool.")
    if pool_type == "logical":
        if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn):
            raise exceptions.TestSkipError(
                    "No wwpn and wwnn provided for vhba.")
    online_hbas_list = utils_npiv.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)
    old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                       replace_existing=True)
    if not online_hbas_list:
        raise exceptions.TestSkipError(
            "Host doesn't have online hba cards")
    old_vhbas = utils_npiv.find_hbas("vhba")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if not vm.is_alive():
        vm.start()
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    if pool_type == "scsi":
        if define_pool == "yes":
            if pool_adapter_parent == "":
                pool_adapter_parent = online_hbas_list[0]
            pool_kwargs = {'source_path': source_path,
                           'source_name': source_name,
                           'source_format': source_format,
                           'pool_adapter_type': pool_adapter_type,
                           'pool_adapter_parent': pool_adapter_parent,
                           'pool_wwnn': pool_wwnn,
                           'pool_wwpn': pool_wwpn}
    elif pool_type == "logical":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": online_hbas_list[0],
                "scsi_wwnn": vhba_wwnn,
                "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not successfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*5)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        source_dev = "/dev/mapper/" + new_mpath_devs[0]
        logging.debug("We are going to use \"%s\" as our source device"
                      " to create a logical pool", source_dev)
        try:
            cmd = "parted %s mklabel msdos -s" % source_dev
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError("Error occurred when running parted mklabel")
        if define_pool_as == "yes":
            pool_extra_args = ""
            if source_dev:
                pool_extra_args = ' --source-dev %s' % source_dev
    elif pool_type == "mpath":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": online_hbas_list[0],
                "scsi_wwnn": vhba_wwnn,
                "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not successfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*2)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0]
        try:
            cmd = "parted %s mklabel msdos -s" % mpath_vol_path
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError("Error occurred when running parted mklabel")
    if pre_def_pool == "yes":
        try:
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type,
                         pool_target, emulated_image,
                         **pool_kwargs)
            utils_misc.wait_for(
                    lambda: utils_npiv.is_vhbas_added(old_vhbas),
                    _DELAY_TIME*2)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            virsh.pool_destroy(pool_name)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
            raise exceptions.TestError(
                "Error occurred when prepare pool xml:\n %s" % e)
        if os.path.exists(pool_xml_f):
            with open(pool_xml_f, 'r') as f:
                logging.debug("Create pool from file: %s", f.read())
    try:
        # define/create/start the pool
        if (pre_def_pool == "yes") and (define_pool == "yes"):
            pool_define_status = virsh.pool_define(pool_xml_f,
                                                   ignore_status=True,
                                                   debug=True)
            utlv.check_exit_status(pool_define_status)
        if define_pool_as == "yes":
            pool_define_as_status = virsh.pool_define_as(
                pool_name, pool_type,
                pool_target, pool_extra_args,
                ignore_status=True, debug=True
                )
            utlv.check_exit_status(pool_define_as_status)
        if pool_create_as == "yes":
            if pool_type != "scsi":
                raise exceptions.TestSkipError("pool-create-as only needs to "
                                               "be covered by scsi pool for "
                                               "NPIV test.")
            cmd = ("virsh pool-create-as %s %s "
                   "--adapter-wwnn %s --adapter-wwpn %s "
                   "--adapter-parent %s --target %s"
                   % (pool_name, pool_type, pool_wwnn, pool_wwpn,
                      online_hbas_list[0], pool_target))
            cmd_status = process.system(cmd, verbose=True)
            if cmd_status:
                raise exceptions.TestFail("pool-create-as scsi pool failed.")
        if need_pool_build == "yes":
            pool_build_status = virsh.pool_build(pool_name, "--overwrite")
            utlv.check_exit_status(pool_build_status)

        pool_ins = libvirt_storage.StoragePool()
        if not pool_ins.pool_exists(pool_name):
            raise exceptions.TestFail("define or create pool failed.")
        else:
            if not pool_ins.is_pool_active(pool_name):
                pool_start_status = virsh.pool_start(pool_name)
                utlv.check_exit_status(pool_start_status)
                utlv.check_actived_pool(pool_name)
                pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
                logging.debug("Pool detail: %s", pool_detail)

        # create vol if required
        if need_vol_create == "yes":
            vol_create_as_status = virsh.vol_create_as(
                    volume_name, pool_name,
                    volume_capacity, allocation,
                    vol_format, "", debug=True
                    )
            utlv.check_exit_status(vol_create_as_status)
        virsh.pool_refresh(pool_name)
        vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                     timeout=_DELAY_TIME*3)
        logging.debug('Volume list is: %s' % vol_list)

        # use test_unit to save the first vol in pool
        if pool_type == "mpath":
            cmd = ("virsh vol-list %s | grep \"%s\" | awk '{print $1}'"
                   % (pool_name, mpath_vol_path))
            cmd_result = process.run(cmd, shell=True)
            status = cmd_result.exit_status
            output = cmd_result.stdout_text.strip()
            if cmd_result.exit_status:
                raise exceptions.TestFail("vol-list pool %s failed" % pool_name)
            if not output:
                raise exceptions.TestFail("Newly added mpath dev not in pool.")
            test_unit = output
            logging.info(
                "Using %s to attach to a guest", test_unit)
        else:
            test_unit = list(vol_list.keys())[0]
            logging.info(
                "Using the first volume %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()

        # prepare disk xml which will be hot/cold attached to vm
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': vol_format}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        disk_xml_str = open(lun_disk_xml).read()
        logging.debug("The disk xml is: %s", disk_xml_str)

        # hot attach disk xml to vm
        if attach_method == "hot":
            copyfile(lun_disk_xml, disk_xml)
            dev_attach_status = virsh.attach_device(vm_name, disk_xml,
                                                    debug=True)
            # Pool/vol virtual disk is not supported by mpath pool yet.
            if dev_attach_status.exit_status and pool_type == "mpath":
                session.close()
                raise exceptions.TestSkipError("mpath pool vol is not "
                                               "supported in virtual disk yet, "
                                               "the error message is: %s" %
                                               dev_attach_status.stderr)
            utlv.check_exit_status(dev_attach_status)
        # cold attach disk xml to vm
        elif attach_method == "cold":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            new_disk = disk.Disk()
            new_disk.xml = disk_xml_str
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            # list.append returns None, so append first and assign afterwards
            devices = vmxml.devices
            devices.append(new_disk)
            vmxml.devices = devices
            vmxml.sync()
            logging.debug(vmxml)
            try:
                vm.start()
            except virt_vm.VMStartError as e:
                logging.debug(e)
                if pool_type == "mpath":
                    raise exceptions.TestSkipError("'mpath' pools for backing "
                                                   "'volume' disks isn't "
                                                   "supported for now")
                else:
                    raise exceptions.TestFail("Failed to start vm")
            session = vm.wait_for_login()
        else:
            pass

        # checking attached disk in vm
        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        # detach disk from vm
        dev_detach_status = virsh.detach_device(vm_name, disk_xml,
                                                debug=True)
        utlv.check_exit_status(dev_detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            data_dir.clean_tmp_files()
            logging.debug("Cleanup disk xml")
        if pre_def_pool == "yes":
            # Do not apply cleanup_pool for logical pool, logical pool will
            # be cleaned below
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
        if (test_unit and
                (need_vol_create == "yes" and (pre_def_pool == "no")) and
                (pool_type == "logical")):
            process.system('lvremove -f %s/%s' % (pool_name, test_unit),
                           verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            utils_npiv.vhbas_cleanup(new_vhbas)
        # Restart multipathd, this is to avoid bz1399075
        if source_dev:
            utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(source_dev),
                                _DELAY_TIME*5, 0.0, 5.0)
        elif mpath_vol_path:
            utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(mpath_vol_path),
                                _DELAY_TIME*5, 0.0, 5.0)
        else:
            utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
Example #29
    def detach_interface():
        """
            Detach interface:

            1. Detach interface from xml;
            2. Check the live xml after detach interface;
            3. Check the vf driver after detach interface.
        """
        def _detach_completed():
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1
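        # Once the live detach completes, domiflist no longer reports the
        # interface's mac, so polling this predicate signals completion.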

        def check_addr_attrs():
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device = live_xml.devices
            hostdev_list = device.by_device_tag("hostdev")
            for hostdev in hostdev_list:
                addr = hostdev.source.untyped_address
                hostdev_addr_attrs = {
                    "domain": addr.domain,
                    "bus": addr.bus,
                    "slot": addr.slot,
                    "function": addr.function
                }
                if hostdev_addr_attrs == vf_addr_attrs:
                    return False
            return True

        result = virsh.detach_device(vm_name, new_iface.xml)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if vf_type == "hostdev":
            check_ret = utils_misc.wait_for(check_addr_attrs, timeout=60)
            if not check_ret:
                test.fail("The hostdev device was not detached from the xml\n")
        else:
            utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)

        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail(
                            "The hostdev interface still exists in the guest xml after detach\n"
                        )
                    break
            driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            logging.debug("The vf driver after detaching from the guest is %s",
                          driver)
            if managed == "no":
                if driver != "vfio-pci":
                    test.fail(
                        "The vf pci driver is not vfio-pci after being detached from the guest with managed='no'\n"
                    )
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            elif driver != origin_driver:
                test.fail(
                    "The vf pci driver is not reset to the original driver after being detached from the guest: %s vs %s\n"
                    % (driver, origin_driver))
        else:
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail(
                            "The macvtap interface still exists in the guest xml after detach\n"
                        )
                    break
Example #30
def run(test, params, env):
    """
    Test virtio/virtio-transitional/virtio-non-transitional model of vsock

    :param test: Test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    add_pcie_to_pci_bridge = params.get("add_pcie_to_pci_bridge")
    guest_src_url = params.get("guest_src_url")
    virtio_model = params['virtio_model']
    boot_with_vsock = (params.get('boot_with_vsock', 'yes') == 'yes')
    hotplug = (params.get('hotplug', 'no') == 'yes')
    addr_pattern = params['addr_pattern']
    device_pattern = params['device_pattern']

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    def check_vsock_inside_guest():
        """
        check vsock device inside guest
        """
        lspci_cmd = 'lspci'
        lspci_output = session.cmd_output(lspci_cmd)
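        # Match the vsock PCI device in the lspci output; it typically shows
        # up as a 'Communication controller' entry (the exact patterns come
        # from the addr_pattern/device_pattern test parameters)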
        device_str = re.findall(r'%s\s%s' % (addr_pattern, device_pattern),
                                lspci_output)
        if not device_str:
            test.fail('lspci failed, no device "%s"' % device_pattern)

    # Download and replace the image when guest_src_url is provided
    if guest_src_url:
        image_name = params['image_path']
        target_path = utils_misc.get_path(data_dir.get_data_dir(), image_name)
        if not os.path.exists(target_path):
            download.get_file(guest_src_url, target_path)
        params["blk_source_name"] = target_path

    # Add pcie-to-pci-bridge when it is required
    if add_pcie_to_pci_bridge:
        pci_controllers = vmxml.get_controllers('pci')
        for controller in pci_controllers:
            if controller.get('model') == 'pcie-to-pci-bridge':
                break
        else:
            contr_dict = {'controller_type': 'pci',
                          'controller_model': 'pcie-to-pci-bridge'}
            cntl_add = libvirt.create_controller_xml(contr_dict)
            libvirt.add_controller(vm_name, cntl_add)

    # Generate xml for device vsock
    vsock_xml = libvirt.create_vsock_xml(virtio_model)
    if boot_with_vsock:  # Add vsock xml to vm only when needed
        libvirt.add_vm_device(vmxml, vsock_xml)
    try:
        if (params["os_variant"] == 'rhel6' or
                'rhel6' in params.get("shortname")):
            # Update interface to virtio-transitional mode for
            # rhel6 guest to make it works for login
            iface_params = {'model': 'virtio-transitional'}
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)
        libvirt.set_vm_disk(vm, params)
        if hotplug:
            file_arg = vsock_xml.xml
            with open(file_arg) as vsock_file:
                logging.debug("Attach vsock by XML: %s", vsock_file.read())
            s_attach = virsh.attach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_attach)
        if add_pcie_to_pci_bridge:
            # Check that the device is plugged onto the right bus
            virtio_transitional_base.check_plug_to(vm_name, 'vsock')
        session = vm.wait_for_login()
        check_vsock_inside_guest()
        if hotplug:
            with open(file_arg) as vsock_file:
                logging.debug("Detach vsock by XML: %s", vsock_file.read())
            s_detach = virsh.detach_device(vm_name, file_arg, debug=True)
            libvirt.check_exit_status(s_detach)
    finally:
        vm.destroy()
        backup_xml.sync()
Example #31
def run(test, params, env):
    """
    Test network/interface function on 2 vms:

        - Test settings on 2 vms
        - Run ping check on 2 vms including pinging each other
        ...

    """
    vms = params.get('vms').split()
    vm_list = [env.get_vm(v_name) for v_name in vms]
    if len(vm_list) != 2:
        test.cancel('This test requires exactly 2 vms')

    feature = params.get('feature', '')
    case = params.get('case', '')
    check_ping = 'yes' == params.get('check_ping')
    expect_ping_host = 'yes' == params.get('expect_ping_host', 'no')
    expect_ping_out = 'yes' == params.get('expect_ping_out', 'no')
    expect_ping_vm = 'yes' == params.get('expect_ping_vm', 'no')
    out_ip = params.get('out_ip', 'www.redhat.com')
    live_update = 'yes' == params.get('live_update', 'no')
    set_all = 'yes' == params.get('set_all', 'no')

    rand_id = '_' + utils_misc.generate_random_string(3)
    bridge_name = params.get('bridge_name', 'test_br0') + rand_id
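    # Use the first host interface that is UP as the bridge's physical port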
    iface_name = utils_net.get_net_if(state="UP")[0]
    test_net = 'net_isolated' + rand_id
    bridge_created = False

    vmxml_backup_list = []
    for vm_i in vm_list:
        vmxml_backup_list.append(vm_xml.VMXML.new_from_inactive_dumpxml(vm_i.name))

    try:
        # Test feature: port isolated
        if feature == 'port_isolated':
            if not libvirt_version.version_compare(6, 2, 0):
                test.cancel('Libvirt version should be'
                            ' >= 6.2.0 to support port isolation')

            if case.startswith('set_iface'):
                create_bridge(bridge_name, iface_name)
                bridge_created = True
                iface_type = case.split('_')[-1]
                if iface_type == 'network':
                    net_dict = {'net_forward': "{'mode': 'bridge'}",
                                'net_bridge': "{'name': '%s'}" % bridge_name}
                    prepare_network(test_net, **net_dict)
                    updated_iface_dict = {
                        'type': iface_type,
                        'source': "{'network': '%s'}" % test_net,
                    }
                elif iface_type == 'bridge':
                    updated_iface_dict = {
                        'type': iface_type,
                        'source': "{'bridge': '%s'}" % bridge_name,
                    }
                else:
                    test.error('Unsupported iface type: %s' % iface_type)

                # Set 2 vms to isolated=yes or set one to 'yes', the other to 'no'
                isolated_settings = ['yes'] * 2 if set_all else ['yes', 'no']
                for i in (0, 1):
                    vm_i = vm_list[i]
                    new_iface_dict = dict(
                        list(updated_iface_dict.items()) +
                        [('port', "{'isolated': '%s'}" % isolated_settings[i])]
                    )
                    libvirt.modify_vm_iface(vm_i.name, 'update_iface',
                                            new_iface_dict)
                    logging.debug(virsh.dumpxml(vm_i.name).stdout_text)

            if case == 'update_iface':
                if params.get('iface_port'):
                    iface_dict = {'port': params['iface_port']}
                    for vm_i in vm_list:
                        libvirt.modify_vm_iface(vm_i.name, 'update_iface', iface_dict)
                        logging.debug(virsh.dumpxml(vm_i.name).stdout_text)
                if live_update:
                    for vm_i in vm_list:
                        vm_i.start()

                # Test Update iface with new attrs
                new_iface_dict = {}
                if params.get('new_iface_port'):
                    new_iface_dict['port'] = params['new_iface_port']
                elif params.get('del_port') == 'yes':
                    new_iface_dict['del_port'] = True
                for vm_i in vm_list:
                    updated_iface = libvirt.modify_vm_iface(vm_i.name, 'get_xml', new_iface_dict)
                    result = virsh.update_device(vm_i.name, updated_iface, debug=True)
                    libvirt.check_exit_status(result)

            if case == 'attach_iface':
                new_ifaces = {}
                for vm_i in vm_list:
                    # Create iface xml to be attached
                    new_iface = interface.Interface('network')
                    new_iface.xml = libvirt.modify_vm_iface(
                        vm_i.name, 'get_xml',
                        {'port': params.get('new_iface_port')}
                    )
                    new_ifaces[vm_i.name] = new_iface

                    # Remove current ifaces on vm
                    vmxml_i = vm_xml.VMXML.new_from_inactive_dumpxml(vm_i.name)
                    vmxml_i.remove_all_device_by_type('interface')
                    vmxml_i.sync()
                    logging.debug(virsh.dumpxml(vm_i.name).stdout_text)

                    # Start vm for hotplug
                    vm_i.start()
                    session = vm_i.wait_for_serial_login()

                    # Hotplug iface
                    virsh.attach_device(vm_i.name, new_iface.xml, debug=True, ignore_status=False)

                    # Wait a few seconds for interface to be fully attached
                    time.sleep(5)
                    ip_l_before = session.cmd_output('ip l')
                    logging.debug(ip_l_before)
                    session.close()

            if case == 'set_network':
                create_bridge(bridge_name, iface_name)
                bridge_created = True
                net_dict = {
                    'net_forward': "{'mode': 'bridge'}",
                    'net_bridge': "{'name': '%s'}" % bridge_name,
                    'net_port': "{'isolated': '%s'}" % params.get(
                        'net_isolated', 'yes')}
                prepare_network(test_net, **net_dict)

                # Modify iface to connect to newly added network
                updated_iface_dict = {'type': 'network',
                                      'source': "{'network': '%s'}" % test_net}
                for vm_i in vm_list:
                    libvirt.modify_vm_iface(vm_i.name, 'update_iface',
                                            updated_iface_dict)
                    logging.debug(virsh.domiflist(vm_i.name).stdout_text)

        # Check ping result from vm session to host, outside, the other vm
        if check_ping:
            for vm_i in vm_list:
                if vm_i.is_dead():
                    vm_i.start()
            host_ip = utils_net.get_host_ip_address()
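            # Map each ping target to whether pinging it is expected to succeed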
            ping_expect = {
                host_ip: expect_ping_host,
                out_ip: expect_ping_out,
            }

            # A map of vm session and vm's ip addr
            session_n_ip = {}
            for vm_i in vm_list:
                mac = vm_i.get_mac_address()
                sess = vm_i.wait_for_serial_login()
                vm_ip = utils_net.get_guest_ip_addr(sess, mac)
                session_n_ip[sess] = vm_ip
                logging.debug('Vm %s ip: %s', vm_i.name, vm_ip)

            # Check ping result from each vm's session
            for i in (0, 1):
                sess = list(session_n_ip.keys())[i]
                another_sess = list(session_n_ip.keys())[1 - i]
                ping_expect[session_n_ip[another_sess]] = expect_ping_vm
                if not ping_func(sess, **ping_expect):
                    test.fail('Ping check failed')
                # Remove the other vm's ip from the expected targets so the
                # next round of checks does not ping-check the vm itself
                ping_expect.pop(session_n_ip[another_sess])

        # Some test steps after ping check
        if feature == 'port_isolated':
            if case == 'attach_iface':
                # Test detach of iface
                for vm_name_i in new_ifaces:
                    virsh.detach_device(
                        vm_name_i,
                        new_ifaces[vm_name_i].xml,
                        wait_for_event=True,
                        debug=True, ignore_status=False
                    )

                # Check whether the iface was successfully detached by
                # comparing 'ip l' output before and after
                for vm_i in vm_list:
                    session = vm_i.wait_for_serial_login()
                    ip_l_after = session.cmd_output('ip l')
                    session.close()
                    if len(ip_l_before.splitlines()) == len(ip_l_after.splitlines()):
                        test.fail('Output of "ip l" is not changed after detach; '
                                  'interface not successfully detached')
Example #32
def run(test, params, env):
    """
    Test rbd disk device.

    1.Prepare test environment,destroy or suspend a VM.
    2.Prepare disk image.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    additional_xml_file = os.path.join(TMP_DATA_DIR, "additional_disk.xml")

    def config_ceph():
        """
        Write the configs to the file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
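        # Build a 'mon_host = host1:port1,host2:port2' line for the ceph
        # config file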
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(disk_src_host.split(),
                                              disk_src_port.split()):
                source_host.append({'name': host_name, 'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = (
                    "--auth-type %s --auth-username %s --secret-usage '%s'" %
                    (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(
                    pool_name, mon_host, disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: Volume parameters dict.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Unsupported operations
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name,
                               vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name,
                                 "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
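        # Each process.run() below raises CmdError (failing the test) if the
        # grep pipeline matches nothing, i.e. an expected option is missing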
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = r'\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(TMP_DATA_DIR, "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(TMP_DATA_DIR, "rbd.mem")
        snap_disk = os.path.join(TMP_DATA_DIR, "rbd.disk")
        xml_snap_exp = [
            "disk name='%s' snapshot='external' type='file'" % target_dev
        ]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        elif test_disk_readonly:
            if libvirt_version.version_compare(6, 0, 0):
                libvirt.check_result(ret)
            else:
                libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(TMP_DATA_DIR, "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
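            # virsh reports block job progress on stderr, hence ret.stderr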
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")

        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.
        :param vm_obj: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Partition list collected before attaching the disk.
        :param read_only: Whether the disk is expected to be read-only.
        :return: True if the check succeeds.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
            elif target.startswith("sd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)".format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM: %s, %s", s, o)
            # For a read-only fs, check the error messages: the command may
            # fail, but the read-only message should appear in its output
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots of the rbd_vol.img volume, unprotect them, and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True,
                                shell=True).stdout_text
        snap_names = []
        if snaps_out:
            for line in snaps_out.rsplit("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
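                # Each snapshot line has 4 whitespace-separated fields; the
                # snapshot name is the second one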
                snap_line = line.rsplit()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect the snapshots first, otherwise purging the volume fails
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt,
                                 os.path.join(disk_src_pool, vol_name),
                                 snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume, and then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)

    def make_snapshot():
        """
        Make external snapshots.

        :return: external snapshot path list
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(TMP_DATA_DIR, "mem.s2")
        snapshot3_file = os.path.join(TMP_DATA_DIR, "mem.s3")
        snapshot4_file = os.path.join(TMP_DATA_DIR, "mem.s4")
        snapshot4_disk_file = os.path.join(TMP_DATA_DIR, "disk.s4")
        snapshot5_file = os.path.join(TMP_DATA_DIR, "mem.s5")
        snapshot5_disk_file = os.path.join(TMP_DATA_DIR, "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {
            "s1": "s1 --disk-only --no-metadata",
            "s2": "s2 --memspec %s --no-metadata" % snapshot2_file,
            "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file,
            "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" %
                  (snapshot4_file, snapshot4_disk_file),
            "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" %
                  (snapshot5_file, snapshot5_disk_file)
        }
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name,
                                           snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(
                    first_disk_source.replace('qcow2', snapshot_name))
        return snapshot_path_list

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = secret_list_result.stdout_text.strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # Skip parsing when the secret list is empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace; the UUID is the first column
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    rbd_blockcopy = "yes" == params.get("rbd_blockcopy", "no")
    enable_slice = "yes" == params.get("enable_slice", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume",
                                           "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get(
        "test_disk_internal_snapshot", "no")
    test_disk_external_snapshot = "yes" == params.get(
        "test_disk_external_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol",
                                                    "no")
    disk_snapshot_with_sanlock = "yes" == params.get(
        "disk_internal_with_sanlock", "no")
    auth_place_in_source = params.get("auth_place_in_source")
    test_target_bus = "yes" == params.get("scsi_target_test", "no")

    # Start with an empty path so cleanup knows whether a ceph config
    # file was created during the test
    ceph_cfg = ""
    # Create config file if it doesn't exist
    ceph_cfg = ceph.create_config_file(mon_host)

    # After libvirt 3.9.0, auth element can be put into source part.
    if auth_place_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel(
            "place auth in source is not supported in current libvirt version")

    # After libvirt 6.0.0, the blockcopy rbd backend feature is supported.
    if rbd_blockcopy and not libvirt_version.version_compare(6, 0, 0):
        test.cancel(
            "blockcopy rbd backend is not supported in current libvirt version"
        )

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name,
                                        guest_name,
                                        True,
                                        timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(TMP_DATA_DIR, "ceph.key")
    img_file = os.path.join(TMP_DATA_DIR, "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(TMP_DATA_DIR,
                                      "%s_frontend_test.img" % vm_name)
    # Construct a list of 'unsupported' error messages used to skip such tests
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append(
                'unsupported configuration: external snapshot ' +
                'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append(
            'unsupported configuration: internal snapshot for disk ' +
            'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up any leftover secrets in the test environment.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("Failed to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("Failed to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = '******'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict[
                "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict[
                "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict[
                "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict[
                "sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict[
                "chown sanlock:sanlock " +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-lock"
            sanlock_cmd_dict[
                "sanlock client add_lockspace -s TEST_LS:1:" +
                "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {
                'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'
            }
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()

        # Install the ceph-common package, which includes the rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" % (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)
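                # 'virsh secret-define' prints 'Secret <uuid> created';
                # the regex captures the uuid token from that line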

                secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
                logging.debug("Secret uuid %s", secret_uuid)
                if secret_uuid is None:
                    test.error("Failed to get secret uuid")

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
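        # qemu-img style rbd pseudo-URI:
        # rbd:<pool>/<image>:mon_host=<host>[:id=<user>:key=<key>]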
        disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" % (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        targetbus = params.get("disk_target_bus", "virtio")
        if test_target_bus:
            targetdev = params.get("disk_target", "sdb")
            targetbus = params.get("disk_target_bus", "scsi")
            # Add virtio-scsi controller for sd* test
            controller_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            conduct = {'controller_type': targetbus}
            ctrl = libvirt.create_controller_xml(conduct)
            libvirt.add_controller(vm.name, ctrl)
            logging.debug("Controller XML is:%s", ctrl)
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": targetbus,
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port
        })
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt, disk_src_name,
                                          disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {
                "name": vol_name,
                "capacity": int(vol_cap),
                "capacity_unit": vol_cap_unit,
                "format": disk_format
            }

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        elif rbd_blockcopy:
            # Create one disk to attach to VM as second disk device
            second_disk_params = {}
            disk_size = params.get("virt_disk_device_size", "50M")
            device_source = libvirt.create_local_disk("file",
                                                      img_file,
                                                      disk_size,
                                                      disk_format="qcow2")
            second_disk_params.update({"source_file": device_source})
            second_disk_params.update({"driver_type": "qcow2"})
            second_xml_file = libvirt.create_disk_xml(second_disk_params)
            opts = params.get("attach_option", "--config")
            ret = virsh.attach_device(vm_name,
                                      second_xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret)
        else:
            # Create a local image and make a filesystem on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = (
                "rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                " %s %s %s" % (mon_host, key_opt, disk_src_name, disk_format,
                               img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # After blockdev was introduced, qemu-img warns that RBD
                # options encoded in the filename as key/value pairs are
                # deprecated
                if libvirt_version.version_compare(6, 0, 0):
                    test.cancel(
                        "qemu-img: warning: RBD options encoded in the filename as keyvalue pairs in json format is deprecated"
                    )
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}' %
                            (disk_src_name, mon_host))
                # pass different json string according to the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' %
                                (json_str[:-2], auth_user, auth_key))
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # If hotplugging, start the VM first and wait for the OS to boot.
        # Otherwise stop the VM if it is running.
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # Auth options are not needed for volume disks
                for key in ("auth_user", "auth_type", "secret_type",
                            "secret_uuid", "secret_usage"):
                    params.pop(key, None)
            # After libvirt 3.9.0, the auth element can be placed in the source part.
            if auth_place_in_source:
                params.update({"auth_in_source": auth_place_in_source})
            xml_file = libvirt.create_disk_xml(params)
            if additional_guest:
                # Copy xml_file for additional guest VM.
                shutil.copyfile(xml_file, additional_xml_file)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                    additional_vm.wait_for_login().close()
                ret = virsh.attach_device(guest_name,
                                          additional_xml_file,
                                          "",
                                          debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif rbd_blockcopy:
            if enable_slice:
                disk_cmd = ("rbd -m %s %s create %s --size 400M 2> /dev/null" %
                            (mon_host, key_opt, disk_src_name))
                process.run(disk_cmd, ignore_status=False, shell=True)
                slice_dict = {
                    "slice_type": "storage",
                    "slice_offset": "12345",
                    "slice_size": "52428800"
                }
                params.update({"disk_slice": slice_dict})
                logging.debug(
                    'create one volume on ceph backend storage for slice testing'
                )
            # Create one file on VM before doing blockcopy
            try:
                session = vm.wait_for_login()
                cmd = (
                    "mkfs.ext4 -F /dev/{0} && mount /dev/{0} /mnt && ls /mnt && (sleep 3;"
                    " touch /mnt/rbd_blockcopyfile; umount /mnt)".format(
                        targetdev))
                s, o = session.cmd_status_output(cmd, timeout=60)
                session.close()
                logging.info(
                    "Touch one file on the newly added disk in VM: %s, %s", s, o)
            except (remote.LoginError, virt_vm.VMError,
                    aexpect.ShellError) as e:
                logging.error(str(e))
            # Create rbd backend xml
            rbd_blockcopy_xml_file = libvirt.create_disk_xml(params)
            logging.debug("The rbd blockcopy xml is: %s",
                          rbd_blockcopy_xml_file)
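            # With --xml, blockcopy copies to the destination described by the
            # disk XML (the rbd backend here) instead of to a local file path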
            dest_path = " --xml %s" % rbd_blockcopy_xml_file
            options1 = params.get("rbd_pivot_option",
                                  " --wait --verbose --transient-job --pivot")
            extra_dict = {'debug': True}
            cmd_result = virsh.blockcopy(vm_name, targetdev, dest_path,
                                         options1, **extra_dict)
            libvirt.check_exit_status(cmd_result)
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait until the vm is running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()

        # Since blockdev was introduced in libvirt 6.0.0, the file=rbd:*
        # information is no longer present in the qemu command line
        if libvirt_version.version_compare(6, 0, 0):
            test_qemu_cmd = False

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(
                    vm, targetdev, old_parts, read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly and not libvirt_version.version_compare(6, 0, 0):
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        if test_disk_external_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Check rbd blockcopy inside VM
        if rbd_blockcopy:
            try:
                session = vm.wait_for_login()
                cmd = (
                    "mount /dev/{0} /mnt && ls /mnt/rbd_blockcopyfile && (sleep 3;"
                    " umount /mnt)".format(targetdev))
                s, o = session.cmd_status_output(cmd, timeout=60)
                session.close()
                logging.info(
                    "List the file on the new rbd backend disk in VM: "
                    "status=%s, output=%s", s, o)
            except (remote.LoginError, virt_vm.VMError,
                    aexpect.ShellError) as e:
                logging.error(str(e))
            debug_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

            def _check_slice_in_xml():
                """
                Check slice attribute in disk xml.
                """
                debug_vmxml = virsh.dumpxml(vm_name, "",
                                            debug=True).stdout.strip()
                return 'slices' in debug_vmxml

            if enable_slice:
                if not _check_slice_in_xml():
                    test.fail("Failed to find slice attribute in VM xml")
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file, wait_for_event=True)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name,
                                          xml_file,
                                          wait_for_event=True)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev, wait_remove_event=True)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = utils_disk.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm" " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
        for msg in unsupported_err:
            if msg in str(details):
                test.cancel(str(details))
        test.fail("VM failed to start. Error: %s" % str(details))
    finally:
        # Remove ceph configure file if created.
        if ceph_cfg:
            os.remove(ceph_cfg)
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(
                       mon_host, key_opt,
                       os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
Exemplo n.º 33
def run(test, params, env):
    """
    Test memory management of nvdimm
    """
    vm_name = params.get('main_vm')

    check = params.get('check', '')
    qemu_checks = params.get('qemu_checks', '')

    def mount_hugepages(page_size):
        """
        To mount hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size

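        # Remount /dev/hugepages so the requested page size takes effect.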
        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)

    def setup_hugepages(page_size=2048, shp_num=4000):
        """
        To setup hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        :param shp_num: number of hugepages
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
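        # qemu.conf changes (hugetlbfs_mount) only take effect after a
        # libvirtd restart.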
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        To recover hugepages
        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def create_mbxml():
        """
        Create memoryBacking xml for test
        """
        mb_params = {k: v for k, v in params.items() if k.startswith('mbxml_')}
        logging.debug(mb_params)
        mb_xml = vm_xml.VMMemBackingXML()
        mb_xml.xml = "<memoryBacking></memoryBacking>"
        for attr_key in mb_params:
            val = mb_params[attr_key]
            logging.debug("Set mb param %s = %s", attr_key, val)
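            # Values containing ':' are dict-like strings from the test
            # config; eval() turns them into Python objects before assignment.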
            setattr(mb_xml, attr_key.replace('mbxml_', ''),
                    eval(val) if ':' in val else val)
        logging.debug(mb_xml)
        return mb_xml.copy()

    def create_cpuxml():
        """
        Create cpu xml for test
        """
        cpu_params = {
            k: v
            for k, v in params.items() if k.startswith('cpuxml_')
        }
        logging.debug(cpu_params)
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.xml = "<cpu><numa/></cpu>"
        if 'cpuxml_numa_cell' in cpu_params:
            cpu_params['cpuxml_numa_cell'] = cpu_xml.dicts_to_cells(
                eval(cpu_params['cpuxml_numa_cell']))
        for attr_key in cpu_params:
            val = cpu_params[attr_key]
            logging.debug("Set cpu param %s = %s", attr_key, val)
            setattr(cpu_xml, attr_key.replace('cpuxml_', ''),
                    eval(val) if ':' in val else val)
        logging.debug(cpu_xml)
        return cpu_xml.copy()

    def create_dimm_xml(**mem_param):
        """
        Create xml of dimm memory device
        """
        mem_xml = utils_hotplug.create_mem_xml(
            pg_size=int(mem_param['source_pagesize']),
            tg_size=mem_param['target_size'],
            tg_sizeunit=mem_param['target_size_unit'],
            tg_node=mem_param['target_node'],
            mem_model="dimm")
        logging.debug(mem_xml)
        return mem_xml.copy()

    huge_pages = [
        ast.literal_eval(x) for x in params.get("huge_pages", "").split()
    ]

    config = utils_config.LibvirtQemuConfig()
    page_size = params.get("page_size")
    discard = params.get("discard")
    setup_hugepages(int(page_size))

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        vm = env.get_vm(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Set cpu according to params
        cpu_xml = create_cpuxml()
        vmxml.cpu = cpu_xml

        # Set memoryBacking according to params
        mb_xml = create_mbxml()
        vmxml.mb = mb_xml

        # Update other vcpu, memory info according to params
        update_vm_args = {
            k: params[k]
            for k in params if k.startswith('setvm_')
        }
        logging.debug(update_vm_args)
        for key, value in list(update_vm_args.items()):
            attr = key.replace('setvm_', '')
            logging.debug('Set %s = %s', attr, value)
            setattr(vmxml, attr, int(value) if value.isdigit() else value)
        vmxml.sync()
        logging.debug(virsh.dumpxml(vm_name))

        # hugepages setting
        if huge_pages:
            membacking = vm_xml.VMMemBackingXML()
            hugepages = vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(huge_pages)):
                pagexml = hugepages.PageXML()
                pagexml.update(huge_pages[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking
            logging.debug(virsh.dumpxml(vm_name))

        if check == "mem_dev" or check == "hot_plug":
            # Add  dimm mem device to vm xml
            dimm_params = {
                k.replace('dimmxml_', ''): v
                for k, v in params.items() if k.startswith('dimmxml_')
            }
            dimm_xml = create_dimm_xml(**dimm_params)
            if params.get('dimmxml_mem_access'):
                dimm_xml.mem_access = dimm_params['mem_access']
            vmxml.add_device(dimm_xml)
            logging.debug(virsh.dumpxml(vm_name))

        test_vm = env.get_vm(vm_name)
        vmxml.sync()
        if test_vm.is_alive():
            test_vm.destroy()

        virsh.start(vm_name, debug=True, ignore_status=False)
        test_vm.wait_for_login()

        if check in ('numa_cell', 'mem_dev'):
            # Check qemu command line one by one
            logging.debug("Checking qemu command line for expected options")
            if discard == 'yes':
                libvirt.check_qemu_cmd_line(qemu_checks)
            elif libvirt.check_qemu_cmd_line(qemu_checks, True):
                test.fail("The unexpected [%s] exist in qemu cmd" %
                          qemu_checks)

        if check == 'hot_plug':
            # Add dimm device to vm xml
            dimm_params2 = {
                k.replace('dimmxml2_', ''): v
                for k, v in params.items() if k.startswith('dimmxml2_')
            }
            dimm_xml2 = create_dimm_xml(**dimm_params2)
            if params.get('dimmxml2_mem_access'):
                dimm_xml2.mem_access = dimm_params2['mem_access']

            ori_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                'memory')
            logging.debug('Starts with %d memory devices', len(ori_devices))

            result = virsh.attach_device(vm_name, dimm_xml2.xml, debug=True)
            libvirt.check_exit_status(result)

            # After attach, there should be a memory device added
            devices_after_attach = vm_xml.VMXML.new_from_dumpxml(
                vm_name).get_devices('memory')
            logging.debug('After attach, vm has %d memory devices',
                          len(devices_after_attach))

            if len(ori_devices) != len(devices_after_attach) - 1:
                test.fail(
                    'Number of memory devices after attach is %d, should be %d'
                    % (len(devices_after_attach), len(ori_devices) + 1))

            alive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            dimm_detach = alive_vmxml.get_devices('memory')[-1]
            logging.debug(dimm_detach)

            # Hot-unplug dimm device
            result = virsh.detach_device(vm_name, dimm_detach.xml, debug=True)
            libvirt.check_exit_status(result)
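            # Memory device unplug is asynchronous; the guest OS needs to
            # offline the dimm before it disappears from the domain xml.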

            left_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                'memory')
            logging.debug(left_devices)

            if len(left_devices) != len(ori_devices):
                # Re-check once after a delay in case the unplug is still
                # in flight.
                time.sleep(60)
                left_devices = vm_xml.VMXML.new_from_dumpxml(
                    vm_name).get_devices('memory')
            if len(left_devices) != len(ori_devices):
                test.fail(
                    'Number of memory devices after detach is %d, should be %d'
                    % (len(left_devices), len(ori_devices)))

    except virt_vm.VMStartError as e:
        test.fail("VM failed to start." "Error: %s" % str(e))
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        bkxml.sync()
Exemplo n.º 34
def run(test, params, env):
    """
    1. prepare a vHBA
    2. find the nodedev's lun name
    3. prepare the lun dev's xml
    4. start vm
    5. attach disk xml to vm
    6. login vm and check the disk
    7. detach the virtual disk
    8. check the blkdev gone
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    device_type = params.get("device_type", "file")
    device_target = params.get("device_target", "vdb")
    lun_dir_method = params.get("lun_dir_method", "by-path")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    readonly = params.get("readonly", "no")
    new_vhbas = []
    blk_dev = ""
    lun_dev = ""
    lun_dev_path = ""
    lun_sl = []
    new_disk = ""
    vm = env.get_vm(vm_name)
    try:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        old_disk_count = vmxml.get_disk_count(vm_name)
        # Prepare vHBA
        online_hbas = utils_npiv.find_hbas("hba")
        old_vhbas = utils_npiv.find_hbas("vhba")
        if not online_hbas:
            raise exceptions.TestSkipError("Host doesn't have online hba!")
        first_online_hba = online_hbas[0]
        new_vhba = utils_npiv.nodedev_create_from_xml({
            "nodedev_parent": first_online_hba,
            "scsi_wwnn": wwnn,
            "scsi_wwpn": wwpn
        })
        utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                            timeout=_TIMEOUT)
        if not utils_npiv.is_vhbas_added(old_vhbas):
            raise exceptions.TestFail("vHBA is not successfully created.")
        new_vhbas.append(new_vhba)
        new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
        # Get the new block device generated by the new vHBA
        utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                            timeout=_TIMEOUT)
        blk_devs = get_blks_by_scsi(new_vhba_scsibus)
        if not blk_devs:
            raise exceptions.TestFail("block device not found with scsi_%s",
                                      new_vhba_scsibus)
        first_blk_dev = blk_devs[0]
        # Get the symbolic link of the device in /dev/disk/by-[path|uuid|id]
        logging.debug("first_blk_dev = %s, lun_dir_method = %s" %
                      (first_blk_dev, lun_dir_method))
        utils_misc.wait_for(
            lambda: get_symbols_by_blk(first_blk_dev, lun_dir_method),
            timeout=_TIMEOUT)
        lun_sl = get_symbols_by_blk(first_blk_dev, lun_dir_method)
        if not lun_sl:
            raise exceptions.TestFail("lun symbolic links not found under "
                                      "/dev/disk/%s/ for block device %s." %
                                      (lun_dir_method, blk_dev))
        lun_dev = lun_sl[0]
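        # Symlinks under /dev/disk/by-path (or by-id/by-uuid) give a stable
        # name for the LUN that survives device renumbering.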
        lun_dev_path = "/dev/disk/" + lun_dir_method + "/" + lun_dev
        # Prepare xml of virtual disk
        disk_params = {
            'type_name': device_type,
            'device': disk_device,
            'driver_name': driver_name,
            'driver_type': driver_type,
            'source_file': lun_dev_path,
            'target_dev': device_target,
            'target_bus': target_bus,
            'readonly': readonly
        }
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                               vm.address_cache)
        old_disks = libvirt_vm.get_disks()
        # Attach disk
        dev_attach_status = virsh.attach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(dev_attach_status)

        cur_disk_count = vmxml.get_disk_count(vm_name)
        cur_disks = libvirt_vm.get_disks()
        if cur_disk_count <= old_disk_count:
            raise exceptions.TestFail("Failed to attach disk: %s" %
                                      lun_disk_xml)
        new_disk = "".join(list(set(old_disks) ^ set(cur_disks)))
        logging.debug("Attached device in vm:%s", new_disk)
        # Check disk in VM
        output = session.cmd_status_output('mkfs.ext4 -F %s' % new_disk)
        logging.debug("mkfs.ext4 the disk in vm, result: %s", output[1])
        if not check_vm_disk(session, new_disk, readonly):
            raise exceptions.TestFail("Failed check the disk in vm.")
        session.cmd_status_output('umount %s' % new_disk)
        # Detach disk
        dev_detach_status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(dev_detach_status)
        cur_disks = libvirt_vm.get_disks()
        if cur_disks != old_disks:
            raise exceptions.TestFail("Detach disk failed.")
        session.close()

    finally:
        utils_npiv.vhbas_cleanup(new_vhbas)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        process.system('service multipathd restart', verbose=True)
Exemplo n.º 35
    def trigger_events(dom, events_list=None):
        """
        Trigger various events in events_list

        :param dom: the vm objects corresponding to the domain
        :return: the expected output that virsh event command prints out
        """
        if events_list is None:
            # Avoid a shared mutable default argument
            events_list = []
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        logging.debug("Triggering events for domain %s", dom.name)
        xmlfile = dom.backup_xml()

        try:
            for event in events_list:
                if event in [
                        'start', 'restore', 'create', 'edit', 'define',
                        'undefine', 'crash'
                ]:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    #Check whether 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName(
                        "description")
                    if not find_desc:
                        #If not exists, add one for it.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config",
                                   "Added desc for testvm", **virsh_dargs)
                    #The edit operation is to delete 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            #Shutdown reason distinguished from qemu_2.9.0-9
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after guest request")
                    else:
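                        # Signalling the qemu process directly emulates a
                        # host-initiated shutdown.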
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=60)
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(
                            dom.name, "''", target_device,
                            ("--type cdrom --sourcetype file --driver qemu " +
                             "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")

                else:
                    test.error("Unsupported event: %s" % event)
                # Event may not received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
Exemplo n.º 36
def run(test, params, env):
    """
    Test hosted scsi device passthrough
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def prepare_hostdev_xml(**kwargs):
        """
        Prepare the scsi device's xml

        :param kwargs: The arguments to generate scsi host device xml.
        :return: The xml of the scsi host device.
        """
        hostdev_xml = hostdev.Hostdev()
        hostdev_xml.type = "scsi"
        if kwargs.get("managed"):
            hostdev_xml.managed = kwargs.get("managed")
        hostdev_xml.mode = kwargs.get("mode", "subsystem")
        if kwargs.get("sgio"):
            hostdev_xml.sgio = kwargs.get("sgio")
        if kwargs.get("rawio"):
            hostdev_xml.rawio = kwargs.get("rawio")
        hostdev_xml.readonly = "yes" == kwargs.get("readonly")
        hostdev_xml.shareable = "yes" == kwargs.get("shareable")

        source_args = {}
        source_protocol = kwargs.get("source_protocol")
        if source_protocol == "iscsi":
            # Use iscsi lun directly
            source_args['protocol'] = "iscsi"
            source_args['host_name'] = kwargs.get("iscsi_host", "ISCSI_HOST")
            source_args['host_port'] = kwargs.get("iscsi_port", "ISCSI_PORT")
            source_args['source_name'] = kwargs.get("iqn_name", "IQN_NAME")
            source_args['auth_user'] = kwargs.get("auth_user")
            source_args['secret_type'] = kwargs.get("secret_type")
            source_args['secret_uuid'] = kwargs.get("secret_uuid")
            source_args['secret_usage'] = kwargs.get("secret_usage")
            source_args['iqn_id'] = kwargs.get("iqn_id")
        elif source_protocol:
            test.cancel("We do not support source protocol = %s yet" %
                        source_protocol)
        else:
            # Use local scsi device
            source_args['adapter_name'] = kwargs.get("adapter_name",
                                                     "scsi_host999")
            source_args['bus'] = kwargs.get("addr_bus", "0")
            source_args['target'] = kwargs.get('addr_target', "0")
            source_args['unit'] = kwargs.get('addr_unit', "0")
        # If any attributes not used, remove them from source dict to avoid
        # attr="" or attr="None" situation.
        for key, value in list(source_args.items()):
            if not value:
                source_args.pop(key)
        hostdev_xml.source = hostdev_xml.new_source(**source_args)
        logging.info("hostdev xml is: %s", hostdev_xml)
        return hostdev_xml

    def prepare_iscsi_lun(emulated_img="emulated-iscsi", img_size='1G'):
        """
        Prepare iscsi lun

        :param emulated_img: The name of the iscsi lun device.
        :param img_size: The size of the iscsi lun device.
        :return: The iscsi target and lun number.
        """
        enable_chap_auth = "yes" == params.get("enable_chap_auth")
        if enable_chap_auth:
            chap_user = params.get("chap_user", "redhat")
            chap_passwd = params.get("chap_passwd", "password")
        else:
            chap_user = ""
            chap_passwd = ""
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            emulated_image=emulated_img,
            image_size=img_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip="127.0.0.1")
        return iscsi_target, lun_num

    def prepare_local_scsi(emulated_img="emulated-iscsi", img_size='1G'):
        """
        Prepare a local scsi device

        :param emulated_img: The name of the iscsi lun device.
        :param img_size: The size of the iscsi lun device.
        :return: The iscsi scsi/bus/target/unit number.
        """
        lun_info = []
        device_source = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=True,
            emulated_image=emulated_img,
            image_size=img_size)
        cmd = "targetcli ls"
        cmd_result = process.run(cmd, shell=True)
        logging.debug("new block device is: %s", device_source)
        cmd = "lsscsi | grep %s | awk '{print $1}'" % device_source
        cmd_result = process.run(cmd, shell=True)
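        # lsscsi prints the SCSI address as [host:channel:target:lun] in the
        # first column; extract those four numbers.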
        lun_info = re.findall(r"\d+", str(cmd_result.stdout.strip()))
        if len(lun_info) != 4:
            test.fail("Get wrong scsi lun info: %s" % lun_info)
        scsi_num = lun_info[0]
        bus_num = lun_info[1]
        target_num = lun_info[2]
        unit_num = lun_info[3]
        return scsi_num, bus_num, target_num, unit_num

    def get_new_disks(vm, old_partitions):
        """
        Get new disks in vm after hostdev plug.

        :param vm: The vm to be checked.
        :param old_partitions: Already existing partitions in vm.
        :return: New disks/partitions in vm, or None if no new disk/partitions.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                logging.debug("PPC machine may need a little sleep time "
                              "to see all disks, related owner may need "
                              "further investigation. Skip the sleep for now.")
                #time.sleep(10)
            new_partitions = utils_disk.get_parts_list(session)
            logging.debug("new partitions are: %s", new_partitions)
            added_partitions = list(
                set(new_partitions).difference(set(old_partitions)))
            session.close()
            if not added_partitions:
                logging.debug("No new partitions found in vm.")
            else:
                logging.debug("Newly added partition(s) is: %s",
                              added_partitions)
            return added_partitions
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as err:
            test.fail("Error happens when get new disk: %s" % str(err))

    def get_unpriv_sgio(scsi_dev):
        """
        Get scsi dev's unpriv_sgio value.

        :param scsi_dev: The scsi device to be checked.
        :return: The unpriv_sgio value of the scsi device.
        """
        cmd = "lsscsi -g | grep '\[%s\]'" % scsi_dev
        try:
            output = process.system_output(cmd, verbose=True, shell=True)
            blkdev = output.split()[-2]
            chardev = output.split()[-1]
            blk_stat = os.stat(blkdev)
            sg_stat = os.stat(chardev)
            blkdev_major = os.major(blk_stat.st_rdev)
            blkdev_minor = os.minor(blk_stat.st_rdev)
            chardev_major = os.major(sg_stat.st_rdev)
            chardev_minor = os.minor(sg_stat.st_rdev)
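            # sysfs indexes devices by major:minor, so translate the stat()
            # st_rdev numbers into the matching unpriv_sgio paths.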
            blkdev_unpriv_path = ("/sys/dev/block/%s:%s/queue/unpriv_sgio" %
                                  (blkdev_major, blkdev_minor))
            chardev_unpriv_path = ("/sys/dev/char/%s:%s/device/unpriv_sgio" %
                                   (chardev_major, chardev_minor))
            # unpriv_sgio support changed in some kernels; e.g.
            # /sys/dev/block/<major>:<minor>/queue/unpriv_sgio may not exist
            if os.path.exists(blkdev_unpriv_path) is False:
                return
            with open(blkdev_unpriv_path, 'r') as f:
                blkdev_unpriv_value = f.read().strip()
            with open(chardev_unpriv_path, 'r') as f:
                chardev_unpriv_value = f.read().strip()
            logging.debug("blkdev unpriv_sgio:%s\nchardev unpriv_sgio:%s",
                          blkdev_unpriv_value, chardev_unpriv_value)
            if ((not blkdev_unpriv_value or not chardev_unpriv_value)
                    or (blkdev_unpriv_value != chardev_unpriv_value)):
                test.error("unpriv_sgio values are incorrect under block "
                           "and char folders.")
            return blkdev_unpriv_value
        except Exception as detail:
            test.fail(
                "Error happens when try to get the unpriv_sgio value: %s" %
                detail)

    def check_unpriv_sgio(scsi_dev, unpriv_sgio=False, shareable_dev=True):
        """
        Check device's unpriv_sgio value with provided boolean value.

        :param scsi_dev: The scsi device to be checked.
        :param unpriv_sgio: If the expected unpriv_sgio is True or False.
        :param shareable_dev: If the device is a shareable one.
        """
        scsi_unpriv_sgio = get_unpriv_sgio(scsi_dev)
        # get_unpriv_sgio() returns None when the sysfs path does not exist
        # (e.g. on RHEL 9); treat that as a pass.
        if scsi_unpriv_sgio is None:
            return True
        if shareable_dev:
            # Only when <shareable/> set, the sgio takes effect.
            if ((unpriv_sgio and scsi_unpriv_sgio == '1')
                    or (not unpriv_sgio and scsi_unpriv_sgio == '0')):
                return True
        else:
            if scsi_unpriv_sgio == '0':
                return True
        return False

    def check_disk_io(vm, partition):
        """
        Check if the disk partition in vm can be normally used.

        :param vm: The vm to be checked.
        :param partition: The disk partition in vm to be checked.
        :return: If the disk can be used, return True.
        """
        readonly = "yes" == params.get("readonly")
        readonly_keywords = ['readonly', 'read-only', 'read only']
        try:
            session = vm.wait_for_login()
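            # Format, mount, write and unmount in a single shell line; any
            # step failing breaks the && chain and returns nonzero.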
            cmd = (
                "fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                "mkdir -p {0} && mount /dev/{0} {0} && echo"
                " teststring > {0}/testfile && umount {0}".format(partition))
            status, output = session.cmd_status_output(cmd)
            session.close()
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            if readonly:
                for ro_kw in readonly_keywords:
                    if ro_kw in str(output).lower():
                        return True
                logging.error("Hostdev set with 'readonly'. "
                              "But still can be operated.")
                return False
            return status == 0
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as err:
            logging.debug("Error happens when check disk io in vm: %s",
                          str(err))
            return False

    def ppc_controller_update(vmxml):
        """
        Update controller of ppc vm to 'virtio-scsi' to support 'scsi' type
        :return:
        """
        device_bus = 'scsi'
        if params.get('machine_type') == 'pseries':
            if not vmxml.get_controllers(device_bus, 'virtio-scsi'):
                vmxml.del_controller(device_bus)
                ppc_controller = Controller('controller')
                ppc_controller.type = device_bus
                ppc_controller.index = '0'
                ppc_controller.model = 'virtio-scsi'
                vmxml.add_device(ppc_controller)
                vmxml.sync()

    coldplug = "cold_plug" == params.get("attach_method")
    hotplug = "hot_plug" == params.get("attach_method")
    status_error = "yes" == params.get("status_error")
    use_iscsi_directly = "iscsi" == params.get("source_protocol")
    sgio = params.get("sgio")
    test_shareable = "yes" == params.get("shareable")
    device_num = int(params.get("device_num", "1"))
    new_disks = []
    new_disk = ""
    attach_options = ""
    iscsi_target = ""
    lun_num = ""
    adapter_name = ""
    addr_scsi = ""
    addr_bus = ""
    addr_target = ""
    addr_unit = ""
    auth_sec_uuid = ""
    hostdev_xmls = []

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    enable_initiator_set = "yes" == params.get("enable_initiator_set", "no")
    if enable_initiator_set and not libvirt_version.version_compare(6, 7, 0):
        test.cancel(
            "current version doesn't support iscsi initiator hostdev feature")
    try:
        # Load sg module if necessary
        process.run("modprobe sg",
                    shell=True,
                    ignore_status=True,
                    verbose=True)
        # Backup vms' xml
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml = vmxml_backup.copy()
        ppc_controller_update(vmxml)
        if test_shareable:
            vm_names = params.get("vms").split()
            if len(vm_names) < 2:
                test.error("At least 2 vms should be prepared "
                           "for shareable test.")
            vm2_name = vm_names[1]
            vm2 = env.get_vm(vm2_name)
            vm2_xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm2_name)
            vm2_xml = vm2_xml_backup.copy()
            ppc_controller_update(vm2_xml)
            if vm2.is_dead():
                vm2.start()
                session = vm2.wait_for_login()
                vm2_old_partitions = utils_disk.get_parts_list(session)
                session.close()

        # Get disk partitions info before hot/cold plug virtual disk
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()
        old_partitions = utils_disk.get_parts_list(session)
        session.close()
        for dev_num in range(device_num):
            if use_iscsi_directly:
                iscsi_target, lun_num = prepare_iscsi_lun(emulated_img='img' +
                                                          str(dev_num))
                params['iscsi_host'] = "127.0.0.1"
                params['iscsi_port'] = "3260"
                params['iqn_name'] = iscsi_target + "/" + lun_num
            else:
                addr_scsi, addr_bus, addr_target, addr_unit = prepare_local_scsi(
                    emulated_img='img' + str(dev_num))
                if not params.get('adapter_name') or dev_num >= 1:
                    params['adapter_name'] = "scsi_host" + addr_scsi
                params['addr_bus'] = addr_bus
                params['addr_target'] = addr_target
                params['addr_unit'] = addr_unit
                lsscsi_keyword = (addr_scsi + ":" + addr_bus + ":" +
                                  addr_target + ":" + addr_unit)

            enable_chap_auth = "yes" == params.get("enable_chap_auth")
            auth_sec_usage = params.get("auth_sec_usage", "libvirtiscsi")
            if enable_chap_auth:
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_password", "password")
                auth_sec_dict = {
                    "sec_usage": "iscsi",
                    "sec_target": auth_sec_usage
                }
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid,
                                       chap_passwd,
                                       encode=True,
                                       debug=True)
                params['auth_user'] = chap_user
                params['secret_type'] = "iscsi"
                params['secret_uuid'] = auth_sec_uuid

            if enable_initiator_set:
                params['iqn_id'] = iscsi_target
            hostdev_xml = prepare_hostdev_xml(**params)
            hostdev_xmls.append(hostdev_xml)

        if coldplug:
            attach_options = "--config"
        # Attach virtual disk to vm
        for dev_num in range(device_num):
            result = virsh.attach_device(vm_name,
                                         hostdev_xmls[dev_num].xml,
                                         flagstr=attach_options,
                                         ignore_status=True,
                                         debug=True)
            libvirt.check_exit_status(result, status_error & hotplug)
        if coldplug:
            vm.destroy(gracefully=False)
            result = virsh.start(vm_name, ignore_status=True, debug=True)
            libvirt.check_exit_status(result, status_error & coldplug)
        if not status_error:
            vm.wait_for_login().close()
            # Here we may need to wait for sometime, update if issue happens
            # again.
            #time.sleep(10)
            utils_misc.wait_for(lambda: get_new_disks(vm, old_partitions), 20)
            new_disks = get_new_disks(vm, old_partitions)
            if len(new_disks) != device_num:
                test.fail("Attached %s virtual disk but got %s." %
                          (device_num, len(new_disks)))
            new_disk = new_disks[0]
            for new_disk in new_disks:
                # Check disk io of the hostdev in vm.
                if not check_disk_io(vm, new_disk):
                    test.fail("Got unexpected result when operate the newly "
                              "added disk in vm.")

                # Check if unpri_sgio value correctly set by the xml sgio param.
                if not use_iscsi_directly:
                    if sgio == "unfiltered":
                        unpriv_sgio = True
                    else:
                        unpriv_sgio = False
                    if not (check_unpriv_sgio(lsscsi_keyword, unpriv_sgio,
                                              test_shareable)):
                        test.fail(
                            "SCSI dev's unpriv_sgio value is inconsistent with "
                            "hostdev xml's sgio value.")

                # Check shareable device.
                if test_shareable:
                    vm2_xml.add_device(hostdev_xml)
                    session = vm2.wait_for_login()
                    result = virsh.attach_device(vm2_name,
                                                 hostdev_xml.xml,
                                                 ignore_status=False,
                                                 debug=True)
                    utils_misc.wait_for(
                        lambda: get_new_disks(vm2, vm2_old_partitions), 20)
                    vm2_new_disks = get_new_disks(vm2, vm2_old_partitions)
                    if len(vm2_new_disks) != 1:
                        test.fail(
                            "In second vm, attached 1 virtual disk but got %s."
                            % len(vm2_new_disks))
                    vm2_new_disk = vm2_new_disks[0]
                    if not check_disk_io(vm2, vm2_new_disk):
                        test.fail(
                            "Testing shareable device, got unexpected result "
                            "when operate the newly added disk in the second vm."
                        )

            # Detach the disk from vm.
            for dev_num in range(device_num):
                result = virsh.detach_device(vm_name,
                                             hostdev_xmls[dev_num].xml,
                                             flagstr=attach_options,
                                             ignore_status=False,
                                             debug=True)

            # Check the detached disk in vm.
            if coldplug:
                vm.destroy(gracefully=False)
                vm.start()
                vm.wait_for_login().close()
            utils_misc.wait_for(lambda: not get_new_disks(vm, old_partitions),
                                20)
            new_disks = get_new_disks(vm, old_partitions)
            if len(new_disks) != 0:
                test.fail("Unplug virtual disk failed.")
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Restoring vm
        vmxml_backup.sync()
        if test_shareable:
            if vm2.is_alive():
                vm2.destroy(gracefully=False)
            vm2_xml_backup.sync()
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        for dev_num in range(device_num):
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           emulated_image='img' + str(dev_num))
Exemplo n.º 37
def run(test, params, env):
    """
    Test memory device attach/detach operations.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare memory device xml.
    3.Edit domain xml and start the domain.
    4.Perform attach/detach operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def get_vm_memtotal(session):
        """
        Get guest total memory
        """
        proc_meminfo = session.cmd_output("cat /proc/meminfo")
        # verify format and units are expected
        return int(re.search(r'MemTotal:\s+(\d+)\s+[kK]B',
                             proc_meminfo).group(1))

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write to a file on it,
        # it is the memory operation
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'"
                    % (max_mem_slots, max_mem_rt))
        if tg_size:
            size = int(tg_size) * 1024
            cmd += (" | grep 'memory-backend-ram,id=memdimm0,size=%s"
                    % size)
            if pg_size:
                cmd += ",host-nodes=%s" % node_mask
                if numa_memnode:
                    for node in numa_memnode:
                        if ('nodeset' in node and
                                node['nodeset'] in node_mask):
                            cmd += ",policy=%s" % node['mode']
                cmd += ".*pc-dimm,node=%s" % tg_node
            if mem_addr:
                cmd += (".*slot=%s,addr=%s" %
                        (mem_addr['slot'], int(mem_addr['base'], 16)))
            cmd += "'"
        # Run the command
        utils.run(cmd)

    def check_guest_meminfo(old_mem):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s"
               % (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: get_vm_memtotal(session) != int(old_mem), 5)
        new_mem = get_vm_memtotal(session)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        if new_mem != int(old_mem) + int(tg_size):
            raise error.TestFail("Total memory on guest couldn't"
                                 " changed after attach memory "
                                 "device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            if at_mem:
                assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                #assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                if len(mem_dev) != 1:
                    raise error.TestFail("Found wrong number of"
                                         " memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                assert int(new_max_mem) - int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                #assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils.log_last_traceback()
            raise error.TestFail("Found unmatched memory setting"
                                 " from domain xml")

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(test.tmpdir,
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def create_mem_xml():
        """
        Create memory device xml.
        """
        mem_xml = memory.Memory()
        mem_model = params.get("mem_model", "dimm")
        mem_xml.mem_model = mem_model
        if tg_size:
            tg_xml = memory.Memory.Target()
            tg_xml.size = int(tg_size)
            tg_xml.size_unit = tg_sizeunit
            tg_xml.node = int(tg_node)
            mem_xml.target = tg_xml
        if pg_size:
            src_xml = memory.Memory.Source()
            src_xml.pagesize = int(pg_size)
            src_xml.pagesize_unit = pg_unit
            src_xml.nodemask = node_mask
            mem_xml.source = src_xml
        if mem_addr:
            mem_xml.address = mem_xml.new_mem_address(
                **{"attrs": mem_addr})

        logging.debug("Memory device xml: %s", mem_xml)
        return mem_xml.copy()

    def add_device(dev_xml, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach_device:
            ret = virsh.attach_device(vm_name, dev_xml.xml,
                                      flagstr=attach_option)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # Attributes not present
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cells
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages:
            membacking = vm_xml.VMMemBackingXML()
            hugepages = vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(huge_pages)):
                pagexml = hugepages.PageXML()
                pagexml.update(huge_pages[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking
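            # The resulting <memoryBacking> element is roughly (an
            # illustrative sketch; size/nodeset come from huge_pages):
            #   <memoryBacking>
            #     <hugepages>
            #       <page size='2048' unit='KiB' nodeset='0'/>
            #     </hugepages>
            #   </memoryBacking>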

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    #cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")

    # params for attached device
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [ast.literal_eval(x)
                  for x in params.get("huge_pages", "").split()]
    numa_memnode = [ast.literal_eval(x)
                    for x in params.get("numa_memnode", "").split()]

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Drop caches first so the host has enough memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()

        # Always start the domain when attaching a memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = get_vm_memtotal(session)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        dev_xml = None

        # To attach the memory device.
        if add_mem_device:
            at_times = int(params.get("attach_times", 1))
            dev_xml = create_mem_xml()
            for x in xrange(at_times):
                # If an error is expected, the command's exit status
                # should be checked only on the last iteration
                if x == at_times - 1:
                    add_device(dev_xml, attach_error)
                else:
                    add_device(dev_xml)

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml,
                            **virsh_dargs).exit_status:
                vmxml_backup.define()
                raise error.TestFail("Cann't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError:
                if start_error:
                    pass
                else:
                    raise error.TestFail("VM Failed to start"
                                         " for some reason!")

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem,
                                  flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config") and
                not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total)

        # Consuming memory on guest,
        # to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            consume_vm_mem()
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                raise error.TestFail("Numa memory can't be consumed"
                                     " on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            vm.start()
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        if detach_device:
            if not dev_xml:
                dev_xml = create_mem_xml()
            ret = virsh.detach_device(vm_name, dev_xml.xml,
                                      flagstr=attach_option)
            libvirt.check_exit_status(ret, detach_error)
            if test_dom_xml:
                check_dom_xml(dt_mem=detach_device)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
Example #38
def run(test, params, env):
    """
    Test virsh detach-device command.

    The command can detach disk, cdrom, and interface devices.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh detach-device operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    def create_device_file(device_source="/tmp/attach.img"):
        """
        Create a device source file.

        :param device_source: Device source file.
        """
        try:
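            # Seeking to the last byte and writing a single byte yields
            # a sparse 512 MiB image without writing 512 MiB of data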
            with open(device_source, 'wb') as device_file:
                device_file.seek((512 * 1024 * 1024) - 1)
                device_file.write(str(0).encode())
        except IOError:
            logging.error("Image file %s created failed.", device_source)

    def check_vm_partition(vm, device, os_type, target_name):
        """
        Check VM disk's partition.

        :param vm: VM guest.
        :param device: Device type.
        :param os_type: VM's operating system type.
        :param target_name: Device target name.
        :return: True if the check succeeds.
        """
        logging.info("Checking VM partittion...")
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                if device == "disk":
                    s, o = session.cmd_status_output(
                        "grep %s /proc/partitions" % target_name)
                    logging.info("Virtio devices in VM:\n%s", o)
                elif device == "cdrom":
                    s, o = session.cmd_status_output("ls /dev/cdrom")
                    logging.info("CDROM in VM:\n%s", o)
                elif device == "iface":
                    s, o = session.cmd_status_output("ls /")
                session.close()
                if s != 0:
                    return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def acpiphp_module_modprobe(vm, os_type):
        """
        Add acpiphp module if VM's os type is rhel5.*

        :param vm: VM guest.
        :param os_type: VM's operating system type.
        :return: True if the operation succeeds.
        """
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                s_rpm, _ = session.cmd_status_output("rpm --version")
                # If status is different from 0, this
                # guest OS doesn't support the rpm package
                # manager
                if s_rpm:
                    session.close()
                    return True
                _, o_vd = session.cmd_status_output("rpm -qa | grep"
                                                    " redhat-release")
                if o_vd.find("5Server") != -1:
                    s_mod, _ = session.cmd_status_output("modprobe acpiphp")
                    if s_mod != 0:
                        session.close()
                        return False
                session.close()
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def create_device_xml(params, xml_path, device_source):
        """
        Create an xml file for the device
        """
        device_xml_name = params.get("dt_device_xml", "device.xml")
        device_xml_file = os.path.join(xml_path, device_xml_name)
        device_type = params.get("dt_device_device", "disk")
        if device_type in ["disk", 'cdrom']:
            disk_class = vm_xml.VMXML.get_device_class('disk')
            if test_block_dev:
                disk = disk_class(type_name='block')
                stype = 'dev'
            else:
                disk = disk_class(type_name='file')
                stype = 'file'
            disk.device = device_type
            disk.driver = dict(name='qemu', type='raw')
            disk.source = disk.new_disk_source(attrs={stype: device_source})
            disk.target = dict(bus=device_bus, dev=device_target)
            if detach_alias:
                disk.alias = dict(name=device_alias)
            disk.xmltreefile.write()
            shutil.copyfile(disk.xml, device_xml_file)
        else:
            iface_class = vm_xml.VMXML.get_device_class('interface')
            iface = iface_class(type_name='network')
            iface.mac_address = iface_mac_address
            iface.source = dict(network=iface_network)
            iface.model = iface_model_type
            if detach_alias:
                iface.alias = dict(name=device_alias)
            iface.xmltreefile.write()
            shutil.copyfile(iface.xml, device_xml_file)
        return device_xml_file

    vm_ref = params.get("dt_device_vm_ref", "name")
    dt_options = params.get("dt_device_options", "")
    pre_vm_state = params.get("dt_device_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = "yes" == params.get("dt_device_no_attach", 'no')
    os_type = params.get("os_type", "linux")
    device = params.get("dt_device_device", "disk")
    readonly = "yes" == params.get("detach_readonly", "no")
    tmp_dir = data_dir.get_tmp_dir()
    test_cmd = "detach-device"
    detach_alias = "yes" == params.get("dt_device_alias", "no")
    if detach_alias:
        test_cmd = "detach-device-alias"
        device_alias = "ua-" + str(uuid.uuid4())
    else:
        test_cmd = "detach-device"
    if not virsh.has_command_help_match(test_cmd, dt_options) and\
       not status_error:
        test.cancel("Current libvirt version doesn't support '%s'"
                    " for %s" % (dt_options, test_cmd))

    # Disk specific attributes.
    device_source_name = params.get("dt_device_device_source", "attach.img")
    device_target = params.get("dt_device_device_target", "vdd")
    device_bus = params.get("dt_device_bus_type")
    test_block_dev = "yes" == params.get("dt_device_iscsi_device", "no")

    # interface specific attributes.
    iface_network = params.get("dt_device_iface_network")
    iface_model_type = params.get("dt_device_iface_model_type")
    iface_mac_address = params.get("dt_device_iface_mac_address")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    device_source = os.path.join(tmp_dir, device_source_name)

    # Create virtual device file.
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Can not get iscsi device name in host")
    else:
        create_device_file(device_source)

    try:
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # If we are testing cdrom device, we need to detach hdc in VM first.
        if device == "cdrom":
            virsh.detach_disk(vm_name,
                              device_target,
                              "--config",
                              ignore_status=True)

        device_xml = create_device_xml(params, tmp_dir, device_source)
        if not no_attach:
            s_attach = virsh.attach_device(vm_name,
                                           device_xml,
                                           flagstr="--config",
                                           debug=True).exit_status
            if s_attach != 0:
                logging.error("Attach device failed before testing "
                              "detach-device")

        vm.start()
        vm.wait_for_serial_login()

        # Add acpiphp module before testing if VM's os type is rhel5.*
        if device in ['disk', 'cdrom']:
            if not acpiphp_module_modprobe(vm, os_type):
                test.error("Add acpiphp module failed before test.")

        # Turn VM into certain state.
        if pre_vm_state == "paused":
            logging.info("Suspending %s...", vm_name)
            if vm.is_alive():
                vm.pause()
        elif pre_vm_state == "shut off":
            logging.info("Shutting down %s...", vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)

        # Get disk count before test.
        if device in ['disk', 'cdrom']:
            device_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)
        else:
            vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device_count_before_cmd = len(vm_cls.devices)

        # Test.
        domid = vm.get_id()
        domuuid = vm.get_uuid()

        # Confirm how to reference a VM.
        if vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref == "uuid":
            vm_ref = domuuid
        else:
            vm_ref = ""

        if detach_alias:
            status = virsh.detach_device_alias(vm_ref,
                                               device_alias,
                                               dt_options,
                                               True,
                                               7,
                                               readonly=readonly,
                                               debug=True).exit_status
        else:
            status = virsh.detach_device(vm_ref,
                                         device_xml,
                                         readonly=readonly,
                                         flagstr=dt_options,
                                         debug=True).exit_status
        # If status_error is False, wait a while for the detach operation to complete.
        if not status_error:
            utils_misc.wait_for(
                lambda: not libvirt.device_exists(vm, device_target),
                timeout=40)
        time.sleep(2)
        # Resume guest after the command. On newer libvirt the change in
        # the xml file is only made after the guest is resumed; this
        # used to be a bug.
        if pre_vm_state == "paused":
            vm.resume()

        # Check disk count after command.
        check_count_after_cmd = True
        if device in ['disk', 'cdrom']:
            device_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
        else:
            vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device_count_after_cmd = len(vm_cls.devices)
        if device_count_after_cmd < device_count_before_cmd:
            check_count_after_cmd = False

        # Recover VM state.
        if pre_vm_state == "shut off" and device in ['disk', 'cdrom']:
            vm.start()

        # Check in VM after command.
        check_vm_after_cmd = True
        if device in ['disk', 'cdrom']:
            check_vm_after_cmd = check_vm_partition(vm, device, os_type,
                                                    device_target)

        # Destroy VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        if device in ['disk', 'cdrom']:
            device_count_after_shutdown = vm_xml.VMXML.get_disk_count(vm_name)
        else:
            vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device_count_after_shutdown = len(vm_cls.devices)
        if device_count_after_shutdown < device_count_before_cmd:
            check_count_after_shutdown = False
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if test_block_dev:
            libvirt.setup_or_cleanup_iscsi(False)
        elif os.path.exists(device_source):
            os.remove(device_source)
        elif os.path.exists(tmp_dir):
            shutil.rmtree(tmp_dir)

    # Check results.
    if status_error:
        if not status:
            test.fail("detach-device exit with unexpected value.")
    else:
        if status:
            test.fail("virsh detach-device failed.")
        if dt_options.count("config"):
            if check_count_after_shutdown:
                test.fail("See config detached device in "
                          "xml file after VM shutdown.")
            if pre_vm_state == "shut off":
                if check_count_after_cmd:
                    test.fail("See device in xml after detach with"
                              " --config option")
            elif pre_vm_state == "running":
                if not check_vm_after_cmd and device in ['disk', 'cdrom']:
                    test.fail("Cannot see device in VM after"
                              " detach with '--config' option"
                              " when VM is running.")

        elif dt_options.count("live"):
            if check_count_after_cmd:
                test.fail("See device in xml after detach with"
                          "--live option")
            if not check_count_after_shutdown:
                test.fail("Cannot see config detached device in"
                          " xml file after VM shutdown with"
                          " '--live' option.")
            if check_vm_after_cmd and device in ['disk', 'cdrom']:
                test.fail("See device in VM with '--live' option"
                          " when VM is running")
        elif dt_options.count("current"):
            if check_count_after_cmd:
                test.fail("See device in xml after detach with"
                          " --current option")
            if pre_vm_state == "running":
                if not check_count_after_shutdown:
                    test.fail("Cannot see config detached device in"
                              " xml file after VM shutdown with"
                              " '--current' option.")
                if check_vm_after_cmd and device in ['disk', 'cdrom']:
                    test.fail("See device in VM with '--live'"
                              " option when VM is running")
        elif dt_options.count("persistent"):
            if check_count_after_shutdown:
                test.fail("See device deattached with "
                          "'--persistent' option after "
                          "VM shutdown.")
Example #39
def run(test, params, env):
    """
    Test bridge support from network

    1) create a linux bridge and connect a physical interface to it
    2) define nwfilter with "vdsm-no-mac-spoofing"
    3) redefine the vm with the newly created bridge and filter
    4) check if guest can get public ip after vm start
    5) check if guest and host can ping each other
    6) check if guest and host can ping outside
    7) start another vm connected to the same bridge
    8) check if the 2 guests can ping each other
    """
    def create_bridge(br_name, iface_name):
        """
        Create a linux bridge and attach a physical interface to it:
        1. ip link add <name> type bridge; attach <iface> as a port
        2. Move the IP address from <iface> to the bridge via dhclient

        :param br_name: bridge name
        :param iface_name: physical interface name
        :return: bridge created or raise exception
        """
        # Make sure the bridge does not already exist
        if libvirt.check_iface(br_name, "exists", "--all"):
            test.cancel("The bridge %s already exists" % br_name)

        # Create bridge
        utils_package.package_install('tmux')
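        # The re-wiring runs inside tmux so it survives the brief loss
        # of host connectivity while the IP address moves from the
        # physical NIC to the bridge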
        cmd = 'tmux -c "ip link add name {0} type bridge; ip link set {1} up;' \
              ' ip link set {1} master {0}; ip link set {0} up;' \
              ' pkill dhclient; sleep 6; dhclient {0}; ifconfig {1} 0"'.format(br_name, iface_name)
        process.run(cmd, shell=True, verbose=True)

    def define_nwfilter(filter_name):
        """
        Define nwfilter vdsm-no-mac-spoofing with content like:
        <filter name='vdsm-no-mac-spoofing' chain='root'>
            <filterref filter='no-mac-spoofing'/>
            <filterref filter='no-arp-mac-spoofing'/>
        </filter>

        :param filter_name: the name of nwfilter
        :return: filter created or raise exception
        """
        filter_uuid = params.get("filter_uuid",
                                 "11111111-b071-6127-b4ec-111111111111")
        filter_params = {
            "filter_name": "vdsm-no-mac-spoofing",
            "filter_chain": "root",
            "filter_uuid": filter_uuid,
            "filterref_name_1": "no-mac-spoofing",
            "filterref_name_2": "no-arp-mac-spoofing"
        }
        filter_xml = libvirt.create_nwfilter_xml(filter_params).xml
        # Run command
        result = virsh.nwfilter_define(filter_xml,
                                       ignore_status=True,
                                       debug=True)
        if result.exit_status:
            test.fail("Failed to define nwfilter with %s" % filter_xml)

    def ping(src_ip, dest_ip, ping_count, timeout, session=None):
        """
        Wrap of ping

        :param src_ip: source address
        :param dest_ip: destination address
        :param ping_count: count of icmp packet
        :param timeout: timeout for the ping command
        :param session: local execution or session to execute the ping command
        :return: ping succeed or raise exception
        """
        status, output = utils_net.ping(dest=dest_ip,
                                        count=ping_count,
                                        interface=src_ip,
                                        timeout=timeout,
                                        session=session,
                                        force_ipv4=True)
        if status:
            test.fail("Fail to ping %s from %s" % (dest_ip, src_ip))

    def check_net_functions(guest_ip, ping_count, ping_timeout, guest_session,
                            host_ip, remote_url, endpoint_ip):
        # make sure host network works well
        # host ping remote url
        ping(host_ip, remote_url, ping_count, ping_timeout)
        # host ping guest
        ping(host_ip, guest_ip, ping_count, ping_timeout)
        # guest ping host
        ping(guest_ip,
             host_ip,
             ping_count,
             ping_timeout,
             session=guest_session)
        # guest ping remote url
        ping(guest_ip,
             remote_url,
             ping_count,
             ping_timeout,
             session=guest_session)
        # guest ping endpoint
        ping(guest_ip,
             endpoint_ip,
             ping_count,
             ping_timeout,
             session=guest_session)

    # Get test params
    bridge_name = params.get("bridge_name", "test_br0")
    filter_name = params.get("filter_name", "vdsm-no-mac-spoofing")
    ping_count = params.get("ping_count", "5")
    ping_timeout = float(params.get("ping_timeout", "10"))
    iface_name = utils_net.get_net_if(state="UP")[0]
    bridge_script = NETWORK_SCRIPT + bridge_name
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(data_dir.get_tmp_dir(),
                                   "iface-%s.bk" % iface_name)
    attach_interface = "yes" == params.get("attach_interface", "no")
    iface_model = params.get("iface_model", "virtio")
    iface_source = eval(params.get("iface_source", "{'bridge':'test_br0'}"))
    iface_type = params.get("iface_type", None)
    iface_target = params.get("iface_target", None)
    iface_alias = params.get("iface_alias", None)
    hotplug = "yes" == params.get("hotplug", "no")

    vms = params.get("vms").split()
    if len(vms) <= 1:
        test.cancel("Need two VMs to test")
    else:
        vm1_name = vms[0]
        vm2_name = vms[1]

    vm1 = env.get_vm(vm1_name)
    vm2 = env.get_vm(vm2_name)

    # Back up the interface script
    process.run("cp %s %s" % (iface_script, iface_script_bk),
                shell=True,
                verbose=True)
    # Back up vm xml
    vm1_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm1_name)
    vm2_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm2_name)

    # Make sure NetworkManager is running and record its original status
    NM_service = service.Factory.create_service("NetworkManager")
    NM_status = NM_service.status()
    if not NM_status:
        NM_service.start()
    mac = utils_net.generate_mac_address_simple()

    try:
        create_bridge(bridge_name, iface_name)
        define_nwfilter(filter_name)
        if hotplug:
            err_msgs = ("No more available PCI slots",
                        "No more available PCI addresses")
            # delete the original interface on the vm before hot-plug
            if vm1.is_alive():
                vm1.destroy()
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm1_name)
            iface_xml = vmxml.get_devices('interface')[0]
            logging.debug("Delete the original interface")
            vmxml.del_device(iface_xml)
            vmxml.sync()
            vm1.start()
            # do hot-plug
            if attach_interface:
                logging.info("Try to hot-plug interface")
                options = (
                    "%s %s --model %s --mac %s" %
                    (iface_type, iface_source['bridge'], iface_model, mac))
                ret = virsh.attach_interface(vm1_name,
                                             options,
                                             ignore_status=True)
            else:
                logging.info("Try to hot-plug device")
                target = str({'dev': iface_target})
                iface_alias = str({'name': iface_alias})
                vm_iface_source = str(iface_source)
                iface_params = {
                    "type": "bridge",
                    "source": vm_iface_source,
                    "filter": filter_name,
                    "mac": mac,
                    'alias': iface_alias,
                    'target': target,
                    'model': iface_model
                }
                attach_xml = libvirt.modify_vm_iface(vm1_name, 'get_xml',
                                                     iface_params)
                ret = virsh.attach_device(vm1_name,
                                          attach_xml,
                                          ignore_status=True,
                                          debug=True)
            if ret.exit_status:
                if any([msg in ret.stderr for msg in err_msgs]):
                    test.error("No more pci slots, can't attach more devices")
                else:
                    test.fail("Failed to attach-interface: %s" %
                              ret.stderr.strip())
            else:
                logging.debug("Hot-plug interface or device pass")

        else:
            vm_iface_source = str(iface_source)
            vm1_iface_params = {
                "type": "bridge",
                "source": vm_iface_source,
                "filter": filter_name,
                "mac": mac
            }
            libvirt.modify_vm_iface(vm1_name, "update_iface", vm1_iface_params)

            if vm1.is_alive():
                vm1.destroy()

            vm1.start()
        # apply ip address as it may not be initialized
        session1 = session2 = None
        session1 = vm1.wait_for_serial_login()
        utils_net.restart_guest_network(session1)
        output = session1.cmd_output("ifconfig || ip a")
        logging.debug("guest1 ip info %s" % output)

        # Check guest's network function
        host_ip = utils_net.get_ip_address_by_interface(bridge_name)
        remote_url = params.get("remote_ip", "www.google.com")

        try:
            vm1_ip = utils_net.get_guest_ip_addr(session1, mac)
        except Exception as errs:
            test.fail("vm1 can't get IP with the new create bridge: %s" % errs)
        if hotplug:
            # Reboot vm1, then check the network to make sure the
            # interface is still present and works fine
            logging.info("reboot the vm")
            virsh.reboot(vm1_name)
            if session1 is None:
                session1 = vm1.wait_for_serial_login()
            ping(vm1_ip,
                 remote_url,
                 ping_count,
                 ping_timeout,
                 session=session1)
            # restart libvirtd service then check the interface still works fine
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            vm1.cleanup_serial_console()
            vm1.create_serial_console()
            session1 = vm1.wait_for_serial_login()
            ping(vm1_ip,
                 remote_url,
                 ping_count,
                 ping_timeout,
                 session=session1)
            logging.info(
                "after reboot and restart libvirtd, the network works fine")
            # hot-unplug interface/device
            if attach_interface:
                ret = virsh.detach_interface(vm1_name,
                                             "bridge",
                                             ignore_status=True)
            else:
                ret = virsh.detach_device(vm1_name,
                                          attach_xml,
                                          ignore_status=True,
                                          debug=True)
            if ret.exit_status:
                test.fail("Hot-unplug interface/device failed")
            else:
                logging.info("Hot-unplug interface/device succeeded")

        else:
            # Start vm2 connected to the same bridge
            mac2 = utils_net.generate_mac_address_simple()
            vm2_iface_params = {
                "type": "bridge",
                "source": vm_iface_source,
                "filter": filter_name,
                "mac": mac2
            }
            libvirt.modify_vm_iface(vm2_name, "update_iface", vm2_iface_params)
            if vm2.is_alive():
                vm2.destroy()
            vm2.start()

            # Check if vm1 and vm2 can ping each other
            try:
                utils_net.update_mac_ip_address(vm2, timeout=120)
                vm2_ip = vm2.get_address()
            except Exception as errs:
                test.fail("vm2 can't get IP with the new create bridge: %s" %
                          errs)
            session2 = vm2.wait_for_login()
            # make sure guest has got ip address
            utils_net.restart_guest_network(session2)
            output2 = session2.cmd_output("ifconfig || ip a")
            logging.debug("guest ip info %s" % output2)
            # check 2 guests' network functions
            check_net_functions(vm1_ip, ping_count, ping_timeout, session1,
                                host_ip, remote_url, vm2_ip)
            check_net_functions(vm2_ip, ping_count, ping_timeout, session2,
                                host_ip, remote_url, vm1_ip)

    finally:
        logging.debug("Start to restore")
        vm1_xml_bak.sync()
        vm2_xml_bak.sync()
        virsh.nwfilter_undefine(filter_name, ignore_status=True)
        if libvirt.check_iface(bridge_name, "exists", "--all"):
            virsh.iface_unbridge(bridge_name, timeout=60, debug=True)
        if os.path.exists(iface_script_bk):
            process.run("mv %s %s" % (iface_script_bk, iface_script),
                        shell=True,
                        verbose=True)
        if os.path.exists(bridge_script):
            process.run("rm -rf %s" % bridge_script, shell=True, verbose=True)
        cmd = 'tmux -c "ip link set {1} nomaster;  ip link delete {0};' \
              'pkill dhclient; sleep 6; dhclient {1}"'.format(bridge_name, iface_name)
        process.run(cmd, shell=True, verbose=True)
        # reload network configuration
        NM_service.restart()
        # recover NetworkManager
        if NM_status is True:
            NM_service.start()
Example #40
def run(test, params, env):
    """
    Test bridge support from network

    1) create a linux bridge and connect a physical interface to it
    2) define nwfilter with "vdsm-no-mac-spoofing"
    3) redefine the vm with the newly created bridge and filter
    4) check if guest can get public ip after vm start
    5) check if guest and host can ping each other
    6) check if guest and host can ping outside
    7) start another vm connected to the same bridge
    8) check if the 2 guests can ping each other
    """
    def create_bridge_network(br_name,
                              net_name,
                              inbound="{'average':'0'}",
                              outbound="{'average':'0'}"):
        """
        Define and start the bridge type network
        """
        # check if network with the same name already exists
        output_all = virsh.net_list("--all").stdout.strip()
        if re.search(net_name, output_all):
            test.cancel("Network with the same name already exists!")
        test_xml = network_xml.NetworkXML(network_name="%s" % net_name)
        test_xml.forward = {"mode": "bridge"}
        test_xml.bridge = {"name": br_name}
        test_xml.bandwidth_inbound = eval(inbound)
        test_xml.bandwidth_outbound = eval(outbound)
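        # The generated network xml is roughly (an illustrative sketch):
        #   <network>
        #     <name>NET_NAME</name>
        #     <forward mode='bridge'/>
        #     <bridge name='BR_NAME'/>
        #     <bandwidth>
        #       <inbound average='...'/>
        #       <outbound average='...'/>
        #     </bandwidth>
        #   </network>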
        logging.debug("The network's xml is %s", test_xml)
        test_xml.create()

    def define_nwfilter(filter_name):
        """
        Define nwfilter vdsm-no-mac-spoofing with content like:
        <filter name='vdsm-no-mac-spoofing' chain='root'>
            <filterref filter='no-mac-spoofing'/>
            <filterref filter='no-arp-mac-spoofing'/>
        </filter>

        :param filter_name: the name of nwfilter
        :return: filter created or raise exception
        """
        filter_uuid = params.get("filter_uuid",
                                 "11111111-b071-6127-b4ec-111111111111")
        filter_params = {
            "filter_name": "vdsm-no-mac-spoofing",
            "filter_chain": "root",
            "filter_uuid": filter_uuid,
            "filterref_name_1": "no-mac-spoofing",
            "filterref_name_2": "no-arp-mac-spoofing"
        }
        filter_xml = libvirt.create_nwfilter_xml(filter_params).xml
        # Run command
        result = virsh.nwfilter_define(filter_xml,
                                       ignore_status=True,
                                       debug=True)
        if result.exit_status:
            test.fail("Failed to define nwfilter with %s" % filter_xml)

    def ping(src_ip, dest_ip, ping_count, timeout, session=None):
        """
        Wrap of ping

        :param src_ip: source address
        :param dest_ip: destination address
        :param ping_count: count of icmp packet
        :param timeout: timeout for the ping command
        :param session: local execution or session to execute the ping command
        :return: ping succeed or raise exception
        """
        status, output = utils_net.ping(dest=dest_ip,
                                        count=ping_count,
                                        interface=src_ip,
                                        timeout=timeout,
                                        session=session,
                                        force_ipv4=True)
        if status:
            test.fail("Fail to ping %s from %s" % (dest_ip, src_ip))

    def check_net_functions(guest_ip, ping_count, ping_timeout, guest_session,
                            host_ip, remote_url, endpoint_ip):
        # make sure host network works well
        # host ping remote url
        ping(host_ip, remote_url, ping_count, ping_timeout)
        # host ping guest
        ping(host_ip, guest_ip, ping_count, ping_timeout)
        # guest ping host
        ping(guest_ip,
             host_ip,
             ping_count,
             ping_timeout,
             session=guest_session)
        # guest ping remote url
        ping(guest_ip,
             remote_url,
             ping_count,
             ping_timeout,
             session=guest_session)
        # guest ping endpoint
        ping(guest_ip,
             endpoint_ip,
             ping_count,
             ping_timeout,
             session=guest_session)

    # Get test params
    bridge_name = params.get("bridge_name", "test_br0")
    filter_name = params.get("filter_name", "vdsm-no-mac-spoofing")
    ping_count = params.get("ping_count", "5")
    ping_timeout = float(params.get("ping_timeout", "10"))
    iface_name = utils_net.get_net_if(state="UP")[0]
    bridge_script = NETWORK_SCRIPT + bridge_name
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(data_dir.get_tmp_dir(),
                                   "iface-%s.bk" % iface_name)
    attach_interface = "yes" == params.get("attach_interface", "no")
    iface_model = params.get("iface_model", "virtio")
    iface_source = eval(params.get("iface_source", "{'bridge':'test_br0'}"))
    iface_type = params.get("iface_type", 'bridge')
    iface_target = params.get("iface_target", "br_target")
    iface_alias = params.get("iface_alias", None)
    hotplug = "yes" == params.get("hotplug", "no")
    iface_driver = params.get("iface_driver", None)
    reconnect_tap = "yes" == params.get("reconnect_tap", "no")
    restart_net = "yes" == params.get("restart_net", "no")
    start_vm2 = "yes" == params.get("start_vm2", "no")
    create_network = "yes" == params.get("create_network", "no")
    update_device = "yes" == params.get("update_with_diff_type", "no")
    test_qos = "yes" == params.get("test_qos", "no")
    test_net_qos = "yes" == params.get("test_net_qos", "no")
    iface_inbound = params.get("iface_bandwidth_inbound", "{'average':'0'}")
    iface_outbound = params.get("iface_bandwidth_outbound", "{'average':'0'}")
    net_inbound = params.get("net_bandwidth_inbound", "{'average':'0'}")
    net_outbound = params.get("net_bandwidth_outbound", "{'average':'0'}")

    libvirt_version.is_libvirt_feature_supported(params)
    vms = params.get("vms").split()
    vm1_name = vms[0]
    vm1 = env.get_vm(vm1_name)

    if start_vm2:
        if len(vms) <= 1:
            test.cancel("Need two VMs to test")
        else:
            vm2_name = vms[1]
        vm2 = env.get_vm(vm2_name)
        vm2_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm2_name)

    # Back up the interface script
    if os.path.exists(iface_script):
        process.run("cp %s %s" % (iface_script, iface_script_bk),
                    shell=True,
                    verbose=True)
    # Back up vm xml
    vm1_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm1_name)

    # Make sure NetworkManager is running and record its original status
    NM_service = service.Factory.create_service("NetworkManager")
    NM_status = NM_service.status()
    if not NM_status:
        NM_service.start()
    mac = utils_net.generate_mac_address_simple()

    try:
        if libvirt.check_iface(bridge_name, "exists", "--all"):
            test.cancel("The bridge %s already exist" % bridge_name)
        s, o = utils_net.create_linux_bridge_tmux(bridge_name, iface_name)
        if s:
            test.fail(
                "Failed to create linux bridge on the host. Status: %s Stdout: %s"
                % (s, o))
        define_nwfilter(filter_name)
        if create_network:
            create_bridge_network(bridge_name, iface_source["network"],
                                  net_inbound, net_outbound)
        if hotplug:
            err_msgs = ("No more available PCI slots",
                        "No more available PCI addresses")
            # delete the original interface on the vm before hot-plug
            if vm1.is_alive():
                vm1.destroy()
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm1_name)
            iface_xml = vmxml.get_devices('interface')[0]
            logging.debug("Delete the original interface")
            vmxml.del_device(iface_xml)
            vmxml.sync()
            vm1.start()
            # do hot-plug
            if attach_interface:
                logging.info("Try to hot-plug interface")
                options = (
                    "%s %s --model %s --mac %s" %
                    (iface_type, iface_source['bridge'], iface_model, mac))
                if test_qos:
                    inbound_value = ','.join(eval(iface_inbound).values())
                    outbound_value = ','.join(eval(iface_outbound).values())
                    options = (
                        "%s %s --model %s --mac %s --inbound %s --outbound %s"
                        % (iface_type, iface_source['bridge'], iface_model,
                           mac, inbound_value, outbound_value))
                ret = virsh.attach_interface(vm1_name,
                                             options,
                                             ignore_status=True)
            else:
                logging.info("Try to hot-plug device")
                target = str({'dev': iface_target})
                iface_alias = str({'name': iface_alias})
                vm_iface_source = str(iface_source)
                iface_params = {
                    "type": iface_type,
                    "source": vm_iface_source,
                    "filter": filter_name,
                    "mac": mac,
                    'alias': iface_alias,
                    'target': target,
                    'model': iface_model,
                    'driver': iface_driver
                }
                attach_xml = interface.Interface(iface_params['type'])
                attach_xml.xml = libvirt.modify_vm_iface(
                    vm1_name, 'get_xml', iface_params)
                ret = virsh.attach_device(vm1_name,
                                          attach_xml.xml,
                                          ignore_status=True,
                                          debug=True)
            if ret.exit_status:
                if any([msg in ret.stderr for msg in err_msgs]):
                    test.error("No more pci slots, can't attach more devices")
                else:
                    test.fail("Failed to attach-interface: %s" %
                              ret.stderr.strip())
            else:
                logging.debug("Hot-plug interface or device pass")
                if update_device:
                    # The interface type changes to the actual type
                    # "bridge" in the live xml, so make sure updating
                    # with the original "network" type does not fail.
                    # Try to delete the nwfilter with the original type
                    # in iface_params.
                    update_xml = interface.Interface(iface_type)
                    iface_params_update = {
                        "del_filter": "yes",
                        "type": "network",
                        "source": vm_iface_source
                    }
                    update_xml.xml = libvirt.modify_vm_iface(
                        vm1_name, 'get_xml', iface_params_update)
                    ret = virsh.update_device(vm1_name,
                                              update_xml.xml,
                                              ignore_status=True,
                                              debug=True)
                    libvirt.check_exit_status(ret)

        else:
            vm_iface_source = str(iface_source)
            vm1_iface_params = {
                "type": iface_type,
                "source": vm_iface_source,
                "filter": filter_name,
                "mac": mac,
                'driver': iface_driver,
                "iface_model": iface_model,
                "inbound": iface_inbound,
                "outbound": iface_outbound
            }
            libvirt.modify_vm_iface(vm1_name, "update_iface", vm1_iface_params)

            if vm1.is_alive():
                vm1.destroy()

            vm1.start()
        # apply ip address as it may not be initialized
        session1 = session2 = None
        session1 = vm1.wait_for_serial_login()
        utils_net.restart_guest_network(session1)
        output = session1.cmd_output("ifconfig || ip a")
        logging.debug("guest1 ip info %s" % output)

        # Check guest's network function
        host_ip = utils_net.get_ip_address_by_interface(bridge_name)
        remote_url = params.get("remote_ip", "www.google.com")

        try:
            vm1_ip = utils_net.get_guest_ip_addr(session1, mac)
        except Exception as errs:
            test.fail("vm1 can't get IP with the new create bridge: %s" % errs)
        if test_qos:
            if test_net_qos:
                logging.debug("Test network inbound:")
                res1 = utils_net.check_class_rules(
                    bridge_name, "1:2", ast.literal_eval(net_inbound))
                logging.debug("Test network outbound:")
                res2 = utils_net.check_filter_rules(
                    bridge_name, ast.literal_eval(net_outbound))
            else:
                iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm1_name)
                tap_name = libvirt.get_ifname_host(vm1_name, iface_mac)
                logging.debug("Test inbound:")
                res1 = utils_net.check_class_rules(
                    tap_name, "1:1", ast.literal_eval(iface_inbound))
                logging.debug("Test outbound:")
                res2 = utils_net.check_filter_rules(
                    tap_name, ast.literal_eval(iface_outbound))
            if not res1 or not res2:
                test.fail("Qos test fail!")
        if hotplug:
            # Reboot vm1, then check the network to make sure the
            # interface is still present and works fine
            logging.info("reboot the vm")
            virsh.reboot(vm1_name)
            if session1 is None:
                session1 = vm1.wait_for_serial_login()
            ping(vm1_ip,
                 remote_url,
                 ping_count,
                 ping_timeout,
                 session=session1)
            # restart libvirtd service then check the interface still works fine
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            vm1.cleanup_serial_console()
            vm1.create_serial_console()
            session1 = vm1.wait_for_serial_login()
            ping(vm1_ip,
                 remote_url,
                 ping_count,
                 ping_timeout,
                 session=session1)
            logging.info(
                "after reboot and restart libvirtd, the network works fine")
            if iface_driver:
                try:
                    driver_dict = eval(iface_driver)
                    if session1 is None:
                        session1 = vm1.wait_for_serial_login()
                    guest_iface_info = session1.cmd_output("ip l").strip()
                    guest_iface_name = re.findall(
                        r"^\d+: (\S+?)[@:].*state UP.*$", guest_iface_info,
                        re.MULTILINE)[0]
                    comb_size = driver_dict.get('queues')
                    rx_size = driver_dict.get('rx_queue_size')
                    session1.cmd_status("ethtool -L %s combined %s" %
                                        (guest_iface_name, comb_size))
                    ret, outp = session1.cmd_status_output("ethtool -l %s" %
                                                           guest_iface_name)
                    logging.debug("ethtool cmd output:%s" % outp)
                    if not ret:
                        pre_comb = re.search(
                            r"Pre-set maximums:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        cur_comb = re.search(
                            r"Current hardware settings:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        if int(pre_comb) != int(comb_size) or int(
                                cur_comb) != int(comb_size):
                            test.fail(
                                "Combined size mismatch: setting: %s,"
                                " Pre-set: %s, Current-set: %s" %
                                (comb_size, pre_comb, cur_comb))
                        else:
                            logging.info(
                                "Pre-set and current combined values"
                                " are correct")
                    else:
                        test.error("ethtool list fail: %s" % outp)
                    # as tx_queue size is only supported for vhost-user interface, only check rx_queue size
                    ret1, outp1 = session1.cmd_status_output("ethtool -g %s" %
                                                             guest_iface_name)
                    logging.debug("guest queue size setting is %s" % outp1)
                    if not ret1:
                        pre_set = re.search(r"Pre-set maximums:\s*RX:\s*(\d+)",
                                            outp1).group(1)
                        cur_set = re.search(
                            r"Current hardware settings:\s*RX:\s*(\d+)",
                            outp1).group(1)
                        if int(pre_set) != int(rx_size) or int(cur_set) != int(
                                rx_size):
                            test.fail("Fail to check the rx_queue_size!")
                except Exception as errs:
                    test.fail("Failed to get driver info: %s" % errs)
            # hot-unplug interface/device
            if attach_interface:
                ret = virsh.detach_interface(vm1_name,
                                             "bridge",
                                             ignore_status=True)
            else:
                ret = virsh.detach_device(vm1_name,
                                          attach_xml.xml,
                                          ignore_status=True,
                                          debug=True)
            if ret.exit_status:
                test.fail("Hot-unplug interface/device fail")
            else:
                logging.info("hot-unplug interface/device succeed")

        else:
            if start_vm2:
                # Start vm2 connect to the same bridge
                mac2 = utils_net.generate_mac_address_simple()
                vm2_iface_params = {
                    "type": "bridge",
                    "source": vm_iface_source,
                    "filter": filter_name,
                    "mac": mac2
                }
                libvirt.modify_vm_iface(vm2_name, "update_iface",
                                        vm2_iface_params)
                if vm2.is_alive():
                    vm2.destroy()
                vm2.start()

                # Check if vm1 and vm2 can ping each other
                try:
                    utils_net.update_mac_ip_address(vm2, timeout=120)
                    vm2_ip = vm2.get_address()
                except Exception as errs:
                    test.fail(
                        "vm2 can't get an IP on the newly created bridge: %s" %
                        errs)
                session2 = vm2.wait_for_login()
                # make sure guest has got ip address
                utils_net.restart_guest_network(session2)
                output2 = session2.cmd_output("ifconfig || ip a")
                logging.debug("guest ip info %s" % output2)
                # check 2 guests' network functions
                check_net_functions(vm1_ip, ping_count, ping_timeout, session1,
                                    host_ip, remote_url, vm2_ip)
                check_net_functions(vm2_ip, ping_count, ping_timeout, session2,
                                    host_ip, remote_url, vm1_ip)
        if reconnect_tap:
            iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm1_name)
            tap_name = libvirt.get_ifname_host(vm1_name, iface_mac)
            # For network with shared host bridge, destroy the network will not
            # impact the connection
            if create_network and restart_net:
                virsh.net_destroy(iface_source["network"])
                out1 = libvirt_network.check_tap_connected(
                    tap_name, True, bridge_name)
                virsh.net_start(iface_source["network"])
                out2 = libvirt_network.check_tap_connected(
                    tap_name, True, bridge_name)
                if not out1 or not out2:
                    test.fail("Network destroy and restart should not impact "
                              "tap connection from bridge network!")
            else:
                # Delete and re-create bridge, check the tap is not connected
                utils_net.delete_linux_bridge_tmux(bridge_name, iface_name)
                utils_net.create_linux_bridge_tmux(bridge_name, iface_name)
                out3 = libvirt_network.check_tap_connected(
                    tap_name, False, bridge_name)
                # Check restart libvirtd will recover the connection
                libvirtd = utils_libvirtd.Libvirtd()
                libvirtd.restart()
                out4 = utils_misc.wait_for(
                    lambda: libvirt_network.check_tap_connected(
                        tap_name, True, bridge_name), 20)
                if not out3 or not out4:
                    test.fail("Delete and create linux bridge and check tap "
                              "connection is not as expected!")
    finally:
        logging.debug("Start to restore")
        vm1_xml_bak.sync()
        if start_vm2:
            vm2_xml_bak.sync()
        virsh.nwfilter_undefine(filter_name, ignore_status=True)
        if os.path.exists(iface_script_bk):
            process.run("mv %s %s" % (iface_script_bk, iface_script),
                        shell=True,
                        verbose=True)
        if os.path.exists(bridge_script):
            process.run("rm -rf %s" % bridge_script, shell=True, verbose=True)
        br_path = "/sys/class/net/%s" % bridge_name
        if os.path.exists(br_path):
            utils_net.delete_linux_bridge_tmux(bridge_name, iface_name)
        # reload network configuration
        NM_service.restart()
        # recover NetworkManager
        if NM_status is True:
            NM_service.start()
        if 'network' in iface_source and iface_source[
                "network"] in virsh.net_state_dict():
            virsh.net_destroy(iface_source["network"], ignore_status=False)
Exemplo n.º 41
def run(test, params, env):
    """
    Test vsock device:

    1.Edit/start guest with vsock device.
    2.Hotplug/hotunplug vsock device.
    3.Coldplug/Coldunplug vsock device
    """
    def env_setup():
        session = vm.wait_for_login()
        cmd = [
            'rpm -q lsof || yum -y install lsof',
            'rpm -q git || yum -y install git',
            'rpm -q make || yum -y install make',
            'rpm -q gcc && yum -y reinstall gcc || yum -y install gcc',
            'rm -rf /tmp/nc-vsock',
            'git clone %s /tmp/nc-vsock' % git_repo,
            'cd /tmp/nc-vsock/ && make',
        ]
        for c in cmd:
            status1 = session.cmd_status(c)
            logging.debug(status1)
            status2 = process.run(c, shell=True).exit_status
            logging.debug(status2)
            if (status1 + status2) != 0:
                test.cancel("Failed to run setup command '%s'" % c)
        session.close()

    def validate_data_transfer_by_vsock():
        env_setup()

        def _vsock_server():
            session.cmd("/tmp/nc-vsock/nc-vsock -l 1234 > /tmp/testfile")

        server = threading.Thread(target=_vsock_server)
        session = vm.wait_for_login()
        server.start()
        process.run('echo "test file" > /tmp/testfile', shell=True)
        output = process.run("/tmp/nc-vsock/nc-vsock %d 1234 < /tmp/testfile" %
                             int(cid),
                             shell=True).stdout_text
        logging.debug(output)
        server.join(5)
        data_host = process.run("sha256sum /tmp/testfile",
                                shell=True).stdout_text.split()[0]
        logging.debug(session.cmd_status_output("cat /tmp/testfile")[1])
        data_guest = session.cmd_status_output(
            "sha256sum /tmp/testfile")[1].split()[0]
        logging.debug(data_guest)
        if data_guest != data_host:
            test.fail("Data transfer error with vsock device\n")
        session.close()

    def managedsave_restore():
        result = virsh.managedsave(vm_name, debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        result = virsh.start(vm_name)
        utils_test.libvirt.check_exit_status(result, expect_error=False)

    start_vm = params.get("start_vm", "no")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    auto_cid = params.get("auto_cid", "no")
    status_error = params.get("status_error", "no") == "yes"
    edit_xml = params.get("edit_xml", "no") == "yes"
    option = params.get("option", "")
    git_repo = params.get("git_repo", "")
    invalid_cid = params.get("invalid_cid", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    no_vsock = params.get("no_vsock", "no") == "yes"
    vsock_num = int(params.get("num", "1"))
    communication = params.get("communication", "no") == "yes"

    # Backup xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Add vsock device to domain
    vmxml.remove_all_device_by_type('vsock')
    vsock_dev = Vsock()
    vsock_dev.model_type = "virtio"
    if process.run("modprobe vhost_vsock",
                   ignore_status=True).exit_status != 0:
        test.fail("Failed to load vhost_vsock module")
    if invalid_cid:
        cid = "-1"
    else:
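        # CIDs 0-2 are reserved (hypervisor, local, host), so guest
        # CIDs start at 3.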
        cid = random.randint(3, 10)
        vsock_dev.cid = {'auto': auto_cid, 'address': cid}
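        # libvirt only honors user-defined aliases that start with "ua-"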
        chars = string.ascii_letters + string.digits + '-_'
        alias_name = 'ua-' + ''.join(
            random.choice(chars) for _ in list(range(64)))
        vsock_dev.alias = {'name': alias_name}
    logging.debug(vsock_dev)
    if start_vm == "no" and vm.is_alive():
        virsh.destroy(vm_name)
    try:
        if edit_xml:
            edit_status1 = libvirt.exec_virsh_edit(
                vm_name, [(r":/<devices>/s/$/%s" %
                           re.findall(r"<vsock.*<\/vsock>", str(vsock_dev),
                                      re.M)[0].replace("/", "\/"))])
            edit_status2 = True
            if vsock_num == 2:
                edit_status2 = libvirt.exec_virsh_edit(
                    vm_name, [(r":/<devices>/s/$/%s" %
                               re.findall(r"<vsock.*<\/vsock>", str(vsock_dev),
                                          re.M)[0].replace("/", "\/"))])
                logging.debug(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name))

            if status_error:
                if edit_status1 or edit_status2:
                    test.fail("virsh edit should fail\n")
            else:
                if not edit_status1:
                    test.fail("Failed to edit vm xml with vsock device\n")
                else:
                    result = virsh.start(vm_name, debug=True)
                    utils_test.libvirt.check_exit_status(result,
                                                         expect_error=False)
        else:
            session = vm.wait_for_login()
            session.close()
            result = virsh.attach_device(vm_name,
                                         file_opt=vsock_dev.xml,
                                         flagstr=option,
                                         debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            if option == "--config":
                result = virsh.start(vm_name, debug=True)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug(vmxml)
            vsock_list = vmxml.devices.by_device_tag("vsock")
            if not vsock_list:
                test.fail("No vsock device found in live xml\n")
            cid = vsock_list[0].cid['address']
            if communication:
                validate_data_transfer_by_vsock()
            if managedsave and not no_vsock:
                managedsave_restore()

            def _detach_completed():
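                # lsof exits 1 when nothing holds /dev/vhost-vsock open,
                # i.e. QEMU has released the vsock backend.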
                status = process.run("lsof /dev/vhost-vsock",
                                     ignore_status=True,
                                     shell=True).exit_status
                return status == 1

            result = virsh.detach_device(vm_name,
                                         file_opt=vsock_dev.xml,
                                         debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            utils_misc.wait_for(_detach_completed, timeout=20)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            vsock_list = vmxml.get_devices("vsock")
            if vsock_list:
                test.fail(
                    "Still find vsock device in live xml after hotunplug\n")
            if managedsave and no_vsock:
                managedsave_restore()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
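A note on the hot-unplug pattern shared by these examples: virsh detach-device returning success only means libvirt accepted the request, so the tests poll for completion instead of asserting immediately. A minimal sketch of that pattern, reusing the virsh, vm_xml and utils_misc helpers these examples already import (detach_and_verify is a hypothetical name, not part of the test suite):

def detach_and_verify(vm_name, dev_xml, device_tag, timeout=20):
    # Issue the hot-unplug request; a zero exit status does not yet
    # guarantee the guest has released the device.
    result = virsh.detach_device(vm_name, dev_xml, debug=True)
    if result.exit_status:
        raise RuntimeError("detach-device failed: %s" % result.stderr)

    def _device_gone():
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        return not live_xml.devices.by_device_tag(device_tag)

    # Poll the live XML until the device disappears or the timeout hits.
    return utils_misc.wait_for(_device_gone, timeout=timeout)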
Exemplo n.º 42
def run(test, params, env):
    """
    Test vsock device:

    1.Edit/start guest with vsock device.
    2.Hotplug/hotunplug vsock device.
    3.Coldplug/Coldunplug vsock device
    4.Check if hotplugged vsock communicates.
    """
    def remove_all_vsocks(vm, vmxml):
        """
        Removes all vsock devices from current domain
        to avoid failures due to original domain definition
        which must have been backed up earlier for correct
        restore.

        :param vm: the current test domain
        :param vmxml: VMXML of the current test domain
        :return: None
        """

        vmxml.remove_all_device_by_type('vsock')
        was_alive = vm.is_alive()
        vmxml.sync()
        if was_alive:
            vm.start()

    def env_setup():
        """
        1. Install build dependency for nc-vsock both on guest
        and host.
        2. Get nc-vsock code
        3. Build nc-vsock on both guest and host.
        :return: None
        """

        session = vm.wait_for_login()
        cmds = [
            'rpm -q lsof || yum -y install lsof',
            'rpm -q git || yum -y install git',
            'rpm -q make || yum -y install make',
            'rpm -q gcc && yum -y reinstall gcc || yum -y install gcc',
            'rm -rf %s' % NC_VSOCK_DIR,
            'git clone %s %s' % (git_repo, NC_VSOCK_DIR),
            'cd %s && make' % NC_VSOCK_DIR,
        ]
        for cmd in cmds:
            for where in ["guest", "host"]:
                session_arg = session if where == "guest" else None
                status, output = utils_misc.cmd_status_output(
                    cmd, shell=True, timeout=CMD_TIMEOUT, session=session_arg)
                cancel_if_failed(cmd, status, output, where)

        session.close()

    def cancel_if_failed(cmd, status, output, where):
        """
        Cancel test execution as soon as command failed reporting output

        :param cmd: Command that was executed
        :param status: Exit status of command
        :param output: Output of command
        :param where: "guest" or "host"
        :return:
        """

        if status:
            test.cancel("Failed to run %s on %s output: %s" %
                        (cmd, where, output))

    def write_from_host_to_guest():
        """
        1. Create file for stdin to nc-vsock
        2. Invoke nc-vsock as client writing to guest cid
        :return msg: The message sent to the server
        """

        msg = "message from client"
        process.run('echo %s > %s' % (msg, NC_VSOCK_CLI_TXT), shell=True)
        output = process.run(
            "%s %d %s < %s" %
            (NC_VSOCK_CMD, int(cid), VSOCK_PORT, NC_VSOCK_CLI_TXT),
            shell=True).stdout_text
        logging.debug(output)
        logging.debug(process.system_output('cat %s' % NC_VSOCK_CLI_TXT))
        return msg

    def wait_for_guest_to_receive(server):
        """
        nc-vsock server finishes as soon as it received data.
        We report if it's still running.

        :param server: The started server instance accepting requests
        :return: Nothing
        """

        server.join(5)
        if server.is_alive():
            logging.debug("The server thread is still running in the guest.")

    def validate_data_transfer_by_vsock():
        """
        1. Setup nc-vsock on host and guest
        2. Start vsock server on guest (wait a bit)
        3. Send message from host to guest using correct cid
        4. Get received message from vsock server
        5. Verify message was sent
        """

        env_setup()

        session = vm.wait_for_login()

        def _start_vsock_server_in_guest():
            """
            Starts the nc-vsock server to listen on VSOCK_PORT in the guest.
            The server will stop as soon as it received message from host.
            :return:
            """

            session.cmd("%s -l %s > %s" %
                        (NC_VSOCK_CMD, VSOCK_PORT, NC_VSOCK_SRV_OUT))

        server = Thread(target=_start_vsock_server_in_guest)
        server.start()
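        # Give the in-guest server a moment to start listening before
        # the host-side client connects.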
        time.sleep(5)

        sent_data = write_from_host_to_guest()
        wait_for_guest_to_receive(server)
        received_data = session.cmd_output('cat %s' % NC_VSOCK_SRV_OUT).strip()

        if sent_data != received_data:
            test.fail("Data transfer error with vsock device\n"
                      "Sent: '%s'\n"
                      "Received:'%s'" % (sent_data, received_data))

        session.close()

    def managedsave_restore():
        """
        Check that vm can be saved and restarted with current configuration
        """

        result = virsh.managedsave(vm_name, debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        result = virsh.start(vm_name)
        utils_test.libvirt.check_exit_status(result, expect_error=False)

    start_vm = params.get("start_vm", "no")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    auto_cid = params.get("auto_cid", "no")
    status_error = params.get("status_error", "no") == "yes"
    edit_xml = params.get("edit_xml", "no") == "yes"
    option = params.get("option", "")
    git_repo = params.get("git_repo", "")
    invalid_cid = params.get("invalid_cid", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    no_vsock = params.get("no_vsock", "no") == "yes"
    vsock_num = int(params.get("num", "1"))
    communication = params.get("communication", "no") == "yes"

    # Backup xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    remove_all_vsocks(vm, vmxml)

    # Define vsock device xml
    vsock_dev = Vsock()
    vsock_dev.model_type = "virtio"
    if process.run("modprobe vhost_vsock",
                   ignore_status=True).exit_status != 0:
        test.fail("Failed to load vhost_vsock module")
    if invalid_cid:
        cid = "-1"
    else:
        cid = random.randint(3, 10)
        vsock_dev.cid = {'auto': auto_cid, 'address': cid}
        vsock_dev.alias = {'name': str(uuid.uuid1())}
    logging.debug(vsock_dev)

    if start_vm == "no" and vm.is_alive():
        virsh.destroy(vm_name)

    try:
        if edit_xml:
            edit_status1 = libvirt.exec_virsh_edit(
                vm_name, [(r":/<devices>/s/$/%s" %
                           re.findall(r"<vsock.*<\/vsock>", str(vsock_dev),
                                      re.M)[0].replace("/", "\/"))])
            edit_status2 = True
            if vsock_num == 2:
                edit_status2 = libvirt.exec_virsh_edit(
                    vm_name, [(r":/<devices>/s/$/%s" %
                               re.findall(r"<vsock.*<\/vsock>", str(vsock_dev),
                                          re.M)[0].replace("/", "\/"))])
                logging.debug(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name))

            if status_error:
                if edit_status1 or edit_status2:
                    test.fail("virsh edit should fail\n")
            else:
                if not edit_status1:
                    test.fail("Failed to edit vm xml with vsock device\n")
                else:
                    result = virsh.start(vm_name, debug=True)
                    utils_test.libvirt.check_exit_status(result,
                                                         expect_error=False)
        else:
            session = vm.wait_for_login()
            session.close()
            result = virsh.attach_device(vm_name,
                                         vsock_dev.xml,
                                         flagstr=option,
                                         debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            if option == "--config":
                result = virsh.start(vm_name, debug=True)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug(vmxml)
            vsock_list = vmxml.devices.by_device_tag("vsock")
            if not vsock_list:
                test.fail("No vsock device found in live xml\n")
            cid = vsock_list[0].cid['address']
            if communication:
                validate_data_transfer_by_vsock()
            if managedsave and not no_vsock:
                managedsave_restore()

            def _detach_completed():
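                # lsof exits 1 when no process holds /dev/vhost-vsock open.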
                status = process.run("lsof /dev/vhost-vsock",
                                     ignore_status=True,
                                     shell=True).exit_status
                return status == 1

            result = virsh.detach_device(vm_name, vsock_dev.xml, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            utils_misc.wait_for(_detach_completed, timeout=20)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            vsock_list = vmxml.get_devices("vsock")
            if vsock_list:
                test.fail(
                    "Still find vsock device in live xml after hotunplug\n")
            if managedsave and no_vsock:
                managedsave_restore()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
Exemplo n.º 43
def run(test, params, env):
    """
    Test interface xml options.

    1.Prepare test environment, destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.wait_for_login()

    def create_iface_xml(mac):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        iface.source = iface_source
        iface.model = iface_model if iface_model else "virtio"
        if iface_target:
            iface.target = {'dev': iface_target}
        iface.mac_address = mac
        logging.debug("Create new interface xml: %s", iface)
        return iface

    status_error = "yes" == params.get("status_error", "no")

    # Interface specific attributes.
    iface_num = params.get("iface_num", '1')
    iface_type = params.get("iface_type", "network")
    iface_source = eval(params.get("iface_source",
                                   "{'network':'default'}"))
    iface_model = params.get("iface_model")
    iface_target = params.get("iface_target")
    iface_mac = params.get("iface_mac")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_iface = "yes" == params.get("attach_iface", "no")
    attach_option = params.get("attach_option", "")
    detach_device = "yes" == params.get("detach_device")
    stress_test = "yes" == params.get("stress_test")
    restart_libvirtd = "yes" == params.get("restart_libvirtd",
                                           "no")
    username = params.get("username")
    password = params.get("password")
    poll_timeout = int(params.get("poll_timeout", 10))

    # stress_test requires a detach operation
    stress_test_detach_device = False
    stress_test_detach_interface = False
    if stress_test:
        if attach_device:
            stress_test_detach_device = True
        if attach_iface:
            stress_test_detach_interface = True

    # The following detach-device step also uses the attach option
    detach_option = attach_option

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    #iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()

    # Check virsh command option
    check_cmds = []
    if not status_error:
        if attach_device and attach_option:
            check_cmds.append(('attach-device', attach_option))
        if attach_iface and attach_option:
            check_cmds.append(('attach-interface', attach_option))
        if (detach_device or stress_test_detach_device) and detach_option:
            check_cmds.append(('detach-device', detach_option))
        if stress_test_detach_interface and detach_option:
            check_cmds.append(('detach-interface', detach_option))
    for cmd, option in check_cmds:
        libvirt.virsh_cmd_has_option(cmd, option)

    try:
        try:
            # Attach an interface when vm is running
            iface_list = []
            err_msgs = ("No more available PCI slots",
                        "No more available PCI addresses")
            if attach_device:
                for i in range(int(iface_num)):
                    logging.info("Try to attach interface loop %s" % i)
                    if iface_mac:
                        mac = iface_mac
                    else:
                        mac = utils_net.generate_mac_address_simple()
                    iface_xml_obj = create_iface_xml(mac)
                    iface_xml_obj.xmltreefile.write()
                    ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                              flagstr=attach_option,
                                              ignore_status=True)
                    if ret.exit_status:
                        if any([msg in ret.stderr for msg in err_msgs]):
                            logging.debug("No more pci slots, can't"
                                          " attach more devices")
                            break
                        elif (ret.stderr.count("doesn't support option %s"
                                               % attach_option)):
                            raise error.TestNAError(ret.stderr)
                        elif status_error:
                            continue
                        else:
                            logging.error("Command output %s" %
                                          ret.stdout.strip())
                            raise error.TestFail("Failed to attach-device")
                    elif stress_test:
                        # Detach the device immediately for stress test
                        ret = virsh.detach_device(vm_name, iface_xml_obj.xml,
                                                  flagstr=detach_option,
                                                  ignore_status=True)
                        libvirt.check_exit_status(ret)
                    else:
                        iface_list.append({'mac': mac,
                                           'iface_xml': iface_xml_obj})
                # Need sleep here for the attachment to take effect
                time.sleep(5)
            elif attach_iface:
                for i in range(int(iface_num)):
                    logging.info("Try to attach interface loop %s" % i)
                    mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (iface_type, iface_source['network'],
                                iface_model, mac, attach_option))
                    ret = virsh.attach_interface(vm_name, options,
                                                 ignore_status=True)
                    if ret.exit_status:
                        if any([msg in ret.stderr for msg in err_msgs]):
                            logging.debug("No more pci slots, can't"
                                          " attach more devices")
                            break
                        elif (ret.stderr.count("doesn't support option %s"
                                               % attach_option)):
                            raise error.TestNAError(ret.stderr)
                        elif status_error:
                            continue
                        else:
                            logging.error("Command output %s" %
                                          ret.stdout.strip())
                            raise error.TestFail("Failed to attach-interface")
                    elif stress_test:
                        # Detach the device immediately for stress test
                        options = ("--type %s --mac %s %s" %
                                   (iface_type, mac, detach_option))
                        ret = virsh.detach_interface(vm_name, options,
                                                     ignore_status=True)
                        libvirt.check_exit_status(ret)
                    else:
                        iface_list.append({'mac': mac})
                # Need sleep here for the attachment to take effect
                time.sleep(5)

            # Restart libvirtd service
            if restart_libvirtd:
                libvirtd.restart()
                vm.cleanup_serial_console()

            # Start the domain if needed
            if vm.is_dead():
                vm.start()
            session = vm.wait_for_serial_login(username=username,
                                               password=password)

            # Check if interface was attached
            for iface in iface_list:
                if 'mac' in iface:
                    # Check interface in dumpxml output
                    if_attr = vm_xml.VMXML.get_iface_by_mac(vm_name,
                                                            iface['mac'])
                    if not if_attr:
                        raise error.TestFail("Can't see interface "
                                             " in dumpxml")
                    if (if_attr['type'] != iface_type or
                            if_attr['source'] != iface_source['network']):
                        raise error.TestFail("Interface attributes don't"
                                             " match attachment options")
                    # Check interface on guest
                    if not utils_net.get_linux_ifname(session,
                                                      iface['mac']):
                        raise error.TestFail("Can't see interface"
                                             " on guest")

            # Detach hot/cold-plugged interface at last
            if detach_device:
                for iface in iface_list:
                    if 'iface_xml' in iface:
                        ret = virsh.detach_device(vm_name,
                                                  iface['iface_xml'].xml,
                                                  flagstr="",
                                                  ignore_status=True)
                        libvirt.check_exit_status(ret)
                    else:
                        options = ("%s --mac %s" %
                                   (iface_type, iface['mac']))
                        ret = virsh.detach_interface(vm_name, options,
                                                     ignore_status=True)
                        libvirt.check_exit_status(ret)

                # Check if interface was detached
                for iface in iface_list:
                    if 'mac' in iface:
                        polltime = time.time() + poll_timeout
                        while True:
                            # Check interface in dumpxml output
                            if not vm_xml.VMXML.get_iface_by_mac(vm_name,
                                                                 iface['mac']):
                                break
                            else:
                                time.sleep(2)
                                if time.time() > polltime:
                                    test.fail("Interface still "
                                              "exists after detachment")
            session.close()
        except virt_vm.VMStartError as e:
            logging.info(str(e))
            raise error.TestFail('VM failed to start:\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
Exemplo n.º 44
def run(test, params, env):
    """
    Test command: virsh pool-define; pool-define-as; pool-start;
    vol-list pool; attach-device LUN to guest; mount the device;
    dd to the mounted device; unmount; pool-destroy; pool-undefine;

    Prerequisite:
    Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped to
    a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = params.get("pre_def_pool", "no")
    define_pool = params.get("define_pool", "no")
    define_pool_as = params.get("define_pool_as", "no")
    pool_create_as = params.get("pool_create_as", "no")
    need_pool_build = params.get("need_pool_build", "no")
    need_vol_create = params.get("need_vol_create", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("disk_target_dev", "sdc")
    pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE")
    vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE")
    vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    vol_format = params.get("volume_format", 'raw')
    attach_method = params.get("attach_method", "hot")
    test_unit = None
    mount_disk = None
    pool_kwargs = {}
    pool_extra_args = ""
    emulated_image = "emulated-image"
    disk_xml = ""
    new_vhbas = []
    source_dev = ""
    mpath_vol_path = ""
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    if pool_type == "scsi":
        if ('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn):
            raise exceptions.TestSkipError(
                "No wwpn and wwnn provided for npiv scsi pool.")
    if pool_type == "logical":
        if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn):
            raise exceptions.TestSkipError(
                "No wwpn and wwnn provided for vhba.")
    online_hbas_list = utils_npiv.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)
    old_mpath_conf = utils_npiv.prepare_multipath_conf(
        conf_path=mpath_conf_path, replace_existing=True)
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    old_vhbas = utils_npiv.find_hbas("vhba")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if not vm.is_alive():
        vm.start()
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)
    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    if pool_type == "scsi":
        if define_pool == "yes":
            if pool_adapter_parent == "":
                pool_adapter_parent = online_hbas_list[0]
            pool_kwargs = {
                'source_path': source_path,
                'source_name': source_name,
                'source_format': source_format,
                'pool_adapter_type': pool_adapter_type,
                'pool_adapter_parent': pool_adapter_parent,
                'pool_wwnn': pool_wwnn,
                'pool_wwpn': pool_wwpn
            }
    elif pool_type == "logical":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
            "nodedev_parent":
            online_hbas_list[0],
            "scsi_wwnn":
            vhba_wwnn,
            "scsi_wwpn":
            vhba_wwpn
        })
        utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                            timeout=_DELAY_TIME * 2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not successfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME * 5)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(
            set(cur_mpath_devs).difference(set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        source_dev = "/dev/mapper/" + new_mpath_devs[0]
        logging.debug(
            "We are going to use \"%s\" as our source device"
            " to create a logical pool", source_dev)
        try:
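            # Write an msdos disk label so the multipath device can back
            # an LVM (logical) pool.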
            cmd = "parted %s mklabel msdos -s" % source_dev
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError(
                "Error occurred when running parted mklabel: %s" % e)
        if define_pool_as == "yes":
            pool_extra_args = ""
            if source_dev:
                pool_extra_args = ' --source-dev %s' % source_dev
    elif pool_type == "mpath":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
            "nodedev_parent":
            online_hbas_list[0],
            "scsi_wwnn":
            vhba_wwnn,
            "scsi_wwpn":
            vhba_wwpn
        })
        utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                            timeout=_DELAY_TIME * 2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not successfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME * 2)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(
            set(cur_mpath_devs).difference(set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0]
        try:
            cmd = "parted %s mklabel msdos -s" % mpath_vol_path
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError(
                "Error occurred when running parted mklabel: %s" % e)
    if pre_def_pool == "yes":
        try:
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                         **pool_kwargs)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                _DELAY_TIME * 2)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            virsh.pool_destroy(pool_name)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **pool_kwargs)
            raise exceptions.TestError(
                "Error occurred when prepare pool xml:\n %s" % e)
        if os.path.exists(pool_xml_f):
            with open(pool_xml_f, 'r') as f:
                logging.debug("Create pool from file: %s", f.read())
    try:
        # define/create/start the pool
        if (pre_def_pool == "yes") and (define_pool == "yes"):
            pool_define_status = virsh.pool_define(pool_xml_f,
                                                   ignore_status=True,
                                                   debug=True)
            utlv.check_exit_status(pool_define_status)
        if define_pool_as == "yes":
            pool_define_as_status = virsh.pool_define_as(pool_name,
                                                         pool_type,
                                                         pool_target,
                                                         pool_extra_args,
                                                         ignore_status=True,
                                                         debug=True)
            utlv.check_exit_status(pool_define_as_status)
        if pool_create_as == "yes":
            if pool_type != "scsi":
                raise exceptions.TestSkipError("pool-create-as only needs to "
                                               "be covered by scsi pool for "
                                               "NPIV test.")
            cmd = "virsh pool-create-as %s %s \
                   --adapter-wwnn %s --adapter-wwpn %s \
                   --adapter-parent %s --target %s"\
                   % (pool_name, pool_type, pool_wwnn, pool_wwpn,
                      online_hbas_list[0], pool_target)
            cmd_status = process.system(cmd, verbose=True)
            if cmd_status:
                raise exceptions.TestFail("pool-create-as scsi pool failed.")
        if need_pool_build == "yes":
            pool_build_status = virsh.pool_build(pool_name, "--overwrite")
            utlv.check_exit_status(pool_build_status)

        pool_ins = libvirt_storage.StoragePool()
        if not pool_ins.pool_exists(pool_name):
            raise exceptions.TestFail("define or create pool failed.")
        else:
            if not pool_ins.is_pool_active(pool_name):
                pool_start_status = virsh.pool_start(pool_name)
                utlv.check_exit_status(pool_start_status)
                utlv.check_actived_pool(pool_name)
                pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
                logging.debug("Pool detail: %s", pool_detail)

        # create vol if required
        if need_vol_create == "yes":
            vol_create_as_status = virsh.vol_create_as(volume_name,
                                                       pool_name,
                                                       volume_capacity,
                                                       allocation,
                                                       vol_format,
                                                       "",
                                                       debug=True)
            utlv.check_exit_status(vol_create_as_status)
        virsh.pool_refresh(pool_name)
        vol_list = utlv.get_vol_list(pool_name,
                                     vol_check=True,
                                     timeout=_DELAY_TIME * 3)
        logging.debug('Volume list is: %s' % vol_list)

        # use test_unit to save the first vol in pool
        if pool_type == "mpath":
            cmd = "virsh vol-list %s | grep \"%s\" |\
                   awk '{FS=\" \"} {print $1}'" % (pool_name, mpath_vol_path)
            cmd_result = process.run(cmd, shell=True)
            status = cmd_result.exit_status
            output = cmd_result.stdout_text.strip()
            if cmd_result.exit_status:
                raise exceptions.TestFail("vol-list pool %s failed", pool_name)
            if not output:
                raise exceptions.TestFail("Newly added mpath dev not in pool.")
            test_unit = output
            logging.info("Using %s to attach to a guest", test_unit)
        else:
            test_unit = list(vol_list.keys())[0]
            logging.info("Using the first volume %s to attach to a guest",
                         test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()

        # prepare disk xml which will be hot/cold attached to vm
        disk_params = {
            'type_name': 'volume',
            'target_dev': target_device,
            'target_bus': 'virtio',
            'source_pool': pool_name,
            'source_volume': test_unit,
            'driver_type': vol_format
        }
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        disk_xml_str = open(lun_disk_xml).read()
        logging.debug("The disk xml is: %s", disk_xml_str)

        # hot attach disk xml to vm
        if attach_method == "hot":
            copyfile(lun_disk_xml, disk_xml)
            dev_attach_status = virsh.attach_device(vm_name,
                                                    disk_xml,
                                                    debug=True)
            # Pool/vol virtual disk is not supported by mpath pool yet.
            if dev_attach_status.exit_status and pool_type == "mpath":
                session.close()
                raise exceptions.TestSkipError(
                    "mpath pool vol is not supported in virtual disk yet, "
                    "the error message is: %s" % dev_attach_status.stderr)
            utlv.check_exit_status(dev_attach_status)
        # cold attach disk xml to vm
        elif attach_method == "cold":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            new_disk = disk.Disk()
            new_disk.xml = disk_xml_str
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            devices = vmxml.devices
            devices.append(new_disk)
            vmxml.devices = devices
            vmxml.sync()
            logging.debug(vmxml)
            try:
                vm.start()
            except virt_vm.VMStartError as e:
                logging.debug(e)
                if pool_type == "mpath":
                    raise exceptions.TestSkipError("'mpath' pools for backing "
                                                   "'volume' disks isn't "
                                                   "supported for now")
                else:
                    raise exceptions.TestFail("Failed to start vm")
            session = vm.wait_for_login()
        else:
            pass

        # checking attached disk in vm
        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail("Failed to attach disk %s" %
                                      lun_disk_xml)
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output('echo yes | mkfs.ext4 %s' %
                                           mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        # detach disk from vm
        dev_detach_status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(dev_detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            data_dir.clean_tmp_files()
            logging.debug("Cleanup disk xml")
        if pre_def_pool == "yes":
            # Do not apply cleanup_pool for logical pool, logical pool will
            # be cleaned below
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                             **pool_kwargs)
        if (test_unit and (need_vol_create == "yes" and (pre_def_pool == "no"))
                and (pool_type == "logical")):
            process.system('lvremove -f %s/%s' % (pool_name, test_unit),
                           verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            utils_npiv.vhbas_cleanup(new_vhbas)
        # Restart multipathd, this is to avoid bz1399075
        if source_dev:
            utils_misc.wait_for(
                lambda: utils_npiv.restart_multipathd(source_dev),
                _DELAY_TIME * 5, 0.0, 5.0)
        elif mpath_vol_path:
            utils_misc.wait_for(
                lambda: utils_npiv.restart_multipathd(mpath_vol_path),
                _DELAY_TIME * 5, 0.0, 5.0)
        else:
            utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
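The mount_and_dd helper called above is defined elsewhere in the test module and is not shown in this excerpt. A plausible minimal reconstruction (the body below is an assumption, not the suite's actual code) mounts the attached disk in the guest and exercises it with a small dd write:

def mount_and_dd(session, mount_disk, mount_point="/mnt"):
    # Hypothetical reconstruction: mount the hot/cold-attached disk and
    # verify it accepts writes.
    session.cmd_status_output("mount %s %s" % (mount_disk, mount_point))
    status, output = session.cmd_status_output(
        "dd if=/dev/zero of=%s/testfile bs=1M count=100 && sync"
        % mount_point)
    logging.debug(output)
    return status == 0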
Exemplo n.º 45
def run(test, params, env):
    """
    Stress test for the hotplug feature of usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    keyboard = "yes" == params.get("usb_hotplug_keyboard", "no")
    mouse = "yes" == params.get("usb_hotplug_mouse", "no")
    tablet = "yes" == params.get("usb_hotplug_tablet", "no")
    disk = "yes" == params.get("usb_hotplug_disk", "no")

    attach_count = int(params.get("attach_count", "1"))
    attach_type = params.get("attach_type", "attach_device")
    bench_type = params.get("guest_bench", None)
    control_file = params.get("control_file", None)

    status_error = "yes" == params.get("status_error", "no")

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")

    if control_file is not None:
        params["test_control_file"] = control_file
        params["main_vm"] = vm_name
        control_path = os.path.join(test.virtdir, "control", control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm, session, control_path, None, None, params, copy_only=True)
        session.cmd("%s &" % command)

        def _is_iozone_running():
            session_tmp = vm.wait_for_login()
            return not session_tmp.cmd_status("ps -ef|grep iozone|grep -v grep")

        def _is_stress_running():
            session_tmp = vm.wait_for_login()
            return not session_tmp.cmd_status("ps -ef|grep stress|grep -v grep")

        if bench_type == "stress":
            if not utils_misc.wait_for(_is_stress_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run stress in guest.\n"
                    "Since we need to run an autotest of stress "
                    "in guest, please make sure there are "
                    "some necessary packages in guest, "
                    "such as gcc, tar, bzip2"
                )
        elif bench_type == "iozone":
            if not utils_misc.wait_for(_is_iozone_running, timeout=160):
                raise error.TestNAError(
                    "Failed to run iozone in guest.\n"
                    "Since we need to run an autotest of iozone "
                    "in guest, please make sure there are "
                    "some necessary packages in guest, "
                    "such as gcc, tar, bzip2"
                )
        logging.debug("bench is already running in guest.")
    try:
        try:
            result = None
            disk_xml = None
            tablet_xml = None
            mouse_xml = None
            if not os.path.isdir(tmp_dir):
                os.mkdir(tmp_dir)
            for i in range(attach_count):
                path = os.path.join(tmp_dir, "%s.img" % i)
                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        utils_test.libvirt.create_local_disk("file", path, size="1M")
                        attach_cmd = "drive_add"
                        attach_cmd += " 0 id=drive-usb-disk%s,if=none,file=%s" % (i, path)

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-kdb,bus=usb1.0,id=kdb"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-mouse,bus=usb1.0,id=mouse"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-tablet,bus=usb1.0,id=tablet"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                else:
                    if disk:
                        utils_test.libvirt.create_local_disk("file", path, size="1M")
                        os.chmod(path, 0o666)
                        disk_xml = Disk(type_name="file")
                        disk_xml.device = "disk"
                        disk_xml.source = disk_xml.new_disk_source(**{"attrs": {"file": path}})
                        disk_xml.driver = {"name": "qemu", "type": "raw", "cache": "none"}
                        disk_xml.target = {"dev": "sdb", "bus": "usb"}

                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        disk_xml.address = disk_xml.new_disk_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        mouse_xml = Input("mouse")
                        mouse_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        mouse_xml.address = mouse_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        tablet_xml = Input("tablet")
                        tablet_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        tablet_xml.address = tablet_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        kbd_xml = Input("keyboard")
                        kbd_xml.input_bus = "usb"
                        attributes = {"type_name": "usb", "bus": "1", "port": "0"}
                        kbd_xml.address = kbd_xml.new_input_address(**{"attrs": attributes})

                        result = virsh.attach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)

                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        attach_cmd = "drive_del"
                        attach_cmd += " drive-usb-disk"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_del"
                        attach_cmd += " mouse"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_del"
                        attach_cmd += " keyboard"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_del"
                        attach_cmd += " tablet"

                        result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                else:
                    if disk:
                        result = virsh.detach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        result = virsh.detach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        result = virsh.detach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        result = virsh.detach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
        except process.CmdError as e:
            if not status_error:
                raise error.TestFail("Failed to attach device.\n"
                                     "Detail: %s." % result)
    finally:
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        vm_xml_backup.sync()
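
A minimal sketch of the HMP round-trip the qemu_monitor branch above
repeats, assuming avocado-vt's virsh and process modules and a
hypothetical hmp_command helper name:

from avocado.utils import process
from virttest import virsh

def hmp_command(vm_name, cmd):
    # Run one human-monitor-protocol command and raise on non-zero exit,
    # mirroring the check repeated after every monitor call above.
    result = virsh.qemu_monitor_command(vm_name, cmd, options="--hmp")
    if result.exit_status:
        raise process.CmdError(result.command, result)
    return result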
Exemplo n.º 46
def run(test, params, env):
    """
    Test pci/pcie-to-pci bridge

    Hotplug interface to pci/pcie-to-pci bridge, then check xml and
    inside vm.
    Hotunplug interface, then check xml and inside vm
    Other test scenarios of pci/pcie-to-pci bridge
    """
    def create_pci_device(pci_model, pci_model_name, **kwargs):
        """
        Create a pci/pcie bridge

        :param pci_model: model of pci controller device
        :param pci_model_name: model name of pci controller device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created device object
        """
        pci_bridge = Controller('pci')
        pci_bridge.model = pci_model
        pci_bridge.model_name = {'name': pci_model_name}
        if 'index' in kwargs:
            pci_bridge.index = kwargs['index']
        if 'address' in kwargs:
            pci_bridge.address = pci_bridge.new_controller_address(
                attrs=eval(kwargs['address']))

        logging.debug('pci_bridge: %s', pci_bridge)
        return pci_bridge

    def create_iface(iface_model, iface_source, **kwargs):
        """
        Create an interface to be attached to vm

        :param iface_model: model of the interface device
        :param iface_source: source of the interface device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created interface object
        """
        iface = Interface('network')
        iface.model = iface_model
        iface.source = eval(iface_source)

        if 'mac' in kwargs:
            iface.mac_address = kwargs['mac']
        else:
            mac = utils_net.generate_mac_address_simple()
            iface.mac_address = mac

        if 'address' in kwargs:
            iface.address = iface.new_iface_address(
                attrs=eval(kwargs['address']))

        logging.debug('iface: %s', iface)
        return iface
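
    # Hypothetical usage of the two factories above (values are
    # illustrative, not taken from the test configuration):
    #   pci_br = create_pci_device('pcie-to-pci-bridge', 'pcie-pci-bridge')
    #   iface = create_iface('virtio', "{'network': 'default'}")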

    vm_name = params.get('main_vm')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '').split(';')
    case = params.get('case', '')
    hotplug = 'yes' == params.get('hotplug', 'no')

    need_pci_br = 'yes' == params.get('need_pci_br', 'no')
    pci_model = params.get('pci_model', 'pci')
    pci_model_name = params.get('pci_model_name')
    pci_br_kwargs = eval(params.get('pci_br_kwargs', '{}'))

    pci_br_has_device = 'yes' == params.get('pci_br_has_device', 'no')
    sound_dev_model_type = params.get('sound_dev_model_type', '')
    sound_dev_address = params.get('sound_dev_address', '')

    iface_model = params.get('iface_model', '')
    iface_source = params.get('iface_source', '')
    iface_kwargs = eval(params.get('iface_kwargs', '{}'))

    max_slots = int(params.get('max_slots', 31))
    pcie_br_count = int(params.get('pcie_br_count', 3))

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    vm = env.get_vm(vm_name)

    try:

        # Check if there is a pci/pcie-to-pci bridge, if so,
        # just use the existing pci/pcie-to-pci-bridge to test
        ori_pci_br = [
            dev for dev in vmxml.get_devices('controller')
            if dev.type == 'pci' and dev.model == pci_model
        ]

        if need_pci_br:
            # If there is no pci/pcie-to-pci bridge to test,
            # create one and add it to the vm
            if not ori_pci_br:
                logging.info('No %s on vm, create one', pci_model)
                pci_bridge = create_pci_device(pci_model, pci_model_name)
                vmxml.add_device(pci_bridge)
                vmxml.sync()
                logging.debug(virsh.dumpxml(vm_name))

            # Check if pci/pcie-to-pci bridge is successfully added
            vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
            cur_pci_br = [
                dev for dev in vmxml.get_devices('controller')
                if dev.type == 'pci' and dev.model == pci_model
            ]
            if not cur_pci_br:
                test.error('Failed to add %s controller to vm xml' % pci_model)

            pci_br = cur_pci_br[0]
            logging.debug('pci_br: %s', pci_br)
            pci_br_index = pci_br.index

        # If the test scenario requires another pci device on the
        # pci/pcie-to-pci bridge before hotplug, add a sound device and
        # make sure its 'bus' matches the pci bridge index
        if need_pci_br and pci_br_has_device:
            sound_dev = Sound()
            sound_dev.model_type = sound_dev_model_type
            sound_dev.address = eval(sound_dev_address % pci_br_index)
            logging.debug('sound_dev.address: %s', sound_dev.address)
            vmxml.add_device(sound_dev)
            if case != 'vm_with_pcie_br_1_br':
                vmxml.sync()

        # Test hotplug scenario
        if hotplug:
            vm.start()
            vm.wait_for_login().close()

            # Create interface to be hotplugged
            logging.info('Create interface to be hotplugged')
            iface = create_iface(iface_model, iface_source)
            mac = iface.mac_address

            result = virsh.attach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            xml_after_attach = VMXML.new_from_dumpxml(vm_name)
            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address is successfully
            # attached with address bus equal to pcie/pci bridge's index
            iface_list = [
                iface for iface in xml_after_attach.get_devices('interface')
                if iface.mac_address == mac
                and int(iface.address['attrs']['bus'], 16) == int(
                    pci_br_index, 16)
            ]

            logging.debug('iface list after attach: %s', iface_list)
            if not iface_list:
                test.error('Failed to attach interface %s' % iface)

            # Check inside vm
            def check_inside_vm(session, expect=True):
                ip_output = session.cmd('ip a')
                logging.debug('output of "ip a": %s', ip_output)

                return expect if mac in ip_output else not expect

            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, True),
                                       timeout=60,
                                       step=5):
                test.fail('Check interface inside vm failed, '
                          'interface not successfully attached: '
                          'mac address %s not found' % mac)
            session.close()

            # Test hotunplug
            result = virsh.detach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address has been
            # successfully detached
            xml_after_detach = VMXML.new_from_dumpxml(vm_name)
            iface_list_after_detach = [
                iface for iface in xml_after_detach.get_devices('interface')
                if iface.mac_address == mac
            ]

            logging.debug('iface list after detach: %s',
                          iface_list_after_detach)
            if iface_list_after_detach:
                test.fail('Failed to detach device: %s' % iface)

            # Check again inside vm
            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, False),
                                       timeout=60,
                                       step=5):
                test.fail('Check interface inside vm failed, '
                          'interface not successfully detached: '
                          'mac address %s still found' % mac)
            session.close()

        # Other test scenarios of pci/pcie
        if case:
            logging.debug('iface_kwargs: %s', iface_kwargs)

            # Set pcie-to-pci-bridge model name != pcie-pci-bridge,
            # or an invalid controller index for pcie-to-pci-bridge
            if case in ('wrong_model_name', 'invalid_index'):
                pci_bridge = create_pci_device(pci_model, pci_model_name,
                                               **pci_br_kwargs)
                vmxml.add_device(pci_bridge)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Attach device with invalid slot to pcie-to-pci-bridge
            if case == 'attach_with_invalid_slot':
                iface = create_iface(iface_model, iface_source, **iface_kwargs)
                vmxml.add_device(iface)
                result_to_check = virsh.define(vmxml.xml, debug=True)

            # Test that pcie-to-pci-bridge has 31 available slots
            if case == 'max_slots':
                target_bus = cur_pci_br[0].index
                target_bus = hex(int(target_bus))
                logging.debug('target_bus: %s', target_bus)

                # Attach 32 interfaces
                for i in range(max_slots + 1):
                    logging.debug('address: %s', iface_kwargs['address'])
                    new_iface_kwargs = {
                        'address':
                        iface_kwargs['address'] % (target_bus, hex(i + 1))
                    }
                    iface = create_iface(iface_model, iface_source,
                                         **new_iface_kwargs)
                    logging.info('Attaching interface %d', i + 1)
                    result_in_loop = virsh.attach_device(vm_name,
                                                         iface.xml,
                                                         flagstr='--config',
                                                         debug=True)

                    # Attaching the 32nd interface will fail
                    if i == max_slots:
                        status_error = True
                    libvirt.check_exit_status(result_in_loop,
                                              expect_error=status_error)
                logging.debug(virsh.dumpxml(vm_name))

                # Get all devices on pcie-to-pci-bridge from the new xml
                # and test whether their count matches max_slots
                new_xml = VMXML.new_from_dumpxml(vm_name)
                device_on_pci_br = [
                    dev for dev in new_xml.get_devices('interface')
                    if dev.address['type_name'] == 'pci'
                    and int(dev.address['attrs']['bus'], 16) == int(
                        target_bus, 16)
                ]

                logging.info('Devices found on pcie-to-pci-bridge: %d',
                             len(device_on_pci_br))
                if len(device_on_pci_br) != max_slots:
                    test.fail('Max slots is %d instead of %d' %
                              (len(device_on_pci_br), max_slots))

            # Define a guest where the pcie-to-pci-bridge controller's index <= bus
            if case.startswith('index_v_bus'):
                last_pci_index = max([
                    int(dev.index) for dev in vmxml.get_devices('controller')
                    if dev.type == 'pci'
                ])

                # New index of new pcie-bridge should be +1
                new_index = last_pci_index + 1
                if case.endswith('less_than'):
                    new_bus = new_index + 1
                elif case.endswith('equal_to'):
                    new_bus = new_index
Exemplo n.º 47
     vm.wait_for_login()
     if status_error:
         if hotplug:
             logging.debug("attaching disk, expecting error...")
             result = virsh.attach_device(vm_name, disk_xml.xml)
             libvirt.check_exit_status(result, status_error)
         else:
             test.fail("VM started unexpectedly.")
     else:
         if hotplug:
             result = virsh.attach_device(vm_name, disk_xml.xml,
                                          debug=True)
             libvirt.check_exit_status(result)
             if not check_in_vm(vm, device_target, old_parts):
                 test.fail("Check encryption disk in VM failed")
             result = virsh.detach_device(vm_name, disk_xml.xml,
                                          debug=True)
             libvirt.check_exit_status(result)
         else:
             if not check_in_vm(vm, device_target, old_parts):
                 test.fail("Check encryption disk in VM failed")
 except virt_vm.VMStartError as e:
     if status_error:
         if hotplug:
             test.fail("In hotplug scenario, VM should have "
                       "started successfully but did not. "
                       "Error: %s" % str(e))
         else:
             logging.debug("VM failed to start as expected. "
                           "Error: %s", str(e))
     else:
         # Libvirt 2.5.0 onward, AES-CBC encrypted qcow2 images is no
Exemplo n.º 48
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        # detach disk from vm
        dev_detach_status = virsh.detach_device(vm_name, disk_xml,
                                                debug=True)
        utlv.check_exit_status(dev_detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            data_dir.clean_tmp_files()
            logging.debug("Cleanup disk xml")
        if pre_def_pool == "yes":
Exemplo n.º 49
def run(test, params, env):
    """
    Stress test for the hotplug feature of usb device.
    """
    # Get the test parameters from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    keyboard = "yes" == params.get("usb_hotplug_keyboard", "no")
    mouse = "yes" == params.get("usb_hotplug_mouse", "no")
    tablet = "yes" == params.get("usb_hotplug_tablet", "no")
    disk = "yes" == params.get("usb_hotplug_disk", "no")

    attach_count = int(params.get("attach_count", "1"))
    attach_type = params.get("attach_type", "attach_device")
    bench_type = params.get("guest_bench", None)
    control_file = params.get("control_file", None)

    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")

    if control_file is not None:
        params["test_control_file"] = control_file
        params["main_vm"] = vm_name
        control_path = os.path.join(test.virtdir, "control", control_file)

        session = vm.wait_for_login()
        command = utils_test.run_autotest(vm,
                                          session,
                                          control_path,
                                          None,
                                          None,
                                          params,
                                          copy_only=True)
        session.cmd("%s &" % command)

        def _is_iozone_running():
            session_tmp = vm.wait_for_login()
            return (
                not session_tmp.cmd_status("ps -ef|grep iozone|grep -v grep"))

        def _is_stress_running():
            session_tmp = vm.wait_for_login()
            return (
                not session_tmp.cmd_status("ps -ef|grep stress|grep -v grep"))

        if bench_type == "stress":
            if not utils_misc.wait_for(_is_stress_running, timeout=160):
                test.cancel("Failed to run stress in guest.\n"
                            "Since we need to run a autotest of iozone "
                            "in guest, so please make sure there are "
                            "some necessary packages in guest,"
                            "such as gcc, tar, bzip2")
        elif bench_type == "iozone":
            if not utils_misc.wait_for(_is_iozone_running, timeout=160):
                test.cancel("Failed to run iozone in guest.\n"
                            "Since we need to run a autotest of iozone "
                            "in guest, so please make sure there are "
                            "some necessary packages in guest,"
                            "such as gcc, tar, bzip2")
        logging.debug("bench is already running in guest.")
    try:
        try:
            result = None
            disk_xml = None
            tablet_xml = None
            mouse_xml = None
            if not os.path.isdir(tmp_dir):
                os.mkdir(tmp_dir)
            for i in range(attach_count):
                path = os.path.join(tmp_dir, "%s.img" % i)
                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        utils_test.libvirt.create_local_disk("file",
                                                             path,
                                                             size="1M")
                        attach_cmd = "drive_add"
                        attach_cmd += (
                            " 0 id=drive-usb-disk%s,if=none,file=%s" %
                            (i, path))

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-kdb,bus=usb1.0,id=kdb"

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-mouse,bus=usb1.0,id=mouse"

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_add"
                        attach_cmd += " usb-tablet,bus=usb1.0,id=tablet"

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                else:
                    if disk:
                        utils_test.libvirt.create_local_disk("file",
                                                             path,
                                                             size="1M")
                        os.chmod(path, 0o666)
                        disk_xml = Disk(type_name="file")
                        disk_xml.device = "disk"
                        disk_xml.source = disk_xml.new_disk_source(
                            **{"attrs": {
                                'file': path
                            }})
                        disk_xml.driver = {
                            "name": "qemu",
                            "type": 'raw',
                            "cache": "none"
                        }
                        disk_xml.target = {"dev": 'sdb', "bus": "usb"}

                        attributes = {
                            'type_name': "usb",
                            'bus': "1",
                            'port': "0"
                        }
                        disk_xml.address = disk_xml.new_disk_address(
                            **{"attrs": attributes})

                        result = virsh.attach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        mouse_xml = Input("mouse")
                        mouse_xml.input_bus = "usb"
                        attributes = {
                            'type_name': "usb",
                            'bus': "1",
                            'port': "0"
                        }
                        mouse_xml.address = mouse_xml.new_input_address(
                            **{"attrs": attributes})

                        result = virsh.attach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        tablet_xml = Input("tablet")
                        tablet_xml.input_bus = "usb"
                        attributes = {
                            'type_name': "usb",
                            'bus': "1",
                            'port': "0"
                        }
                        tablet_xml.address = tablet_xml.new_input_address(
                            **{"attrs": attributes})

                        result = virsh.attach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        kbd_xml = Input("keyboard")
                        kbd_xml.input_bus = "usb"
                        attributes = {
                            'type_name': "usb",
                            'bus': "1",
                            'port': "0"
                        }
                        kbd_xml.address = kbd_xml.new_input_address(
                            **{"attrs": attributes})

                        result = virsh.attach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)

                if attach_type == "qemu_monitor":
                    options = "--hmp"
                    if disk:
                        attach_cmd = "drive_del"
                        attach_cmd += (" drive-usb-disk")

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        attach_cmd = "device_del"
                        attach_cmd += (" mouse")

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        attach_cmd = "device_del"
                        attach_cmd += (" keyboard")

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        attach_cmd = "device_del"
                        attach_cmd += (" tablet")

                        result = virsh.qemu_monitor_command(vm_name,
                                                            attach_cmd,
                                                            options=options)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                else:
                    if disk:
                        result = virsh.detach_device(vm_name, disk_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if mouse:
                        result = virsh.detach_device(vm_name, mouse_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if keyboard:
                        result = virsh.detach_device(vm_name, kbd_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
                    if tablet:
                        result = virsh.detach_device(vm_name, tablet_xml.xml)
                        if result.exit_status:
                            raise process.CmdError(result.command, result)
        except process.CmdError as e:
            if not status_error:
                test.fail("failed to attach device.\n" "Detail: %s." % result)
    finally:
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        vm_xml_backup.sync()
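
For orientation, the hotplugged input-device XML built above serializes to
roughly the following (a sketch; the exact output depends on avocado-vt's
Input class):

input_xml_sketch = """
<input type='mouse' bus='usb'>
  <address type='usb' bus='1' port='0'/>
</input>
"""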
Exemplo n.º 50
                    dt_options = "--config"
                ret = virsh.detach_disk(vm_name, device_targets[i],
                                        dt_options)
                libvirt.check_exit_status(ret)
            # Check disks in VM after hotunplug.
            if check_patitions_hotunplug:
                if not check_vm_partitions(devices,
                                           device_targets, False):
                    raise error.TestFail("See device in VM after hotunplug")

        elif hotplug:
            for i in range(len(disks_xml)):
                if len(device_attach_error) > i:
                    if device_attach_error[i] == "yes":
                        continue
                ret = virsh.detach_device(vm_name, disks_xml[i].xml,
                                          flagstr=attach_option)
                os.remove(disks_xml[i].xml)
                libvirt.check_exit_status(ret)

            # Check disks in VM after hotunplug.
            if check_patitions_hotunplug:
                if not check_vm_partitions(devices,
                                           device_targets, False):
                    raise error.TestFail("See device in VM after hotunplug")

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
Exemplo n.º 51
def run(test, params, env):
    """
    Test rng device options.

    1.Prepare test environment, destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def check_rng_xml(xml_set, exists=True):
        """
        Check rng xml in/not in domain xml
        :param xml_set: rng xml object for setting
        :param exists: Check xml exists or not in domain xml

        :return: boolean
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Get all current xml rng devices
        xml_devices = vmxml.devices
        rng_devices = xml_devices.by_device_tag("rng")
        logging.debug("rng_devices is %s", rng_devices)

        # Check whether the xml attributes match what was set
        try:
            rng_index = xml_devices.index(rng_devices[0])
            xml_get = xml_devices[rng_index]

            if not exists:
                # should be detach device check
                return False
        except IndexError:
            if exists:
                # should be attach device check
                return False
            else:
                logging.info("Can not find rng xml as expected")
                return True

        def get_compare_values(xml_set, xml_get, rng_attr):
            """
            Get set and get value to compare

            :param xml_set: setting xml object
            :param xml_get: getting xml object
            :param rng_attr: attribute of rng device
            :return: set and get value in xml
            """
            try:
                set_value = xml_set[rng_attr]
            except xcepts.LibvirtXMLNotFoundError:
                set_value = None
            try:
                get_value = xml_get[rng_attr]
            except xcepts.LibvirtXMLNotFoundError:
                get_value = None
            logging.debug(
                "xml_set value of %s is %s, xml_get value is %s",
                rng_attr, set_value, get_value)
            return (set_value, get_value)

        match = True
        for rng_attr in xml_set.__slots__:
            set_value, get_value = get_compare_values(xml_set, xml_get,
                                                      rng_attr)
            logging.debug("rng_attr=%s, set_value=%s, get_value=%s", rng_attr,
                          set_value, get_value)
            if set_value and set_value != get_value:
                if rng_attr == 'backend':
                    for bak_attr in xml_set.backend.__slots__:
                        set_backend, get_backend = get_compare_values(
                            xml_set.backend, xml_get.backend, bak_attr)
                        if set_backend and set_backend != get_backend:
                            if bak_attr == 'source':
                                set_source = xml_set.backend.source
                                get_source = xml_get.backend.source
                                find = False
                                for i in range(len(set_source)):
                                    for j in get_source:
                                        if set(set_source[i].items()).issubset(
                                                j.items()):
                                            find = True
                                            break
                                    if not find:
                                        logging.debug(
                                            "set source(%s) not in get source(%s)",
                                            set_source[i], get_source)
                                        match = False
                                        break
                                    else:
                                        continue
                            else:
                                logging.debug(
                                    "set backend(%s)- %s not equal to get backend-%s",
                                    rng_attr, set_backend, get_backend)
                                match = False
                                break
                        else:
                            continue
                        if not match:
                            break
                else:
                    logging.debug("set value(%s)-%s not equal to get value-%s",
                                  rng_attr, set_value, get_value)
                    match = False
                    break
            else:
                continue
            if not match:
                break

        if match:
            logging.info("Find same rng xml as hotpluged")
        else:
            test.fail("Rng xml in VM not same with attached xml")

        return True
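
    # Typical call pattern for the checker above (see the hotplug branch
    # below): after a successful virsh.attach_device(),
    # check_rng_xml(rng_xml, True) verifies the device landed in the live
    # xml; after detach, check_rng_xml(rng_xml, False) verifies it is gone.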

    def modify_rng_xml(dparams, sync=True, get_xml=False):
        """
        Modify rng device xml options

        :param dparams: parameters used to organize the xml
        :param sync: whether to sync to the domain xml; if get_xml is True,
                     sync will not take effect
        :param get_xml: whether to return the device xml object
        :return: the rng xml object if get_xml is True
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_model = dparams.get("backend_model", "random")
        backend_type = dparams.get("backend_type")
        backend_dev = dparams.get("backend_dev", "")
        backend_source_list = dparams.get("backend_source", "").split()
        backend_protocol = dparams.get("backend_protocol")
        rng_alias = dparams.get("rng_alias")
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        rng_xml = rng.Rng()
        rng_xml.rng_model = rng_model
        if rng_rate:
            rng_xml.rate = ast.literal_eval(rng_rate)
        backend = rng.Rng.Backend()
        backend.backend_model = backend_model
        if backend_type:
            backend.backend_type = backend_type
        if backend_dev:
            backend.backend_dev = backend_dev
        if backend_source_list:
            source_list = [
                ast.literal_eval(source) for source in backend_source_list
            ]
            backend.source = source_list
        if backend_protocol:
            backend.backend_protocol = backend_protocol
        rng_xml.backend = backend
        if detach_alias:
            rng_xml.alias = dict(name=rng_alias)
        if with_packed:
            rng_xml.driver = dict(packed=driver_packed)

        logging.debug("Rng xml: %s", rng_xml)
        if get_xml:
            return rng_xml
        if sync:
            vmxml.add_device(rng_xml)
            vmxml.xmltreefile.write()
            vmxml.sync()
        else:
            status = libvirt.exec_virsh_edit(
                vm_name, [(r":/<devices>/s/$/%s" %
                           re.findall(r"<rng.*<\/rng>", str(rng_xml),
                                      re.M)[0].replace("/", "\/"))])
            if not status:
                test.fail("Failed to edit vm xml")

    def check_qemu_cmd(dparams):
        """
        Verify qemu-kvm command line.
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_type = dparams.get("backend_type")
        backend_model = dparams.get("backend_model")
        backend_source_list = dparams.get("backend_source", "").split()
        cmd = ("ps -ef | grep %s | grep -v grep" % vm_name)
        chardev = src_host = src_port = None
        if backend_type == "tcp":
            chardev = "socket"
        elif backend_type == "udp":
            chardev = "udp"
        for bc_source in backend_source_list:
            source = ast.literal_eval(bc_source)
            if "mode" in source and source['mode'] == "connect":
                src_host = source['host']
                src_port = source['service']

        if backend_model == "builtin":
            cmd += (" | grep rng-builtin")
        if chardev and src_host and src_port:
            cmd += (" | grep 'chardev %s,.*host=%s,port=%s'" %
                    (chardev, src_host, src_port))
        if rng_model == "virtio":
            cmd += (" | grep 'device %s'" % dparams.get("rng_device"))
        if rng_rate:
            rate = ast.literal_eval(rng_rate)
            cmd += (" | grep 'max-bytes=%s,period=%s'" %
                    (rate['bytes'], rate['period']))
        if with_packed:
            cmd += (" | grep 'packed=%s'" % driver_packed)
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Can't see rng option" " in command line")

    def check_host():
        """
        Check random device on host
        """
        backend_dev = params.get("backend_dev")
        if backend_dev:
            cmd = "lsof |grep %s" % backend_dev
            ret = process.run(cmd, ignore_status=True, shell=True)
            if ret.exit_status or not ret.stdout_text.count("qemu"):
                test.fail("Failed to check random device"
                          " on host, command output: %s" % ret.stdout_text)

    def check_snapshot(bgjob=None):
        """
        Do snapshot operation and check the results
        """
        snapshot_name1 = "snap.s1"
        snapshot_name2 = "snap.s2"
        if not snapshot_vm_running:
            vm.destroy(gracefully=False)
        ret = virsh.snapshot_create_as(vm_name, snapshot_name1, debug=True)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name, debug=True)
        if snapshot_name not in snap_lists:
            test.fail("Snapshot %s doesn't exist" % snapshot_name)

        if snapshot_vm_running:
            options = "--force"
        else:
            options = ""
        ret = virsh.snapshot_revert(vm_name,
                                    ("%s %s" % (snapshot_name, options)),
                                    debug=True)
        libvirt.check_exit_status(ret)
        ret = virsh.dumpxml(vm_name, debug=True)
        if ret.stdout.strip().count("<rng model="):
            test.fail("Found rng device in xml")

        if snapshot_with_rng:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            if bgjob:
                bgjob.kill_func()
            modify_rng_xml(params, False)

        # Start the domain before disk-only snapshot
        if vm.is_dead():
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils_misc.AsyncJob(cmd)
            vm.start()
            vm.wait_for_login().close()
        err_msgs = ("live disk snapshot not supported"
                    " with this QEMU binary")
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only" % snapshot_name2,
                                       debug=True)
        if ret.exit_status:
            if ret.stderr.count(err_msgs):
                test.cancel(err_msgs)
            else:
                test.fail("Failed to create external snapshot")
        snap_lists = virsh.snapshot_list(vm_name, debug=True)
        if snapshot_name2 not in snap_lists:
            test.fail("Failed to check snapshot list")

        ret = virsh.domblklist(vm_name, debug=True)
        if not ret.stdout.strip().count(snapshot_name2):
            test.fail("Failed to find snapshot disk")

    def check_guest_dump(session, exists=True):
        """
        Check guest with hexdump

        :param session: ssh session to guest
        :param exists: check rng device exists/not exists
        """
        check_cmd = "hexdump /dev/hwrng -n 100"
        try:
            status = session.cmd_status(check_cmd, 60)

            if status != 0 and exists:
                test.fail("Fail to check hexdump in guest")
            elif not exists:
                logging.info("hexdump cmd failed as expected")
        except aexpect.exceptions.ShellTimeoutError:
            if not exists:
                test.fail("Still can find rng device in guest")
            else:
                logging.info("Hexdump do not fail with error")

    def check_guest(session, expect_fail=False):
        """
        Check random device on guest

        :param session: ssh session to guest
        :param expect_fail: expect the dd cmd pass or fail
        """
        rng_files = ("/sys/devices/virtual/misc/hw_random/rng_available",
                     "/sys/devices/virtual/misc/hw_random/rng_current")
        rng_avail = session.cmd_output("cat %s" % rng_files[0],
                                       timeout=timeout).strip()
        rng_currt = session.cmd_output("cat %s" % rng_files[1],
                                       timeout=timeout).strip()
        logging.debug("rng avail:%s, current:%s", rng_avail, rng_currt)
        if not rng_currt.count("virtio") or rng_currt not in rng_avail:
            test.fail("Failed to check rng file on guest")

        # Read the random device
        rng_rate = params.get("rng_rate")
        # For the rng rate test this command returns in a short time,
        # but for other tests it will hang
        cmd = ("dd if=/dev/hwrng of=rng.test count=100" " && rm -f rng.test")
        try:
            ret, output = session.cmd_status_output(cmd, timeout=timeout)
            if ret and expect_fail:
                logging.info("dd cmd failed as expected")
            elif ret:
                test.fail("Failed to read the random device")
        except aexpect.exceptions.ShellTimeoutError:
            logging.info("dd cmd timeout")
            # Close session as the current session still hang on last cmd
            session.close()
            session = vm.wait_for_login()

            if expect_fail:
                test.fail("Still can find rng device in guest")
            else:
                logging.info("dd cmd do not fail with error")
                # Check if file have data
                size = session.cmd_output("wc -c rng.test").split()[0]
                if int(size) > 0:
                    logging.info("Data read from /dev/hwrng is not empty,"
                                 " size %s", size)
                else:
                    test.fail("No data was read from /dev/hwrng")
        finally:
            session.cmd("rm -f rng.test")

        if rng_rate:
            rate_bytes, rate_period = list(ast.literal_eval(rng_rate).values())
            rate_conf = float(rate_bytes) / (float(rate_period) / 1000)
            ret = re.search(r"(\d+) bytes.*copied, (\d+.\d+) s", output, re.M)
            if not ret:
                test.fail("Can't find rate from output")
            rate_real = float(ret.group(1)) / float(ret.group(2))
            logging.debug("Find rate: %s, config rate: %s", rate_real,
                          rate_conf)
            if rate_real > rate_conf * 1.2:
                test.fail("The rate of reading exceed"
                          " the limitation of configuration")
        if device_num > 1:
            rng_dev = rng_avail.split()
            if len(rng_dev) != device_num:
                test.cancel("Multiple virtio-rng devices are not"
                            " supported on this guest kernel. "
                            "Bug: https://bugzilla.redhat.com/"
                            "show_bug.cgi?id=915335")
            session.cmd("echo -n %s > %s" % (rng_dev[1], rng_files[1]))
            # Read the random device
            if session.cmd_status(cmd, timeout=timeout):
                test.fail("Failed to read the random device")

    def get_rng_device(guest_arch, rng_model):
        """
        Return the expected rng device in qemu cmd
        :param guest_arch: e.g. x86_64
        :param rng_model: the value for //rng@model, e.g. "virtio"
        :return: expected device type in qemu cmd
        """
        if "virtio" in rng_model:
            return "virtio-rng-pci" if "s390x" not in guest_arch else "virtio-rng-ccw"
        else:
            test.fail("Unknown rng model %s" % rng_model)

    start_error = "yes" == params.get("start_error", "no")
    status_error = "yes" == params.get("status_error", "no")

    test_host = "yes" == params.get("test_host", "no")
    test_guest = "yes" == params.get("test_guest", "no")
    test_guest_dump = "yes" == params.get("test_guest_dump", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    snapshot_vm_running = "yes" == params.get("snapshot_vm_running", "no")
    snapshot_with_rng = "yes" == params.get("snapshot_with_rng", "no")
    snapshot_name = params.get("snapshot_name")
    device_num = int(params.get("device_num", 1))
    detach_alias = "yes" == params.get("rng_detach_alias", "no")
    detach_alias_options = params.get("rng_detach_alias_options")
    attach_rng = "yes" == params.get("rng_attach_device", "no")
    attach_options = params.get("rng_attach_options", "")
    random_source = "yes" == params.get("rng_random_source", "yes")
    timeout = int(params.get("timeout", 600))
    wait_timeout = int(params.get("wait_timeout", 60))
    with_packed = "yes" == params.get("with_packed", "no")
    driver_packed = params.get("driver_packed", "on")

    if params.get("backend_model"
                  ) == "builtin" and not libvirt_version.version_compare(
                      6, 2, 0):
        test.cancel("Builtin backend is not supported on this libvirt version")

    if device_num > 1 and not libvirt_version.version_compare(1, 2, 7):
        test.cancel("Multiple virtio-rng devices not "
                    "supported on this libvirt version")

    if with_packed and not libvirt_version.version_compare(6, 3, 0):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")

    guest_arch = params.get("vm_arch_name", "x86_64")

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("vm xml is %s", vmxml_backup)

    # Try to install rng-tools on the host; it can speed up the random rate.
    # If installation fails, ignore the error and continue the test
    if utils_package.package_install(["rng-tools"]):
        rngd_conf = "/etc/sysconfig/rngd"
        rngd_srv = "/usr/lib/systemd/system/rngd.service"
        if os.path.exists(rngd_conf):
            # For rhel6 host, add extraoptions
            with open(rngd_conf, 'w') as f_rng:
                f_rng.write('EXTRAOPTIONS="--rng-device /dev/urandom"')
        elif os.path.exists(rngd_srv):
            # For rhel7 host, modify start options
            rngd_srv_conf = "/etc/systemd/system/rngd.service"
            if not os.path.exists(rngd_srv_conf):
                shutil.copy(rngd_srv, rngd_srv_conf)
            process.run("sed -i -e 's#^ExecStart=.*#ExecStart=/sbin/rngd"
                        " -f -r /dev/urandom -o /dev/random#' %s" %
                        rngd_srv_conf,
                        shell=True)
            process.run('systemctl daemon-reload')
        process.run("service rngd start")

    # Build the xml and run test.
    try:
        bgjob = None

        # Prepare xml, make sure no extra rng dev.
        vmxml = vmxml_backup.copy()
        vmxml.remove_all_device_by_type('rng')
        vmxml.sync()
        logging.debug("Prepared vm xml without rng dev is %s", vmxml)

        # Take snapshot if needed
        if snapshot_name:
            if snapshot_vm_running:
                vm.start()
                vm.wait_for_login().close()
            ret = virsh.snapshot_create_as(vm_name, snapshot_name, debug=True)
            libvirt.check_exit_status(ret)

        # Destroy VM first
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Build vm xml.
        dparams = {}
        if device_num > 1:
            for i in range(device_num):
                rng_model = params.get("rng_model_%s" % i, "virtio")
                dparams[i] = {"rng_model": rng_model}
                dparams[i].update({
                    "backend_model":
                    params.get("backend_model_%s" % i, "random")
                })
                dparams[i].update(
                    {"rng_device": get_rng_device(guest_arch, rng_model)})
                bk_type = params.get("backend_type_%s" % i)
                if bk_type:
                    dparams[i].update({"backend_type": bk_type})
                bk_dev = params.get("backend_dev_%s" % i)
                if bk_dev:
                    dparams[i].update({"backend_dev": bk_dev})
                bk_src = params.get("backend_source_%s" % i)
                if bk_src:
                    dparams[i].update({"backend_source": bk_src})
                bk_pro = params.get("backend_protocol_%s" % i)
                if bk_pro:
                    dparams[i].update({"backend_protocol": bk_pro})
                modify_rng_xml(dparams[i], False)
        else:
            params.update({
                "rng_device":
                get_rng_device(guest_arch, params.get("rng_model", "virtio"))
            })

            if detach_alias:
                device_alias = "ua-" + str(uuid.uuid4())
                params.update({"rng_alias": device_alias})

            rng_xml = modify_rng_xml(params, not test_snapshot, attach_rng)

        try:
            # Add random server
            if random_source and params.get(
                    "backend_type") == "tcp" and not test_guest_dump:
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils_misc.AsyncJob(cmd)

            vm.start()
            if attach_rng:
                ret = virsh.attach_device(vm_name,
                                          rng_xml.xml,
                                          flagstr=attach_options,
                                          wait_remove_event=True,
                                          debug=True,
                                          ignore_status=True)
                libvirt.check_exit_status(ret, status_error)
                if status_error:
                    return
                if not check_rng_xml(rng_xml, True):
                    test.fail("Can not find rng device in xml")

            else:
                # Start the VM.
                if start_error:
                    test.fail("VM started unexpectedly")

            if test_qemu_cmd and not attach_rng:
                if device_num > 1:
                    for i in range(device_num):
                        check_qemu_cmd(dparams[i])
                else:
                    check_qemu_cmd(params)
            if test_host:
                check_host()
            session = vm.wait_for_login()
            if test_guest:
                check_guest(session)
            if test_guest_dump:
                if params.get("backend_type") == "tcp":
                    cmd = "cat /dev/random | nc -4 localhost 1024"
                    bgjob = utils_misc.AsyncJob(cmd)
                check_guest_dump(session, True)
            if test_snapshot:
                check_snapshot(bgjob)

            if detach_alias:
                result = virsh.detach_device_alias(vm_name,
                                                   device_alias,
                                                   detach_alias_options,
                                                   debug=True)
                if "--config" in detach_alias_options:
                    vm.destroy()

                def have_rng_xml():
                    """
                    check if xml have rng item
                    """
                    output = virsh.dumpxml(vm_name)
                    return not output.stdout.strip().count("<rng model=")

                if utils_misc.wait_for(have_rng_xml, wait_timeout):
                    logging.info("Cannot find rng device in xml after detach")
                else:
                    test.fail("Found rng device in xml after detach")

            # Detach after attach
            if attach_rng:
                ret = virsh.detach_device(vm_name,
                                          rng_xml.xml,
                                          flagstr=attach_options,
                                          debug=True,
                                          ignore_status=True)
                libvirt.check_exit_status(ret, status_error)
                if utils_misc.wait_for(lambda: check_rng_xml(rng_xml, False),
                                       wait_timeout):
                    logging.info("Find same rng xml as hotpluged")
                else:
                    test.fail("Rng device still exists after detach!")

                if test_guest_dump:
                    check_guest_dump(session, False)

            session.close()
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not start_error:
                test.fail('VM failed to start, '
                          'please refer to https://bugzilla.'
                          'redhat.com/show_bug.cgi?id=1220252:'
                          '\n%s' % details)
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name, debug=True)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
                virsh.snapshot_delete(vm_name,
                                      snapshot,
                                      "--metadata",
                                      debug=True)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if bgjob:
            bgjob.kill_func()
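
For orientation, modify_rng_xml above assembles device XML along these lines
for the default virtio/random case (a sketch with illustrative values, not
the exact output):

rng_xml_sketch = """
<rng model='virtio'>
  <rate bytes='1234' period='2000'/>
  <backend model='random'>/dev/urandom</backend>
</rng>
"""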
            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd += (" drive-usb-%s" % i)
                else:
                    if usb_type == "mouse":
                        attach_cmd += " mouse"
                    elif usb_type == "tablet":
                        attach_cmd += " tablet"
                    else:
                        attach_cmd += " keyboard"

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise error.CmdError(result.command, result)
            else:
                result = virsh.detach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise error.CmdError(result.command, result)
    except error.CmdError, e:
        if not status_error:
            # live attach of device 'input' is not supported
            ret = result.stderr.find("Operation not supported")
            if usb_type != "storage" and ret > -1:
                pass
            else:
                raise error.TestFail("failed to attach device.\nDetail: %s." % result)
    finally:
        session.close()
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        utils_selinux.set_status(backup_sestatus)
Exemplo n.º 53
def run(test, params, env):
    """
    Test memory management of nvdimm
    """
    vm_name = params.get('main_vm')

    nvdimm_file = params.get('nvdimm_file')
    check = params.get('check', '')
    status_error = "yes" == params.get('status_error', 'no')
    error_msg = params.get('error_msg', '')
    qemu_checks = params.get('qemu_checks', '').split('`')
    wait_sec = int(params.get('wait_sec', 5))
    test_str = 'This is a test'

    def check_boot_config(session):
        """
        Check /boot/config-$KVER file
        """
        check_list = [
            'CONFIG_LIBNVDIMM=m', 'CONFIG_BLK_DEV_PMEM=m', 'CONFIG_ACPI_NFIT=m'
        ]
        current_boot = session.cmd('uname -r').strip()
        content = session.cmd('cat /boot/config-%s' % current_boot).strip()
        for item in check_list:
            if item in content:
                logging.info(item)
            else:
                logging.error(item)
                test.fail('/boot/config content not correct')

    def check_file_in_vm(session, path, expect=True):
        """
        Check whether the existence of file meets expectation
        """
        exist = session.cmd_status('ls %s' % path)
        logging.debug(exist)
        exist = True if exist == 0 else False
        status = '' if exist else 'NOT'
        logging.info('File %s does %s exist', path, status)
        if exist != expect:
            err_msg = 'Existence doesn\'t meet expectation: %s ' % path
            if expect:
                err_msg += 'should exist.'
            else:
                err_msg += 'should not exist.'
            test.fail(err_msg)

    def create_cpuxml():
        """
        Create cpu xml for test
        """
        cpu_params = {
            k: v
            for k, v in params.items() if k.startswith('cpuxml_')
        }
        logging.debug(cpu_params)
        cpu_xml = vm_xml.VMCPUXML()
        cpu_xml.xml = "<cpu><numa/></cpu>"
        if 'cpuxml_numa_cell' in cpu_params:
            cpu_params['cpuxml_numa_cell'] = cpu_xml.dicts_to_cells(
                eval(cpu_params['cpuxml_numa_cell']))
        for attr_key in cpu_params:
            val = cpu_params[attr_key]
            logging.debug('Set cpu params')
            setattr(cpu_xml, attr_key.replace('cpuxml_', ''),
                    eval(val) if ':' in val else val)
        logging.debug(cpu_xml)
        return cpu_xml.copy()
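
    # A rough sketch of the cpuxml_ params create_cpuxml() consumes; the
    # names and values below are illustrative, not from a real cfg file:
    #   cpuxml_mode = host-model
    #   cpuxml_numa_cell = [{'id': '0', 'cpus': '0-1', 'memory': '1048576'}]
    # dicts_to_cells() converts the evaluated cell list into <numa><cell/>
    # sub-elements before the remaining cpuxml_ attributes are set.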

    def create_nvdimm_xml(**mem_param):
        """
        Create xml of nvdimm memory device
        """
        mem_xml = utils_hotplug.create_mem_xml(
            tg_size=mem_param['target_size'],
            mem_addr={'slot': mem_param['address_slot']},
            tg_sizeunit=mem_param['target_size_unit'],
            tg_node=mem_param['target_node'],
            mem_discard=mem_param.get('discard'),
            mem_model="nvdimm",
            lb_size=mem_param.get('label_size'),
            lb_sizeunit=mem_param.get('label_size_unit'),
            mem_access=mem_param['mem_access'],
            uuid=mem_param.get('uuid'))

        source_xml = memory.Memory.Source()
        source_xml.path = mem_param['source_path']
        mem_xml.source = source_xml
        logging.debug(mem_xml)

        return mem_xml.copy()
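
    # For orientation only: the device built above should serialize roughly
    # like the usual libvirt nvdimm element (values illustrative):
    #   <memory model='nvdimm' access='shared'>
    #     <source><path>/tmp/nvdimm</path></source>
    #     <target>
    #       <size unit='MiB'>512</size>
    #       <node>0</node>
    #       <label><size unit='KiB'>128</size></label>
    #     </target>
    #   </memory>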

    def check_nvdimm_file(file_name):
        """
        check if the file exists in nvdimm memory device

        :param file_name: the file name in nvdimm device
        """
        vm_session = vm.wait_for_login()
        if test_str not in vm_session.cmd('cat /mnt/%s ' % file_name):
            test.fail('"%s" should be in output' % test_str)

    bkxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    IS_PPC_TEST = 'ppc64le' in platform.machine().lower()
    if IS_PPC_TEST:
        if not libvirt_version.version_compare(6, 2, 0):
            test.cancel('Libvirt version should be >= 6.2.0'
                        ' to support nvdimm on pseries')

    try:
        vm = env.get_vm(vm_name)
        # Create nvdimm file on the host
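        # (truncate -s creates a sparse file, so the 512M is not actually
        # allocated on the host until it is written to)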
        process.run('truncate -s 512M %s' % nvdimm_file, verbose=True)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Set cpu according to params
        cpu_xml = create_cpuxml()
        vmxml.cpu = cpu_xml

        # Update other vcpu, memory info according to params
        update_vm_args = {
            k: params[k]
            for k in params if k.startswith('setvm_')
        }
        logging.debug(update_vm_args)
        for key, value in list(update_vm_args.items()):
            attr = key.replace('setvm_', '')
            logging.debug('Set %s = %s', attr, value)
            setattr(vmxml, attr, int(value) if value.isdigit() else value)
        logging.debug(virsh.dumpxml(vm_name).stdout_text)

        # Add an nvdimm mem device to vm xml
        nvdimm_params = {
            k.replace('nvdimmxml_', ''): v
            for k, v in params.items() if k.startswith('nvdimmxml_')
        }
        nvdimm_xml = create_nvdimm_xml(**nvdimm_params)
        vmxml.add_device(nvdimm_xml)
        check_define_list = ['ppc_no_label', 'discard']
        if libvirt_version.version_compare(7, 0, 0):
            check_define_list.append('less_than_256')
        if check in check_define_list:
            result = virsh.define(vmxml.xml, debug=True)
            libvirt.check_result(result, expected_fails=[error_msg])
            return
        vmxml.sync()
        logging.debug(virsh.dumpxml(vm_name).stdout_text)

        if IS_PPC_TEST:
            # Check whether uuid is automatically created
            new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if new_xml.xmltreefile.find('devices/memory/uuid') is None:
                test.fail('uuid should be generated automatically.')
            vm_nvdimm_xml = new_xml.get_devices('memory')[0]
            qemu_checks.append('uuid=%s' % vm_nvdimm_xml.uuid)

            # Check memory target size
            target_size = vm_nvdimm_xml.target.size
            logging.debug('Target size: %s', target_size)

            if check == 'less_than_256':
                if not libvirt_version.version_compare(7, 0, 0):
                    result = virsh.start(vm_name, debug=True)
                    libvirt.check_exit_status(result, status_error)
                    libvirt.check_result(result, error_msg)
                    return

        virsh.start(vm_name, debug=True, ignore_status=False)

        # Check qemu command line one by one
        if IS_PPC_TEST:
            list(map(libvirt.check_qemu_cmd_line, qemu_checks))

        alive_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        # Check if the guest support NVDIMM:
        # check /boot/config-$KVER file
        vm_session = vm.wait_for_login()
        if not IS_PPC_TEST:
            check_boot_config(vm_session)

        # ppc test requires ndctl
        if IS_PPC_TEST:
            if not utils_package.package_install('ndctl', session=vm_session):
                test.error('Cannot install ndctl to vm')
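            # On pseries the nvdimm label area starts uninitialized, so a
            # namespace must be created explicitly before a /dev/pmem0
            # block device appears in the guest.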
            logging.debug(
                vm_session.cmd_output(
                    'ndctl create-namespace --mode=fsdax --region=region0'))

        # check /dev/pmem0 existed inside guest
        check_file_in_vm(vm_session, '/dev/pmem0')

        if check == 'back_file':
            # Create a file system on /dev/pmem0
            if any(ver in platform.platform() for ver in ('el8', 'el9')):
                vm_session.cmd('mkfs.xfs -f /dev/pmem0 -m reflink=0')
            else:
                vm_session.cmd('mkfs.xfs -f /dev/pmem0')

            vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
            vm_session.cmd('echo \"%s\" >/mnt/foo' % test_str)
            vm_session.cmd('umount /mnt')
            vm_session.close()

            # Shutdown the guest, then start it, remount /dev/pmem0,
            # check if the test file is still on the file system
            vm.destroy()
            vm.start()
            vm_session = vm.wait_for_login()

            vm_session.cmd('mount -o dax /dev/pmem0 /mnt')
            if test_str not in vm_session.cmd('cat /mnt/foo'):
                test.fail('\"%s\" should be in /mnt/foo' % test_str)

            # From the host, check the file has changed:
            host_output = process.run('hexdump -C %s' % nvdimm_file,
                                      shell=True,
                                      verbose=True).stdout_text
            if test_str not in host_output:
                test.fail('\"%s\" should be in output' % test_str)

            # Shutdown the guest, and edit the xml,
            # include: access='private'
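            # With access='private' qemu maps the backing file with share=no
            # (the ppc branch below verifies this on the qemu command line),
            # so guest writes land in a private copy and are discarded once
            # the guest shuts down.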
            vm_session.close()
            vm.destroy()
            vm_devices = vmxml.devices
            nvdimm_device = vm_devices.by_device_tag('memory')[0]
            nvdimm_index = vm_devices.index(nvdimm_device)
            vm_devices[nvdimm_index].mem_access = 'private'
            vmxml.devices = vm_devices
            vmxml.sync()

            # Login to the guest, mount the /dev/pmem0 and .
            # create a file: foo-private
            vm.start()
            vm_session = vm.wait_for_login()

            if IS_PPC_TEST:
                libvirt.check_qemu_cmd_line('mem-path=%s,share=no' % nvdimm_file)

            private_str = 'This is a test for foo-private'
            vm_session.cmd('mount -o dax /dev/pmem0 /mnt/')

            file_private = 'foo-private'
            vm_session.cmd("echo '%s' >/mnt/%s" % (private_str, file_private))
            if private_str not in vm_session.cmd('cat /mnt/%s' % file_private):
                test.fail('"%s" should be in output' % private_str)

            # Shutdown the guest, then start it,
            # check the file: foo-private is no longer existed
            vm_session.close()
            vm.destroy()

            vm.start()
            vm_session = vm.wait_for_login()
            vm_session.cmd('mount -o dax /dev/pmem0 /mnt/')
            if file_private in vm_session.cmd('ls /mnt/'):
                test.fail('%s should not exist, for it was '
                          'created when access=private' % file_private)

        if check == 'label_back_file':
            # Create an xfs file system on /dev/pmem0
            if any(ver in platform.platform() for ver in ('el8', 'el9')):
                vm_session.cmd(
                    'mkfs.xfs -f -b size=4096 /dev/pmem0 -m reflink=0')
            else:
                vm_session.cmd('mkfs.xfs -f -b size=4096 /dev/pmem0')

            # Mount the file system with DAX enabled for page cache bypass
            output = vm_session.cmd_output('mount -o dax /dev/pmem0 /mnt/')
            logging.info(output)

            # Create a file on the nvdimm device.
            test_str = 'This is a test with label'
            vm_session.cmd('echo "%s" >/mnt/foo-label' % test_str)
            if test_str not in vm_session.cmd('cat /mnt/foo-label '):
                test.fail('"%s" should be in the output of cat cmd' % test_str)

            vm_session.cmd('umount /mnt')
            # Reboot the guest, and remount the nvdimm device in the guest.
            # Check the file foo-label still exists
            vm_session.close()
            virsh.reboot(vm_name, debug=True)
            vm_session = vm.wait_for_login()

            vm_session.cmd('mount -o dax /dev/pmem0  /mnt')
            if test_str not in vm_session.cmd('cat /mnt/foo-label '):
                test.fail('"%s" should be in output' % test_str)

            if params.get('check_life_cycle', 'no') == 'yes':
                virsh.managedsave(vm_name, ignore_status=False, debug=True)
                vm.start()
                check_nvdimm_file('foo-label')

                vm_s1 = vm_name + ".s1"
                virsh.save(vm_name, vm_s1, ignore_status=False, debug=True)
                virsh.restore(vm_s1, ignore_status=False, debug=True)
                check_nvdimm_file('foo-label')

                virsh.snapshot_create_as(vm_name,
                                         vm_s1,
                                         ignore_status=False,
                                         debug=True)
                virsh.snapshot_revert(vm_name,
                                      vm_s1,
                                      ignore_status=False,
                                      debug=True)
                virsh.snapshot_delete(vm_name,
                                      vm_s1,
                                      ignore_status=False,
                                      debug=True)

        if check == 'hot_plug':
            # Create file for 2nd nvdimm device
            nvdimm_file_2 = params.get('nvdimm_file_2')
            process.run('truncate -s 512M %s' % nvdimm_file_2)

            # Add 2nd nvdimm device to vm xml
            nvdimm2_params = {
                k.replace('nvdimmxml2_', ''): v
                for k, v in params.items() if k.startswith('nvdimmxml2_')
            }
            nvdimm2_xml = create_nvdimm_xml(**nvdimm2_params)

            ori_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                'memory')
            logging.debug('Starts with %d memory devices', len(ori_devices))

            result = virsh.attach_device(vm_name, nvdimm2_xml.xml, debug=True)
            libvirt.check_exit_status(result)

            # After attach, there should be an extra memory device
            devices_after_attach = vm_xml.VMXML.new_from_dumpxml(
                vm_name).get_devices('memory')
            logging.debug('After attach, vm has %d memory devices',
                          len(devices_after_attach))
            if len(ori_devices) != len(devices_after_attach) - 1:
                test.fail(
                    'Number of memory devices after attach is %d, should be %d'
                    % (len(devices_after_attach), len(ori_devices) + 1))

            # Create namespace for ppc tests
            if IS_PPC_TEST:
                logging.debug(
                    vm_session.cmd_output(
                        'ndctl create-namespace --mode=fsdax --region=region1')
                )

            time.sleep(wait_sec)
            check_file_in_vm(vm_session, '/dev/pmem1')

            nvdimm_detach = alive_vmxml.get_devices('memory')[-1]
            logging.debug(nvdimm_detach)

            # Hot-unplug nvdimm device
            result = virsh.detach_device(vm_name,
                                         nvdimm_detach.xml,
                                         debug=True)
            libvirt.check_exit_status(result)

            vm_session.close()
            vm_session = vm.wait_for_login()

            logging.debug(virsh.dumpxml(vm_name).stdout_text)

            left_devices = vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                'memory')
            logging.debug(left_devices)

            if len(left_devices) != len(ori_devices):
                test.fail(
                    'Number of memory devices after detach is %d, should be %d'
                    % (len(left_devices), len(ori_devices)))

            time.sleep(wait_sec)
            check_file_in_vm(vm_session, '/dev/pmem1', expect=False)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        bkxml.sync()
        os.remove(nvdimm_file)
        if 'nvdimm_file_2' in locals():
            os.remove(nvdimm_file_2)
Exemplo n.º 54
    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    status = virsh.detach_device(vm_ref, device_xml, flagstr=dt_options,
                                 debug=True).exit_status

    # Resume guest after command. On newer libvirt this is fixed as it has
    # been a bug. The change in xml file is done after the guest is resumed.
    if pre_vm_state == "paused":
        vm.resume()

    # Check disk count after command.
    check_count_after_cmd = True
    if device in ['disk', 'cdrom']:
        device_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    else:
        vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device_count_after_cmd = len(vm_cls.devices)
    
    if dt_options.count("current") and pre_vm_state == "shut off":
Exemplo n.º 55
def run(test, params, env):
    """
    Test interface xml options.

    1. Prepare test environment, destroy or suspend a VM.
    2. Edit xml and start the domain.
    3. Perform test operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.wait_for_login()

    def create_iface_xml(mac=None):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        iface.source = iface_source
        iface.model = iface_model if iface_model else "virtio"
        if iface_target:
            iface.target = {'dev': iface_target}
        if mac:
            iface.mac_address = mac
        if iface_rom:
            iface.rom = eval(iface_rom)
        logging.debug("Create new interface xml: %s", iface)
        return iface

    def get_all_mac_in_vm():
        """
        get the mac address list of all the interfaces from a running vm os

        :return: a list of the mac addresses
        """
        mac_list = []
        interface_list = vm.get_interfaces()
        for iface_ in interface_list:
            mac_ = vm.get_interface_mac(iface_)
            mac_list.append(mac_)
        return mac_list

    # Interface specific attributes.
    iface_num = params.get("iface_num", '1')
    iface_type = params.get("iface_type", "network")
    iface_source = eval(params.get("iface_source",
                                   "{'network':'default'}"))
    iface_model = params.get("iface_model")
    iface_target = params.get("iface_target")
    iface_mac = params.get("iface_mac")
    iface_rom = params.get("iface_rom")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_iface = "yes" == params.get("attach_iface", "no")
    attach_option = params.get("attach_option", "")
    detach_device = "yes" == params.get("detach_device")
    stress_test = "yes" == params.get("stress_test")
    restart_libvirtd = "yes" == params.get("restart_libvirtd",
                                           "no")
    start_vm = "yes" == params.get("start_vm", "yes")
    options_test = "yes" == params.get("options_test", "no")
    username = params.get("username")
    password = params.get("password")
    poll_timeout = int(params.get("poll_timeout", 10))
    err_msgs1 = params.get("err_msgs1")
    err_msgs2 = params.get("err_msgs2")
    err_msg_rom = params.get("err_msg_rom")
    del_pci = "yes" == params.get("del_pci", "no")
    del_mac = "yes" == params.get("del_mac", "no")
    del_alias = "yes" == params.get("del_alias", "no")
    set_pci = "yes" == params.get("set_pci", "no")
    set_mac = "yes" == params.get("set_mac", "no")
    set_alias = "yes" == params.get("set_alias", "no")
    status_error = "yes" == params.get("status_error", "no")
    pci_addr = params.get("pci")
    check_mac = "yes" == params.get("check_mac", "no")
    vnet_mac = params.get("vnet_mac", None)
    customer_alias = "yes" == params.get("customer_alias", "no")
    detach_error = params.get("detach_error", None)

    # stress_test requires a detach operation
    stress_test_detach_device = False
    stress_test_detach_interface = False
    if stress_test:
        if attach_device:
            stress_test_detach_device = True
        if attach_iface:
            stress_test_detach_interface = True

    # The following detach-device step also uses the attach option
    detach_option = attach_option

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    #iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    libvirtd = utils_libvirtd.Libvirtd()

    # Check virsh command option
    check_cmds = []
    sep_options = attach_option.split()
    logging.debug("sep_options: %s" % sep_options)
    for sep_option in sep_options:
        if attach_device and sep_option:
            check_cmds.append(('attach-device', sep_option))
        if attach_iface and sep_option:
            check_cmds.append(('attach-interface', sep_option))
        if (detach_device or stress_test_detach_device) and sep_option:
            check_cmds.append(('detach-device', sep_option))
        if stress_test_detach_interface and sep_option:
            check_cmds.append(('detach-interface', sep_option))
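
    # virsh_cmd_has_option is assumed to cancel the test when the installed
    # virsh does not recognise a command/option pair checked below.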

    for cmd, option in check_cmds:
        libvirt.virsh_cmd_has_option(cmd, option)

    try:
        try:
            # Attach an interface when vm is running
            iface_list = []
            err_msgs = ("No more available PCI slots",
                        "No more available PCI addresses")
            if not start_vm:
                virsh.destroy(vm_name)
            for i in range(int(iface_num)):
                if attach_device:
                    logging.info("Try to attach device loop %s" % i)
                    if iface_mac:
                        mac = iface_mac
                        iface_xml_obj = create_iface_xml(mac)
                    elif check_mac:
                        iface_xml_obj = create_iface_xml()
                    else:
                        mac = utils_net.generate_mac_address_simple()
                        iface_xml_obj = create_iface_xml(mac)
                    if customer_alias:
                        random_id = process.run("uuidgen", ignore_status=True, shell=True).stdout_text.strip()
                        alias_str = "ua-" + random_id
                        iface_xml_obj.alias = {"name": alias_str}
                        logging.debug("Update number %s interface xml: %s", i, iface_xml_obj)
                    iface_xml_obj.xmltreefile.write()
                    if check_mac:
                        mac_bef = get_all_mac_in_vm()
                    ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                              flagstr=attach_option,
                                              ignore_status=True,
                                              debug=True)
                elif attach_iface:
                    logging.info("Try to attach interface loop %s" % i)
                    if iface_mac:
                        mac = iface_mac
                    else:
                        mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (iface_type, iface_source['network'],
                                iface_model, mac, attach_option))
                    ret = virsh.attach_interface(vm_name, options,
                                                 ignore_status=True)
                if ret.exit_status:
                    if any([msg in ret.stderr for msg in err_msgs]):
                        logging.debug("No more pci slots, can't attach more devices")
                        break
                    elif ret.stderr.count("doesn't support option %s" % attach_option):
                        test.cancel(ret.stderr)
                    elif err_msgs1 and err_msgs1 in ret.stderr:
                        logging.debug("option %s is not supported when domain "
                                      "running state is %s" % (attach_option, vm.is_alive()))
                        if start_vm or ("--live" not in sep_options and attach_option):
                            test.fail("returned 'not supported', which is unexpected")
                    elif err_msgs2 and err_msgs2 in ret.stderr:
                        logging.debug("options %s are mutually exclusive" % attach_option)
                        if not ("--current" in sep_options and len(sep_options) > 1):
                            test.fail("returned 'mutually exclusive', which is unexpected")
                    elif err_msg_rom and err_msg_rom in ret.stderr:
                        logging.debug("Attach failed with expect err msg: %s" % err_msg_rom)
                    else:
                        test.fail("Failed to attach-interface: %s" % ret.stderr.strip())
                elif stress_test:
                    if attach_device:
                        # Detach the device immediately for stress test
                        ret = virsh.detach_device(vm_name, iface_xml_obj.xml,
                                                  flagstr=detach_option,
                                                  ignore_status=True)
                    elif attach_iface:
                        # Detach the device immediately for stress test
                        options = ("--type %s --mac %s %s" %
                                   (iface_type, mac, detach_option))
                        ret = virsh.detach_interface(vm_name, options,
                                                     ignore_status=True)
                    libvirt.check_exit_status(ret)
                else:
                    if attach_device:
                        if check_mac:
                            mac_aft = get_all_mac_in_vm()
                            add_mac = list(set(mac_aft).difference(set(mac_bef)))
                            try:
                                mac = add_mac[0]
                                logging.debug("The mac address of the attached interface is %s" % mac)
                            except IndexError:
                                test.fail("Can not find the new added interface in the guest os!")
                        iface_list.append({'mac': mac,
                                           'iface_xml': iface_xml_obj})
                    elif attach_iface:
                        iface_list.append({'mac': mac})
            # Restart libvirtd service
            if restart_libvirtd:
                libvirtd.restart()
                # After restart libvirtd, the old console was invalidated,
                # so we need create a new serial console
                vm.cleanup_serial_console()
                vm.create_serial_console()
            # in options test, check if the interface is attached
            # in current state when attach return true

            def check_iface_exist():
                try:
                    session = vm.wait_for_serial_login(username=username,
                                                       password=password)
                    if utils_net.get_linux_ifname(session, iface['mac']):
                        return True
                    else:
                        logging.debug("can not find interface in vm")
                except Exception:
                    return False
            if options_test:
                for iface in iface_list:
                    if 'mac' in iface:
                        # Check interface in dumpxml output
                        if_attr = vm_xml.VMXML.get_iface_by_mac(vm_name,
                                                                iface['mac'])
                        if vm.is_alive() and attach_option == "--config":
                            if if_attr:
                                test.fail("interface should not exists "
                                          "in current live vm while "
                                          "attached by --config")
                        else:
                            if if_attr:
                                logging.debug("interface %s found current "
                                              "state in xml" % if_attr['mac'])
                            else:
                                test.fail("no interface found in "
                                          "current state in xml")

                        if if_attr:
                            if if_attr['type'] != iface_type or \
                                    if_attr['source'] != \
                                    iface_source['network']:
                                test.fail("Interface attribute doesn't "
                                          "match attachment options")
                        # check interface in vm only when vm is active
                        if vm.is_alive():
                            logging.debug("check interface in current state "
                                          "in vm")

                            if not utils_misc.wait_for(check_iface_exist, timeout=20):
                                if not attach_option == "--config":
                                    test.fail("Can't see interface "
                                              "in current state in vm")
                                else:
                                    logging.debug("interface not visible in "
                                                  "running vm, as expected "
                                                  "for --config attach")
                            else:
                                logging.debug("find interface in "
                                              "current state in vm")
                            # in options test, if the attach is performed
                            # when the vm is running
                            # need to destroy and start to check again
                            vm.destroy()

            # Start the domain if needed
            if vm.is_dead():
                vm.start()
            session = vm.wait_for_serial_login(username=username,
                                               password=password)

            # check if interface is attached
            for iface in iface_list:
                if 'mac' in iface:
                    logging.debug("check interface in xml")
                    # Check interface in dumpxml output
                    if_attr = vm_xml.VMXML.get_iface_by_mac(vm_name,
                                                            iface['mac'])
                    logging.debug(if_attr)
                    if if_attr:
                        logging.debug("interface {} is found in xml".
                                      format(if_attr['mac']))
                        if (if_attr['type'] != iface_type or
                                if_attr['source'] != iface_source['network']):
                            test.fail("Interface attribute doesn't "
                                      "match attachment options")
                        if options_test and start_vm and attach_option \
                                in ("--current", "--live", ""):
                            test.fail("interface should not exist after "
                                      "vm restart in options_test")
                    else:
                        logging.debug("no interface found in xml")
                        if options_test and start_vm and attach_option in \
                                ("--current", "--live", ""):
                            logging.debug("interface does not exist in next "
                                          "state in xml with %s" % attach_option)
                        else:
                            test.fail("Can't see interface in dumpxml")

                    # Check interface on guest
                    if not utils_misc.wait_for(check_iface_exist, timeout=20):
                        logging.debug("can't see interface next state in vm")
                        if start_vm and attach_option in \
                                ("--current", "--live", ""):
                            logging.debug("it is expected")
                        else:
                            test.fail("should find interface "
                                      "but no seen in next state in vm")
                    if vnet_mac:
                        # get the name of the backend tap device
                        iface_params = vm_xml.VMXML.get_iface_by_mac(vm_name, mac)
                        target_name = iface_params['target']['dev']
                        # check the tap device mac on host
                        tap_info = process.run("ip l show %s" % target_name, shell=True, ignore_status=True).stdout_text
                        logging.debug("vnet_mac should be %s" % vnet_mac)
                        logging.debug("check on host for the details of tap device %s: %s" % (target_name, tap_info))
                        if vnet_mac not in tap_info:
                            test.fail("The mac address of the tap device do not match!")
            # Detach hot/cold-plugged interface at last
            if detach_device:
                logging.debug("detach interface here:")
                if attach_device:
                    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    iface_xml_ls = vmxml.get_devices("interface")
                    iface_xml_ls_det = iface_xml_ls[1:]
                    for iface_xml_det in iface_xml_ls_det:
                        if del_mac:
                            iface_xml_det.del_mac_address()
                        if del_pci:
                            iface_xml_det.del_address()
                        if del_alias:
                            iface_xml_det.del_alias()
                        if set_mac:
                            mac = utils_net.generate_mac_address_simple()
                            iface_xml_det.set_mac_address(mac)
                        if set_pci:
                            pci_dict = ast.literal_eval(pci_addr)
                            addr = iface_xml_det.new_iface_address(**{"attrs": pci_dict})
                            iface_xml_det.set_address(addr)
                        if set_alias:
                            random_id = process.run("uuidgen", ignore_status=True, shell=True).stdout_text.strip()
                            alias_str = "ua-" + random_id
                            iface_xml_det.set_alias({"name": alias_str})
                        ori_pid_libvirtd = process.getoutput("pidof libvirtd")
                        logging.debug("The xml of the interface to detach is %s", iface_xml_det)
                        ret = virsh.detach_device(vm_name,
                                                  iface_xml_det.xml,
                                                  flagstr="",
                                                  ignore_status=True)
                        libvirt.check_exit_status(ret, status_error)
                        if detach_device and status_error and detach_error:
                            if not ret.stderr.count(detach_error):
                                test.error("Detach failed as expected, but the error msg %s does not match!" % ret.stderr)
                        aft_pid_libvirtd = process.getoutput("pidof libvirtd")
                        if not libvirtd.is_running() or ori_pid_libvirtd != aft_pid_libvirtd:
                            test.fail("Libvirtd crashed after detaching a non-existent interface")
                else:
                    for iface in iface_list:
                        options = ("%s --mac %s" %
                                   (iface_type, iface['mac']))
                        ret = virsh.detach_interface(vm_name, options,
                                                     ignore_status=True)
                        libvirt.check_exit_status(ret)

                # Check if interface was detached
                if not status_error:
                    for iface in iface_list:
                        if 'mac' in iface:
                            # Check interface in dumpxml output until it
                            # disappears or poll_timeout is hit
                            if not utils_misc.wait_for(
                                    lambda: not vm_xml.VMXML.get_iface_by_mac(
                                        vm_name, iface['mac']),
                                    poll_timeout, step=2):
                                test.fail("Interface still "
                                          "exists after detachment")
            session.close()
        except virt_vm.VMStartError as e:
            logging.info(str(e))
            test.fail('VM failed to start:\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
Exemplo n.º 56
def test_active_nodedev_reset(device, vm, expect_succeed):
    """
    Test nodedev-reset when the specified device is attached to a VM

    :param device         : Specified node device to be tested.
    :param vm             : VM the device is to be attached to.
    :param expect_succeed : 'yes' if the command is expected to succeed,
                            'no' if it is expected to fail.
    :raise TestFail       : If result doesn't meet expectation.
    :raise TestError      : If failed to recover environment.
    """
    # Split device name such as `pci_0000_00_19_0` and fill the XML.
    hostdev_xml = """
<hostdev mode='subsystem' type='%s' managed='yes'>
    <source>
        <address domain='0x%s' bus='0x%s' slot='0x%s' function='0x%s'/>
    </source>
</hostdev>""" % tuple(device.split('_'))

    try:
        # The device needs to be detached from the host before it can be
        # attached to the VM.
        virsh.nodedev_detach(device)
        try:
            # Backup VM XML.
            vmxml = VMXML.new_from_inactive_dumpxml(vm.name)

            # Generate a temp file to store host device XML.
            dev_fd, dev_fname = tempfile.mkstemp(dir=data_dir.get_tmp_dir())
            os.close(dev_fd)

            with open(dev_fname, 'w') as dev_file:
                dev_file.write(hostdev_xml)

            # Only a running VM allows device attachment.
            if not vm.is_alive():
                vm.start()

            try:
                result = virsh.attach_device(vm.name, dev_fname)
                logging.debug(result)

                test_nodedev_reset([device], expect_succeed)
            finally:
                # Detach device from VM.
                result = virsh.detach_device(vm.name, dev_fname)
                # Raise error when detach failed.
                if result.exit_status:
                    raise error.TestError(
                        'Failed to detach device %s from %s. Result:\n %s'
                        % (device, vm.name, result))
        finally:
            # Cleanup temp XML file and recover test VM.
            os.remove(dev_fname)
            vmxml.sync()
    finally:
        # Reattach node device
        result = virsh.nodedev_reattach(device)
        # Raise error when reattach failed.
        if result.exit_status:
            raise error.TestError(
                'Failed to reattach nodedev %s. Result:\n %s'
                % (device, result))
Exemplo n.º 57
def run(test, params, env):
    """
    Test for hotplug usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    usb_type = params.get("usb_type", "kbd")
    attach_type = params.get("attach_type", "attach_device")
    attach_count = int(params.get("attach_count", "1"))
    if usb_type == "storage":
        model = params.get("model", "nec-xhci")
        index = params.get("index", "1")
    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status("permissive")

    if usb_type == "storage":
        controllers = vm_xml.get_devices(device_type="controller")
        devices = vm_xml.get_devices()
        for dev in controllers:
            if dev.type == "usb" and dev.index == "1":
                devices.remove(dev)
        controller = Controller("controller")
        controller.type = "usb"
        controller.index = index
        controller.model = model
        devices.append(controller)
        vm_xml.set_devices(devices)

    try:
        session = vm.wait_for_login()
    except (LoginError, VMError, ShellError) as e:
        test.fail("Test failed: %s" % str(e))

    def is_hotplug_ok():
        try:
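            # Each hotplugged 1M usb disk shows up as a "Disk /dev/... 1 M"
            # line in fdisk output; the count should equal attach_count.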
            output = session.cmd_output(
                "fdisk -l | grep -c '^Disk /dev/.* 1 M'")
            if int(output.strip()) != attach_count:
                return False
            else:
                return True
        except ShellTimeoutError as detail:
            test.fail("unhotplug failed: %s, " % detail)

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    try:
        result = None
        dev_xml = None
        opt = "--hmp"
        for i in range(attach_count):
            if usb_type == "storage":
                path = os.path.join(tmp_dir, "%s.img" % i)
                libvirt.create_local_disk("file",
                                          path,
                                          size="1M",
                                          disk_format="qcow2")
                os.chmod(path, 0o666)

            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd = "drive_add"
                    attach_cmd += (" 0 id=drive-usb-%s,if=none,file=%s" %
                                   (i, path))

                    result = virsh.qemu_monitor_command(vm_name,
                                                        attach_cmd,
                                                        options=opt)
                    if result.exit_status or (result.stdout.strip().find("OK")
                                              == -1):
                        raise process.CmdError(result.command, result)

                    attach_cmd = "device_add usb-storage,"
                    attach_cmd += (
                        "id=drive-usb-%s,bus=usb1.0,drive=drive-usb-%s" %
                        (i, i))
                else:
                    attach_cmd = "device_add"
                    attach_cmd += " usb-%s,bus=usb1.0,id=%s%s" % (usb_type,
                                                                  usb_type, i)

                result = virsh.qemu_monitor_command(vm_name,
                                                    attach_cmd,
                                                    options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                attributes = {'type_name': "usb", 'bus': "1", 'port': "0"}
                if usb_type == "storage":
                    dev_xml = Disk(type_name="file")
                    dev_xml.device = "disk"
                    dev_xml.source = dev_xml.new_disk_source(
                        **{"attrs": {
                            'file': path
                        }})
                    dev_xml.driver = {
                        "name": "qemu",
                        "type": 'qcow2',
                        "cache": "none"
                    }
                    dev_xml.target = {"dev": 'sdb', "bus": "usb"}
                    dev_xml.address = dev_xml.new_disk_address(
                        **{"attrs": attributes})
                else:
                    if usb_type == "mouse":
                        dev_xml = Input("mouse")
                    elif usb_type == "tablet":
                        dev_xml = Input("tablet")
                    else:
                        dev_xml = Input("keyboard")

                    dev_xml.input_bus = "usb"
                    dev_xml.address = dev_xml.new_input_address(
                        **{"attrs": attributes})

                result = virsh.attach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)

        if status_error and usb_type == "storage":
            if utils_misc.wait_for(is_hotplug_ok, timeout=30):
                # Sometimes we meet an error but the ret in $? is 0.
                test.fail("\nAttach device successfully in negative case."
                          "\nExcept it fail when attach count exceed maximum."
                          "\nDetail: %s" % result)

        for i in range(attach_count):
            attach_cmd = "device_del"
            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd += (" drive-usb-%s" % i)
                else:
                    if usb_type == "mouse":
                        attach_cmd += " mouse"
                    elif usb_type == "tablet":
                        attach_cmd += " tablet"
                    else:
                        attach_cmd += " keyboard"

                result = virsh.qemu_monitor_command(vm_name,
                                                    attach_cmd,
                                                    options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                result = virsh.detach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
    except process.CmdError as e:
        if not status_error:
            # live attach of device 'input' is not supported
            ret = result.stderr.find("Operation not supported")
            if usb_type != "storage" and ret > -1:
                pass
            else:
                test.fail("failed to attach device.\nDetail: %s." % result)
    finally:
        session.close()
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        utils_selinux.set_status(backup_sestatus)
        vm_xml_backup.sync()
Exemplo n.º 58
def run(test, params, env):
    """
    Test interface xml options.

    1. Prepare test environment, destroy or suspend a VM.
    2. Edit xml and start the domain.
    3. Perform test operation.
    4. Recover test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': False}

    def create_iface_xml(iface_mac):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        source = ast.literal_eval(iface_source)
        if source:
            iface.source = source
        iface.model = iface_model if iface_model else "virtio"
        iface.mac_address = iface_mac
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        logging.debug("Create new interface xml: %s", iface)
        return iface

    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check source device is valid or not,
            # if it's not in host interface list, try to set
            # source device to first active interface of host
            if (iface.type_name == "direct" and
                    source.has_key('dev') and
                    source['dev'] not in net_ifs):
                logging.warn("Source device %s is not a interface"
                             " of host, reset to %s",
                             source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if iface.address:
            del iface.address

        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            utils.run(cmd)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid of permission problem
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            utils.run("chmod a+rw %s" % vmxml.xml)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml,
                                      ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            vmxml.sync()

    def check_offloads_option(if_name, driver_options, session=None):
        """
        Check interface offloads by ethtool output
        """
        offloads = {"csum": "tx-checksumming",
                    "gso": "generic-segmentation-offload",
                    "tso4": "tcp-segmentation-offload",
                    "tso6": "tx-tcp6-segmentation",
                    "ecn": "tx-tcp-ecn-segmentation",
                    "ufo": "udp-fragmentation-offload"}
        if session:
            ret, output = session.cmd_status_output("ethtool -k %s | head"
                                                    " -18" % if_name)
        else:
            out = utils.run("ethtool -k %s | head -18" % if_name)
            ret, output = out.exit_status, out.stdout
        if ret:
            raise error.TestFail("ethtool return error code")
        logging.debug("ethtool output: %s", output)
        for offload in driver_options.keys():
            if offloads.has_key(offload):
                if (output.count(offloads[offload]) and
                    not output.count("%s: %s" % (
                        offloads[offload], driver_options[offload]))):
                    raise error.TestFail("offloads option %s: %s isn't"
                                         " correct in ethtool output" %
                                         (offloads[offload],
                                          driver_options[offload]))

    def run_xml_test(iface_mac):
        """
        Test for interface options in vm xml
        """
        # Get the interface object according the mac address
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_devices = vmxml.get_devices(device_type="interface")
        iface = None
        for iface_dev in iface_devices:
            if iface_dev.mac_address == iface_mac:
                iface = iface_dev
        if not iface:
            raise error.TestFail("Can't find interface with mac"
                                 " '%s' in vm xml" % iface_mac)
        driver_dict = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        for driver_opt in driver_dict.keys():
            if not driver_dict[driver_opt] == iface.driver.driver_attr[driver_opt]:
                raise error.TestFail("Can't see driver option %s=%s in vm xml"
                                     % (driver_opt, driver_dict[driver_opt]))
        if iface_target:
            if (not iface.target.has_key("dev") or
                    not iface.target["dev"].startswith(iface_target)):
                raise error.TestFail("Can't see device target dev in vm xml")
            # Check macvtap mode by ip link command
            if iface_target == "macvtap" and iface.source.has_key("mode"):
                cmd = "ip -d link show %s" % iface.target["dev"]
                output = utils.run(cmd).stdout
                logging.debug("ip link output: %s", output)
                mode = iface.source["mode"]
                if mode == "passthrough":
                    mode = "passthru"
                if not output.count("macvtap  mode %s" % mode):
                    raise error.TestFail("Failed to verify macvtap mode")

    def run_cmdline_test(iface_mac):
        """
        Test for qemu-kvm command line options
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if test_vhost_net:
            cmd += " | grep 'vhost=on'"
        ret = utils.run(cmd)
        if ret.exit_status:
            raise error.TestFail("Can't parse qemu-kvm command line")

        logging.debug("Command line %s", ret.stdout)
        if iface_model == "virtio":
            model_option = "device virtio-net-pci"
        else:
            model_option = "device rtl8139"
        iface_cmdline = re.findall(r"%s,(.+),mac=%s" %
                                   (model_option, iface_mac), ret.stdout)
        if not iface_cmdline:
            raise error.TestFail("Can't see %s with mac %s in command"
                                 " line" % (model_option, iface_mac))

        cmd_opt = {}
        for opt in iface_cmdline[0].split(','):
            tmp = opt.rsplit("=")
            cmd_opt[tmp[0]] = tmp[1]
        logging.debug("Command line options %s", cmd_opt)

        driver_dict = {}
        # Test <driver> xml options.
        if iface_driver:
            iface_driver_dict = ast.literal_eval(iface_driver)
            for driver_opt in iface_driver_dict.keys():
                if driver_opt == "name":
                    continue
                elif driver_opt == "txmode":
                    if iface_driver_dict["txmode"] == "iothread":
                        driver_dict["tx"] = "bh"
                    else:
                        driver_dict["tx"] = iface_driver_dict["txmode"]
                elif driver_opt == "queues":
                    driver_dict["mq"] = "on"
                    driver_dict["vectors"] = str(int(
                        iface_driver_dict["queues"]) * 2 + 2)
                else:
                    driver_dict[driver_opt] = iface_driver_dict[driver_opt]
        # Test <driver><host/></driver> xml options.
        if iface_driver_host:
            driver_dict.update(ast.literal_eval(iface_driver_host))
        # Test <driver><guest/></driver> xml options.
        if iface_driver_guest:
            driver_dict.update(ast.literal_eval(iface_driver_guest))

        for driver_opt in driver_dict.keys():
            if (driver_opt not in cmd_opt or
                    cmd_opt[driver_opt] != driver_dict[driver_opt]):
                raise error.TestFail("Can't see option '%s=%s' in qemu-kvm"
                                     " command line" %
                                     (driver_opt, driver_dict[driver_opt]))
        if test_backend:
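            # The second whitespace-separated field of the matching ps line
            # is the qemu process pid.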
            guest_pid = ret.stdout.rsplit()[1]
            cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid)
            if utils.system(cmd, ignore_status=True):
                raise error.TestFail("Guest process didn't open backend file"
                                     % backend["tap"])
            cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid)
            if utils.system(cmd, ignore_status=True):
                raise error.TestFail("Guest process didn't open backend file"
                                     % backend["tap"])

    def get_guest_ip(session, mac):
        """
        Wrapper function to get guest ip address
        """
        utils_net.restart_guest_network(session, mac)
        # Wait until the IP address is ready
        utils_misc.wait_for(
            lambda: utils_net.get_guest_ip_addr(session, mac), 10)
        return utils_net.get_guest_ip_addr(session, mac)

    def check_user_network(session):
        """
        Check user network ip address on guest
        """
        vm_ips = []
        vm_ips.append(get_guest_ip(session, iface_mac_old))
        if attach_device:
            vm_ips.append(get_guest_ip(session, iface_mac))
        logging.debug("IP address on guest: %s", vm_ips)
        if len(vm_ips) != len(set(vm_ips)):
            raise error.TestFail("Duplicated IP address on guest. "
                                 "Check bug: https://bugzilla.redhat."
                                 "com/show_bug.cgi?id=1147238")

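        # qemu's user-mode (SLIRP) networking uses the 10.0.2.0/24 subnet:
        # guests get 10.0.2.15 and up, the gateway is 10.0.2.2 and the
        # built-in dns server is 10.0.2.3.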
        for vm_ip in vm_ips:
            if vm_ip is None or not vm_ip.startswith("10.0.2."):
                raise error.TestFail("Found wrong IP address"
                                     " on guest")
        # Check gateway address
        gateway = utils_net.get_net_gateway(session.cmd_output)
        if gateway != "10.0.2.2":
            raise error.TestFail("The gateway on guest is not"
                                 " right")
        # Check dns server address
        ns_list = utils_net.get_net_nameserver(session.cmd_output)
        if "10.0.2.3" not in ns_list:
            raise error.TestFail("The dns server can't be found"
                                 " on guest")

    def check_mcast_network(session):
        """
        Check multicast ip address on guests
        """
        src_addr = ast.literal_eval(iface_source)['address']
        add_session = additional_vm.wait_for_serial_login()
        vms_sess_dict = {vm_name: session,
                         additional_vm.name: add_session}

        # Check mcast address on host
        cmd = "netstat -g | grep %s" % src_addr
        if utils.run(cmd, ignore_status=True).exit_status:
            raise error.TestFail("Can't find multicast ip address"
                                 " on host")
        vms_ip_dict = {}
        # Get ip address on each guest
        for vms in vms_sess_dict.keys():
            vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
            vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
            if not vm_ip:
                raise error.TestFail("Can't get multicast ip"
                                     " address on guest")
            vms_ip_dict.update({vms: vm_ip})
        if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
            raise error.TestFail("Got duplicated multicast ip address")
        logging.debug("Found ips on guest: %s", vms_ip_dict)

        # Run omping server on host
        if not utils_misc.yum_install(["omping"]):
            raise error.TestError("Failed to install omping"
                                  " on host")
        cmd = ("iptables -F;omping -m %s %s" %
               (src_addr, "192.168.122.1 %s" %
                ' '.join(vms_ip_dict.values())))
        # Run a background job waiting for client connections
        bgjob = utils.AsyncJob(cmd)

        # Run omping client on guests
        for vms in vms_sess_dict.keys():
            # omping should be installed first
            if not utils_misc.yum_install(["omping"], vms_sess_dict[vms]):
                raise error.TestError("Failed to install omping"
                                      " on guest")
            cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
                   (src_addr, "192.168.122.1 %s" %
                    vms_ip_dict[vms]))
            ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
            logging.debug("omping ret: %s, output: %s", ret, output)
            if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%') or
                    not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
                raise error.TestFail("omping failed on guest")
        # Kill the background job
        bgjob.kill_func()

    status_error = "yes" == params.get("status_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    unprivileged_user = params.get("unprivileged_user")

    # Interface specific attributes.
    iface_type = params.get("iface_type", "network")
    iface_source = params.get("iface_source", "{}")
    iface_driver = params.get("iface_driver")
    iface_model = params.get("iface_model")
    iface_target = params.get("iface_target")
    iface_backend = params.get("iface_backend", "{}")
    iface_driver_host = params.get("iface_driver_host")
    iface_driver_guest = params.get("iface_driver_guest")
    attach_device = params.get("attach_iface_device")
    change_option = "yes" == params.get("change_iface_options", "no")
    update_device = "yes" == params.get("update_iface_device", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    serial_login = "******" == params.get("serial_login", "no")
    test_option_cmd = "yes" == params.get(
                      "test_iface_option_cmd", "no")
    test_option_xml = "yes" == params.get(
                      "test_iface_option_xml", "no")
    test_vhost_net = "yes" == params.get(
                     "test_vhost_net", "no")
    test_option_offloads = "yes" == params.get(
                           "test_option_offloads", "no")
    test_iface_user = "******" == params.get(
                      "test_iface_user", "no")
    test_iface_mcast = "yes" == params.get(
                       "test_iface_mcast", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    test_guest_ip = "yes" == params.get("test_guest_ip", "no")
    test_backend = "yes" == params.get("test_backend", "no")

    if iface_driver_host or iface_driver_guest or test_backend:
        if not libvirt_version.version_compare(1, 2, 8):
            raise error.TestNAError("Offloading/backend options not "
                                    "supported in this libvirt version")
    if iface_driver and "queues" in ast.literal_eval(iface_driver):
        if not libvirt_version.version_compare(1, 0, 6):
            raise error.TestNAError("Queues options not supported"
                                    " in this libvirt version")

    if unprivileged_user:
        virsh_dargs["unprivileged_user"] = unprivileged_user
        # Create unprivileged user if needed
        cmd = ("grep {0} /etc/passwd || "
               "useradd {0}".format(unprivileged_user))
        utils.run(cmd)
        # Need another disk image for unprivileged user to access
        dst_disk = "/tmp/%s.img" % unprivileged_user

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    # iface_mac will be updated if a new interface is attached
    iface_mac = iface_mac_old
    # Additional vm for test
    additional_vm = None
    libvirtd = utils_libvirtd.Libvirtd()

    try:
        # Build the xml and run test.
        try:
            # Prepare interface backend files
            if test_backend:
                if not os.path.exists("/dev/vhost-net"):
                    utils.run("modprobe vhost-net")
                backend = ast.literal_eval(iface_backend)
                backend_tap = "/dev/net/tun"
                backend_vhost = "/dev/vhost-net"
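                # Fall back to the default tap/vhost device nodes when no
                # custom backend files are given.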
                if not backend:
                    backend["tap"] = backend_tap
                    backend["vhost"] = backend_vhost
                if not start_error:
                    # Create backend files for normal test
                    if not os.path.exists(backend["tap"]):
                        os.rename(backend_tap, backend["tap"])
                    if not os.path.exists(backend["vhost"]):
                        os.rename(backend_vhost, backend["vhost"])
            # Edit the interface xml.
            if change_option:
                modify_iface_xml(update=False)
            # Check vhost driver.
            if test_vhost_net:
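                # Unload vhost_net first so the test can verify it is loaded
                # automatically once the vm starts with vhost=on.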
                if os.path.exists("/dev/vhost-net"):
                    cmd = ("modprobe -r {0}; lsmod | "
                           "grep {0}".format("vhost_net"))
                    if not utils.system(cmd, ignore_status=True):
                        raise error.TestError("Can't remove "
                                              "vhost_net driver")

            # Attach an interface while the vm is shut off
            if attach_device == 'config':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                          flagstr="--config",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)

            # Clone additional vm
            if additional_guest:
                guest_name = "%s_%s" % (vm_name, '1')
                # Clone additional guest
                timeout = params.get("clone_timeout", 360)
                utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                                True, timeout=timeout)
                additional_vm = vm.clone(guest_name)
                additional_vm.start()

            # Start the VM.
            if unprivileged_user:
                virsh.start(vm_name, **virsh_dargs)
                cmd = ("su - %s -c 'virsh console %s'"
                       % (unprivileged_user, vm_name))
                session = aexpect.ShellSession(cmd)
                session.sendline()
                remote.handle_prompts(session, params.get("username"),
                                      params.get("password"), "[\#\$]", 30)
                # Get ip address on guest
                if not get_guest_ip(session, iface_mac):
                    raise error.TestError("Can't get ip address on guest")
            else:
                # Will raise VMStartError exception if start fails
                vm.start()
                if serial_login:
                    session = vm.wait_for_serial_login()
                else:
                    session = vm.wait_for_login()
            if start_error:
                raise error.TestFail("VM started unexpectedly")

            if test_vhost_net:
                if utils.system("lsmod | grep vhost_net", ignore_status=True):
                    raise error.TestFail("vhost_net module can't be"
                                         " loaded automatically")

            # Attach an interface while the vm is running
            if attach_device == 'live':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name, iface_xml_obj.xml,
                                          flagstr="--live",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)
                # Sleep briefly to let the attachment take effect
                time.sleep(5)

            # Update the interface options
            if update_device:
                modify_iface_xml(update=True, status_error=status_error)

            # Run tests for qemu-kvm command line options
            if test_option_cmd:
                run_cmdline_test(iface_mac)
            # Run tests for vm xml
            if test_option_xml:
                run_xml_test(iface_mac)
            # Run tests for offloads options
            if test_option_offloads:
                if iface_driver_host:
                    ifname_guest = utils_net.get_linux_ifname(
                        session, iface_mac)
                    check_offloads_option(
                        ifname_guest, ast.literal_eval(
                            iface_driver_host), session)
                if iface_driver_guest:
                    ifname_host = libvirt.get_ifname_host(vm_name,
                                                          iface_mac)
                    check_offloads_option(
                        ifname_host, ast.literal_eval(iface_driver_guest))

            if test_iface_user:
                # Test user type network
                check_user_network(session)
            if test_iface_mcast:
                # Test mcast type network
                check_mcast_network(session)
            # Check guest ip address
            if test_guest_ip:
                if not get_guest_ip(session, iface_mac):
                    raise error.TestFail("Guest can't get a"
                                         " valid ip address")

            session.close()
            # Restart libvirtd and guest, then test again
            if test_libvirtd:
                libvirtd.restart()
                vm.destroy()
                vm.start()
                if test_option_xml:
                    run_xml_test(iface_mac)

            # Finally, detach the hot/cold-plugged interface
            if attach_device:
                ret = virsh.detach_device(vm_name, iface_xml_obj.xml,
                                          flagstr="", ignore_status=True)
                libvirt.check_exit_status(ret)

        except virt_vm.VMStartError, e:
            logging.info(str(e))
            if not start_error:
                raise error.TestFail('VM failed to start: %s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        # Restore interface backend files
        if test_backend:
            if not os.path.exists(backend_tap):
                os.rename(backend["tap"], backend_tap)
            if not os.path.exists(backend_vhost):
                os.rename(backend["vhost"], backend_vhost)
        if unprivileged_user:
            virsh.remove_domain(vm_name, "--remove-all-storage",
                                **virsh_dargs)
        if additional_vm:
            virsh.remove_domain(additional_vm.name,
                                "--remove-all-storage")
            # Kill any leftover omping server processes on the host
            utils.system("pidof omping && killall omping",
                         ignore_status=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
def run(test, params, env):
    """
    Test blockcopy, snapshot and blockcommit on a disk with the
    metadata_cache attribute.

    1. Prepare the test environment.
    2. Prepare a disk image with the metadata_cache attribute.
    3. Start the domain.
    4. Perform the test operations and check metadata_cache size changes.
    5. Recover the test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # disk related parameters
    disk_format = params.get("disk_format")
    disk_path = params.get("disk_path")
    disk_device = params.get("disk_device", "disk")
    device_target = params.get("device_target", 'vdb')
    disk_format = params.get("disk_format", "qcow2")
    device_bus = params.get("device_bus", "virtio")

    # blockcopy, snapshot and blockcommit related parameters
    max_cache_size = params.get("max_cache_size", "10")
    max_blockcopy_size = params.get("mirror_max_cache_size", "1000")
    max_snapshot_size = params.get("snapshot_max_cache_size", "100")
    attach_option = params.get("attach_option")
    test_blockcopy_path = "%s.copy" % disk_path
    test_snapshot_path = "%s.s1" % disk_path
    blockcommit_option = params.get("blockcommit_option",
                                    "--active --wait --verbose")
    blockcopy_option = params.get("blockcopy_option",
                                  "--transient-job --wait --verbose")
    test_batch_block_operation = "yes" == params.get(
        "test_batch_block_operation", "no")
    test_batch_cdrom = "yes" == params.get("test_batch_cdrom_operation", "no")

    # Destroy VM first.
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml = vmxml_backup.copy()

    try:
        if not libvirt_version.version_compare(7, 0, 0):
            test.cancel("Current libvirt version doesn't support the"
                        " metadata_cache feature")
        # Build new disk.
        libvirt.create_local_disk('file', disk_path, '10', disk_format)
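        # create_custom_metadata_disk builds a disk xml whose <driver>
        # carries a <metadata_cache><max_size> element (libvirt >= 7.0.0).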
        custom_disk = libvirt_disk.create_custom_metadata_disk(
            disk_path, disk_format, disk_device, device_target, device_bus,
            max_cache_size)
        hot_plug = "yes" == params.get("hot_plug", "no")

        # If hot plug, start VM first, otherwise stop VM if running.
        if hot_plug:
            vm.start()
            virsh.attach_device(domainarg=vm_name,
                                filearg=custom_disk.xml,
                                flagstr=attach_option,
                                debug=True,
                                ignore_status=False)
        else:
            vmxml.add_device(custom_disk)
            vmxml.sync()
            vm.start()
        vm.wait_for_login().close()

        if test_batch_block_operation:
            test_blockcopy_operation(vm_name, test_blockcopy_path, disk_format,
                                     disk_device, device_target, device_bus,
                                     max_blockcopy_size, blockcopy_option,
                                     test)
            test_snapshot_opearation(vm_name, test_snapshot_path, disk_format,
                                     max_snapshot_size, test)
            test_blockcommit_operation(vm_name, device_target,
                                       blockcommit_option, test)
        if test_batch_cdrom:
            test_batch_cdrom_operation(vm_name, disk_format, disk_device,
                                       device_target, device_bus, test)
        # Detach the hot-plugged device.
        if hot_plug:
            virsh.detach_device(vm_name,
                                custom_disk.xml,
                                flagstr=attach_option,
                                debug=True,
                                ignore_status=False,
                                wait_for_event=True)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        for file_path in [disk_path, test_blockcopy_path, test_snapshot_path]:
            if os.path.exists(file_path):
                os.remove(file_path)
        logging.info("Restoring vm...")
        vmxml_backup.sync()