Example #1
def run(test, params, env):
    """
    Test memory device attach and detach (hotplug and coldplug).

    1. Prepare the test environment; destroy or suspend a VM.
    2. Prepare the memory device XML.
    3. Edit the domain XML and start the domain.
    4. Perform attach/detach and related test operations.
    5. Recover the test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Variables tracking max/current memory; they may be updated
    # by check_dom_xml() after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write a file to it; this is what
        # actually consumes guest memory
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def mount_hugepages(page_size):
        """
        To mount hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size

        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
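        # e.g. page_size=1048576 results in roughly:
        #   mount -t hugetlbfs -o pagesize=1048576K hugetlbfs /dev/hugepages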
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)

    def setup_hugepages(page_size=2048, shp_num=2000):
        """
        To setup hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        :param shp_num: number of hugepages
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        To recover hugepages
        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def check_qemu_cmd(max_mem_rt, tg_size):
        """
        Check qemu command line options.
        :param max_mem_rt: size of max memory
        :param tg_size: Target hotplug memory size
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if discard:
            if libvirt_version.version_compare(7, 3, 0):
                cmd = cmd + " | grep " + '\\"discard-data\\":true'
            else:
                cmd += " | grep 'discard-data=yes'"
        elif max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'" %
                    (max_mem_slots, max_mem_rt))
            if tg_size:
                size = int(tg_size) * 1024
                if huge_pages or discard or cold_plug_discard:
                    cmd_str = r'memdimm.\|memory-backend-file,id=ram-node.'
                    cmd += (
                        " | grep 'memory-backend-file,id=%s' | grep 'size=%s" %
                        (cmd_str, size))
                else:
                    cmd_str = r'mem.\|memory-backend-ram,id=ram-node.'
                    cmd += (
                        " | grep 'memory-backend-ram,id=%s' | grep 'size=%s" %
                        (cmd_str, size))

                if pg_size:
                    cmd += ",host-nodes=%s" % node_mask
                    if numa_memnode:
                        for node in numa_memnode:
                            if ('nodeset' in node
                                    and node['nodeset'] in node_mask):
                                cmd += ",policy=%s" % node['mode']
                    cmd += ".*pc-dimm,node=%s" % tg_node
                if mem_addr:
                    cmd += (".*slot=%s" % (mem_addr['slot']))
                cmd += "'"
            if cold_plug_discard:
                cmd += " | grep 'discard-data=yes'"

        # Run the command
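        # Illustrative assembled pipeline (values are examples only):
        #   ps -ef | grep <vm_name> | grep -v grep \
        #     | grep 'slots=16,maxmem=<max_mem_rt>k' \
        #     | grep 'memory-backend-ram,id=...' | grep 'size=<tg_size*1024>...'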
        result = process.run(cmd, shell=True, verbose=True, ignore_status=True)
        if result.exit_status:
            test.fail('Qemu command check fail.')

    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s" %
               (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: vm.get_totalmem_sys(online) != int(old_mem),
            30,
            first=20.0)
        new_mem = vm.get_totalmem_sys(online)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "attach memory device")

        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest couldn't changed after "
                          "detach memory device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # These run()-level trackers may be updated after attach/detach
        nonlocal new_max_mem
        nonlocal new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            logging.info("at_mem=%s,dt_mem=%s", at_mem, dt_mem)
            logging.info("detach_device is %s", detach_device)
            if at_mem:
                if at_times:
                    assert int(max_mem) + (int(tg_size) *
                                           at_times) == xml_max_mem
                else:
                    assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                if at_times:
                    assert int(cur_mem) + (int(tg_size) *
                                           at_times) == xml_cur_mem
                else:
                    assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                memory_devices = 1
                if at_times:
                    memory_devices = at_times
                if len(mem_dev) != memory_devices:
                    test.fail("Found wrong number of memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                if at_times:
                    assert int(new_max_mem) - (int(tg_size) *
                                               at_times) == xml_max_mem
                    assert int(new_cur_mem) - (int(tg_size) *
                                               at_times) == xml_cur_mem
                else:
                    assert int(new_max_mem) - int(tg_size) == xml_max_mem
                    # Bug 1220702, skip the check for current memory
                    assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Found unmatched memory setting from domain xml")

    def check_mem_align():
        """
        Check whether the memory sizes are aligned to 256 MiB
        """
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        dom_mem = {}
        dom_mem['maxMemory'] = int(dom_xml.max_mem_rt)
        dom_mem['memory'] = int(dom_xml.memory)
        dom_mem['currentMemory'] = int(dom_xml.current_mem)

        cpuxml = dom_xml.cpu
        numa_cell = cpuxml.numa_cell
        dom_mem['numacellMemory'] = int(numa_cell[0]['memory'])
        sum_numa_mem = sum([int(cell['memory']) for cell in numa_cell])

        attached_mem = dom_xml.get_devices(device_type='memory')[0]
        dom_mem['attached_mem'] = attached_mem.target.size

        all_align = True
        for key in dom_mem:
            logging.info('%-20s:%15d', key, dom_mem[key])
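            # 262144 KiB == 256 MiB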
            if dom_mem[key] % 262144:
                logging.error('%s is not aligned to 256 MiB', key)
                if key == 'currentMemory':
                    continue
                all_align = False

        if not all_align:
            test.fail('Memory is not aligned to 256 MiB')

        if dom_mem['memory'] == sum_numa_mem + dom_mem['attached_mem']:
            logging.info(
                'Check Pass: Memory is equal to (all numa memory + memory device)'
            )
        else:
            test.fail(
                'Memory is not equal to (all numa memory + memory device)')

        return dom_mem

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        def _wait_for_restore():
            try:
                virsh.restore(save_file, debug=True, ignore_status=False)
                return True
            except Exception as e:
                logging.error(e)

        utils_misc.wait_for(_wait_for_restore, 30, step=5)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def add_device(dev_xml, attach, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach:
            ret = virsh.attach_device(vm_name,
                                      dev_xml.xml,
                                      flagstr=attach_option,
                                      debug=True)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if max_mem:
            vmxml.max_mem = int(max_mem)
        if cur_mem:
            vmxml.current_mem = int(cur_mem)
        if memory_val:
            vmxml.memory = int(memory_val)
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # Not exists
                pass

        if numa_cells:
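            # Each numa_cells entry is a Python dict literal, e.g.
            # "{'id': 0, 'cpus': '0-1', 'memory': '1048576'}" (illustrative)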
            cells = [ast.literal_eval(x) for x in numa_cells]
            # Rounding the numa memory values
            if align_mem_values:
                for cell in range(cells.__len__()):
                    memory_value = str(
                        utils_numeric.align_value(cells[cell]["memory"],
                                                  align_to_value))
                    cells[cell]["memory"] = memory_value
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu mode='host-model'><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cpu_xml.dicts_to_cells(cells)
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages or discard or cold_plug_discard:
            membacking = vm_xml.VMMemBackingXML()
            membacking.discard = True
            membacking.source = ''
            membacking.source_type = 'file'
            if huge_pages:
                hugepages = vm_xml.VMHugepagesXML()
                pagexml_list = []
                for i in range(len(huge_pages)):
                    pagexml = hugepages.PageXML()
                    pagexml.update(huge_pages[i])
                    pagexml_list.append(pagexml)
                hugepages.pages = pagexml_list
                membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    detach_alias = "yes" == params.get("detach_alias", "no")
    detach_alias_options = params.get("detach_alias_options")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    wait_before_save_secs = int(params.get("wait_before_save_secs", 0))
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    memory_val = params.get('memory_val', '')
    mem_align = 'yes' == params.get('mem_align', 'no')
    hot_plug = 'yes' == params.get('hot_plug', 'no')
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")
    align_mem_values = "yes" == params.get("align_mem_values", "no")
    align_to_value = int(params.get("align_to_value", "65536"))
    hot_reboot = "yes" == params.get("hot_reboot", "no")
    rand_reboot = "yes" == params.get("rand_reboot", "no")
    guest_known_unplug_errors = []
    guest_known_unplug_errors.append(params.get("guest_known_unplug_errors"))
    host_known_unplug_errors = []
    host_known_unplug_errors.append(params.get("host_known_unplug_errors"))
    discard = "yes" == params.get("discard", "no")
    cold_plug_discard = "yes" == params.get("cold_plug_discard", "no")
    if cold_plug_discard or discard:
        mem_discard = 'yes'
    else:
        mem_discard = 'no'

    # params for attached device
    mem_model = params.get("mem_model", "dimm")
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    huge_page_num = int(params.get('huge_page_num', 2000))
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [
        ast.literal_eval(x) for x in params.get("huge_pages", "").split()
    ]
    numa_memnode = [
        ast.literal_eval(x) for x in params.get("numa_memnode", "").split()
    ]
    at_times = int(params.get("attach_times", 1))
    online = params.get("mem_online", "no")

    config = utils_config.LibvirtQemuConfig()
    setup_hugepages_flag = params.get("setup_hugepages")
    if (setup_hugepages_flag == "yes"):
        cpu_arch = cpu_util.get_family() if hasattr(cpu_util, 'get_family')\
            else cpu_util.get_cpu_arch()
        if cpu_arch == 'power8':
            pg_size = '16384'
            huge_page_num = 200
        elif cpu_arch == 'power9':
            pg_size = '2048'
            huge_page_num = 2000
        for page in huge_pages:
            page.update({'size': pg_size})
        setup_hugepages(int(pg_size), shp_num=huge_page_num)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if not libvirt_version.version_compare(1, 2, 14):
        test.cancel("Memory hotplug not supported in current libvirt version.")

    if 'align_256m' in params.get('name', ''):
        arch = platform.machine()
        if arch.lower() != 'ppc64le':
            test.cancel('This case is for ppc64le only.')

    if align_mem_values:
        # Rounding the following values to 'align'
        max_mem = utils_numeric.align_value(max_mem, align_to_value)
        max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value)
        cur_mem = utils_numeric.align_value(cur_mem, align_to_value)
        tg_size = utils_numeric.align_value(tg_size, align_to_value)

    try:
        # Drop caches first so that the host has enough free memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()
        numa_info = utils_misc.NumaInfo()
        logging.debug(numa_info.get_all_node_meminfo())

        # Start the domain anyway if attaching a memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = vm.get_totalmem_sys(online)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        elif discard:
            vm.start()
            session = vm.wait_for_login()
            check_qemu_cmd(max_mem_rt, tg_size)
        dev_xml = None

        # To attach the memory device.
        if (add_mem_device and not hot_plug) or cold_plug_discard:
            at_times = int(params.get("attach_times", 1))
            randvar = 0
            if rand_reboot:
                rand_value = random.randint(15, 25)
                logging.debug("reboots at %s", rand_value)
            for x in range(at_times):
                # If an error is expected, only check the command error
                # status on the last attach iteration
                device_alias = "ua-" + str(uuid.uuid4())
                dev_xml = utils_hotplug.create_mem_xml(
                    tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit, tg_node,
                    node_mask, mem_model, mem_discard, device_alias)
                randvar = randvar + 1
                logging.debug("attaching device count = %s", x)
                if x == at_times - 1:
                    add_device(dev_xml, attach_device, attach_error)
                else:
                    add_device(dev_xml, attach_device)
                if hot_reboot:
                    vm.reboot()
                    vm.wait_for_login()
                if rand_reboot and randvar == rand_value:
                    vm.reboot()
                    vm.wait_for_login()
                    randvar = 0
                    rand_value = random.randint(15, 25)
                    logging.debug("reboots at %s", rand_value)

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError as detail:
                if start_error:
                    pass
                else:
                    except_msg = "memory hotplug isn't supported by this QEMU binary"
                    if except_msg in detail.reason:
                        test.cancel(detail)
                    test.fail(detail)

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Hotplug memory device
        if add_mem_device and hot_plug:
            process.run('ps -ef|grep qemu', shell=True, verbose=True)
            session = vm.wait_for_login()
            original_mem = vm.get_totalmem_sys()
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit,
                                                   tg_node, node_mask,
                                                   mem_model)
            add_device(dev_xml, True)
            mem_after = vm.get_totalmem_sys()
            params['delta'] = mem_after - original_mem

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        if mem_align:
            dom_mem = check_mem_align()
            check_qemu_cmd(dom_mem['maxMemory'], dom_mem['attached_mem'])
            if hot_plug and params['delta'] != dom_mem['attached_mem']:
                test.fail(
                    'Memory after attach not equal to original mem + attached mem'
                )

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd(max_mem_rt, tg_size)

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config")
                and not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total, check_option="attach")

        # Consuming memory on guest,
        # to verify memory changes by numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            # Consume 1500M of guest memory
            consume_vm_mem(1500)
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                test.fail("Numa memory can't be consumed on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            # Wait 10s for vm to be ready before managedsave
            time.sleep(wait_before_save_secs)
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)

            def _wait_for_vm_start():
                try:
                    vm.start()
                    return True
                except Exception as e:
                    logging.error(e)

            utils_misc.wait_for(_wait_for_vm_start, timeout=30, step=5)
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            # Wait 10s for vm to be ready before save
            time.sleep(wait_before_save_secs)
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        unplug_failed_with_known_error = False
        if detach_device:
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit,
                                                   tg_node, node_mask,
                                                   mem_model, mem_discard)
            for x in range(at_times):
                if not detach_alias:
                    ret = virsh.detach_device(vm_name,
                                              dev_xml.xml,
                                              flagstr=attach_option,
                                              debug=True)
                else:
                    ret = virsh.detach_device_alias(vm_name,
                                                    device_alias,
                                                    detach_alias_options,
                                                    debug=True)
                if ret.stderr and host_known_unplug_errors:
                    for known_error in host_known_unplug_errors:
                        if (known_error[0] == known_error[-1]) and \
                           known_error.startswith(("'")):
                            known_error = known_error[1:-1]
                        if known_error in ret.stderr:
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occurred in Host, while"
                                " hot unplug: %s", known_error)
                if unplug_failed_with_known_error:
                    break
                try:
                    libvirt.check_exit_status(ret, detach_error)
                except Exception as detail:
                    dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                    try:
                        session = vm.wait_for_login()
                        utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                                ignore_result=True,
                                                session=session,
                                                level_check=5)
                    except Exception:
                        session.close()
                        test.fail("After memory unplug Unable to connect to VM"
                                  " or unable to collect dmesg")
                    session.close()
                    if os.path.exists(dmesg_file):
                        with open(dmesg_file, 'r') as f:
                            flag = re.findall(
                                r'memory memory\d+?: Offline failed', f.read())
                        if not flag:
                            # No "Offline failed" message in dmesg, so the
                            # detach failure is unexpected
                            os.remove(dmesg_file)
                            test.fail(detail)
                        # The attached memory is in use by the vm and could
                        # not be unplugged; this result is expected
                        unplug_failed_with_known_error = True
                        os.remove(dmesg_file)
            # Check whether a known error occurred or not
            dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            try:
                session = vm.wait_for_login()
                utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                        ignore_result=True,
                                        session=session,
                                        level_check=4)
            except Exception:
                session.close()
                test.fail("After memory unplug Unable to connect to VM"
                          " or unable to collect dmesg")
            session.close()
            if guest_known_unplug_errors and os.path.exists(dmesg_file):
                for known_error in guest_known_unplug_errors:
                    if (known_error[0] == known_error[-1]) and \
                       known_error.startswith(("'")):
                        known_error = known_error[1:-1]
                    with open(dmesg_file, 'r') as f:
                        if known_error in f.read():
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occurred, while hot"
                                " unplug: %s", known_error)
            if test_dom_xml and not unplug_failed_with_known_error:
                check_dom_xml(dt_mem=detach_device)
                # Remove dmesg temp file
                if os.path.exists(dmesg_file):
                    os.remove(dmesg_file)
    except xcepts.LibvirtXMLError:
        if define_error:
            pass
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        if (setup_hugepages_flag == "yes"):
            restore_hugepages()
        vmxml_backup.sync()
Example #2
def run(test, params, env):
    """
    Test virtiofs filesystem device:

    1. Start guest with 1/2 virtiofs filesystem devices.
    2. Start 2 guests with the same virtiofs filesystem device.
    3. Coldplug/coldunplug virtiofs filesystem device.
    4. Share data between guests and host.
    5. Lifecycle for guest with virtiofs filesystem device.
    """
    def generate_expected_process_option(expected_results):
        """
        Generate expected virtiofsd process option
        """
        if cache_mode != "auto":
            expected_results = "cache=%s" % cache_mode
        if xattr == "on":
            expected_results += ",xattr"
        elif xattr == "off":
            expected_results += ",no_xattr"
        if flock == "on":
            expected_results += ",flock"
        else:
            expected_results += ",no_flock"
        if lock_posix == "on":
            expected_results += ",posix_lock"
        else:
            expected_results += ",no_posix_lock"
        logging.debug(expected_results)
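        # With the defaults used here (cache=none, xattr/flock/posix_lock on)
        # this yields roughly: "cache=none,xattr,flock,posix_lock"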
        return expected_results

    def shared_data(vm_names, fs_devs):
        """
        Share data between guests and host:
        1. Mount dir in guest;
        2. Write a file in guest;
        3. Check the md5sum values are the same in guests and host.
        """
        md5s = []
        for vm in vms:
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                logging.debug(fs_dev)
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=False)
                session.cmd('mkdir -p %s' % mount_dir)
                logging.debug("mount virtiofs dir in guest")
                cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'],
                                                   mount_dir)
                status, output = session.cmd_status_output(cmd, timeout=300)
                if status != 0:
                    session.close()
                    test.fail("mount virtiofs dir failed: %s" % output)
                if vm == vms[0]:
                    filename_guest = mount_dir + '/' + vm.name
                    cmd = "dd if=/dev/urandom of=%s bs=1M count=512 oflag=direct" % filename_guest
                    status, output = session.cmd_status_output(cmd,
                                                               timeout=300)
                    if status != 0:
                        session.close()
                        test.fail("Write data failed: %s" % output)
                md5_value = session.cmd_status_output(
                    "md5sum %s" % filename_guest)[1].strip().split()[0]
                md5s.append(md5_value)
                logging.debug(md5_value)
                md5_value = process.run(
                    "md5sum %s" %
                    filename_guest).stdout_text.strip().split()[0]
                logging.debug(md5_value)
                md5s.append(md5_value)
            session.close()
        if len(set(md5s)) != len(fs_devs):
            test.fail("The md5sum value are not the same in guests and host")

    def launch_externally_virtiofs(source_dir, source_socket):
        """
        Launch externally virtiofs

        :param source_dir:  the dir shared on host
        :param source_socket: the socket file listened on
        """
        process.run('chcon -t virtd_exec_t %s' % path,
                    ignore_status=False,
                    shell=True)
        cmd = "systemd-run %s --socket-path=%s -o source=%s" % (
            path, source_socket, source_dir)
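        # Illustrative expansion with the default parameters:
        #   systemd-run /usr/libexec/virtiofsd \
        #     --socket-path=/var/tmp/vm001.socket -o source=/var/tmp/mount_tag0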
        try:
            process.run(cmd, ignore_status=False, shell=True)
            # Make sure the socket is created
            utils_misc.wait_for(lambda: os.path.exists(source_socket),
                                timeout=3)
            process.run("chown qemu:qemu %s" % source_socket,
                        ignore_status=False)
            process.run('chcon -t svirt_image_t %s' % source_socket,
                        ignore_status=False,
                        shell=True)
        except Exception as err:
            cmd = "pkill virtiofsd"
            process.run(cmd, shell=True)
            test.fail("{}".format(err))

    def prepare_stress_script(script_path, script_content):
        """
        Refer to xfstest generic/531. Create stress test script to create a lot of unlinked files.

        :param script_path: The path of the script
        :param script_content: The content of the stress script
        """
        logging.debug("stress script path: %s content: %s" %
                      (script_path, script_content))
        script_lines = script_content.split(';')
        try:
            with open(script_path, 'w') as fd:
                fd.write('\n'.join(script_lines))
            os.chmod(script_path, 0o777)
        except Exception as e:
            test.error("Prepare the guest stress script failed %s" % e)

    def run_stress_script(session, script_path):
        """
        Run stress script in the guest

        :param session: guest session
        :param script_path: The path of script in the guest
        """
        # Set ULIMIT_NOFILE to increase the number of unlinked files
        session.cmd("ulimit -n 500000 && /usr/bin/python3 %s" % script_path,
                    timeout=120)

    def umount_fs(vm):
        """
        Unmount the filesystem in guest

        :param vm: filesystem in this vm that should be unmounted
        """
        if vm.is_alive():
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('umount -f %s' % mount_dir, ignore_all_errors=True)
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
            session.close()

    def check_detached_xml(vm):
        """
        Check whether there is xml about the filesystem device
        in the vm xml

        :param vm: the vm to be checked
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        filesystems = vmxml.devices.by_device_tag('filesystem')
        if filesystems:
            test.fail("There should be no filesystem devices in guest "
                      "xml after hotunplug")

    def check_filesystem_in_guest(vm, fs_dev):
        """
        Check whether there is virtiofs in vm

        :param vm: the vm to be checked
        :param fs_dev: the virtiofs device to be checked
        """
        session = vm.wait_for_login()
        mount_dir = '/var/tmp/' + fs_dev.target['dir']
        cmd = "mkdir %s; mount -t virtiofs %s %s" % (
            mount_dir, fs_dev.target['dir'], mount_dir)
        status, output = session.cmd_status_output(cmd, timeout=300)
        session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
        if not status:
            test.fail(
                "Mounting virtiofs should fail after hotunplugging the "
                "device: %s" % output)
        session.close()

    start_vm = params.get("start_vm", "no")
    vm_names = params.get("vms", "avocado-vt-vm1").split()
    cache_mode = params.get("cache_mode", "none")
    xattr = params.get("xattr", "on")
    lock_posix = params.get("lock_posix", "on")
    flock = params.get("flock", "on")
    xattr = params.get("xattr", "on")
    path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd")
    queue_size = int(params.get("queue_size", "512"))
    driver_type = params.get("driver_type", "virtiofs")
    guest_num = int(params.get("guest_num", "1"))
    fs_num = int(params.get("fs_num", "1"))
    vcpus_per_cell = int(params.get("vcpus_per_cell", 2))
    dir_prefix = params.get("dir_prefix", "mount_tag")
    error_msg_start = params.get("error_msg_start", "")
    error_msg_save = params.get("error_msg_save", "")
    status_error = params.get("status_error", "no") == "yes"
    socket_file_checking = params.get("socket_file_checking", "no") == "yes"
    suspend_resume = params.get("suspend_resume", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    coldplug = params.get("coldplug", "no") == "yes"
    hotplug_unplug = params.get("hotplug_unplug", "no") == "yes"
    detach_device_alias = params.get("detach_device_alias", "no") == "yes"
    extra_hugepages = params.get_numeric("extra_hugepages")
    edit_start = params.get("edit_start", "no") == "yes"
    with_hugepages = params.get("with_hugepages", "yes") == "yes"
    with_numa = params.get("with_numa", "yes") == "yes"
    with_memfd = params.get("with_memfd", "no") == "yes"
    source_socket = params.get("source_socket", "/var/tmp/vm001.socket")
    launched_mode = params.get("launched_mode", "auto")
    destroy_start = params.get("destroy_start", "no") == "yes"
    bug_url = params.get("bug_url", "")
    script_content = params.get("stress_script", "")
    stdio_handler_file = "file" == params.get("stdio_handler")

    fs_devs = []
    vms = []
    vmxml_backups = []
    expected_fails_msg = []
    expected_results = ""
    host_hp_size = utils_memory.get_huge_page_size()
    backup_huge_pages_num = utils_memory.get_num_huge_pages()
    huge_pages_num = 0

    if len(vm_names) != guest_num:
        test.cancel("This test needs exactly %d vms." % guest_num)

    if not libvirt_version.version_compare(7, 0, 0) and not with_numa:
        test.cancel("Not supported without NUMA before 7.0.0")

    if not libvirt_version.version_compare(7, 6, 0) and destroy_start:
        test.cancel("Bug %s is not fixed on current build" % bug_url)

    try:
        # Define filesystem device xml
        for index in range(fs_num):
            driver = {'type': driver_type, 'queue': queue_size}
            source_dir = os.path.join('/var/tmp/',
                                      str(dir_prefix) + str(index))
            logging.debug(source_dir)
            if not os.path.isdir(source_dir):
                os.mkdir(source_dir)
            target_dir = dir_prefix + str(index)
            source = {'socket': source_socket}
            target = {'dir': target_dir}
            if launched_mode == "auto":
                binary_keys = [
                    'path', 'cache_mode', 'xattr', 'lock_posix', 'flock'
                ]
                binary_values = [path, cache_mode, xattr, lock_posix, flock]
                binary_dict = dict(zip(binary_keys, binary_values))
                source = {'dir': source_dir}
                accessmode = "passthrough"
                fsdev_keys = [
                    'accessmode', 'driver', 'source', 'target', 'binary'
                ]
                fsdev_values = [
                    accessmode, driver, source, target, binary_dict
                ]
            else:
                fsdev_keys = ['driver', 'source', 'target']
                fsdev_values = [driver, source, target]
            fsdev_dict = dict(zip(fsdev_keys, fsdev_values))
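            # In "auto" mode fsdev_dict ends up roughly like (illustrative):
            # {'accessmode': 'passthrough',
            #  'driver': {'type': 'virtiofs', 'queue': 512},
            #  'source': {'dir': '/var/tmp/mount_tag0'},
            #  'target': {'dir': 'mount_tag0'},
            #  'binary': {'path': ..., 'cache_mode': ..., 'xattr': ...,
            #             'lock_posix': ..., 'flock': ...}}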
            logging.debug(fsdev_dict)
            fs_dev = libvirt_device_utils.create_fs_xml(
                fsdev_dict, launched_mode)
            logging.debug(fs_dev)
            fs_devs.append(fs_dev)

        # Start guest with virtiofs filesystem device
        for index in range(guest_num):
            logging.debug("prepare vm %s", vm_names[index])
            vm = env.get_vm(vm_names[index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            if vmxml.max_mem < 1024000:
                vmxml.max_mem = 1024000
            if with_hugepages:
                huge_pages_num += vmxml.max_mem // host_hp_size + extra_hugepages
                utils_memory.set_num_huge_pages(huge_pages_num)
            vmxml.remove_all_device_by_type('filesystem')
            vmxml.sync()
            numa_no = None
            if with_numa:
                numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1
            vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name,
                                      vmxml.vcpu,
                                      numa_number=numa_no)
            vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,
                                               access_mode="shared",
                                               hpgs=with_hugepages,
                                               memfd=with_memfd)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            logging.debug(vmxml)
            if launched_mode == "externally":
                launch_externally_virtiofs(source_dir, source_socket)
            if coldplug:
                ret = virsh.attach_device(vm_names[index],
                                          fs_devs[0].xml,
                                          flagstr='--config',
                                          debug=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
            else:
                if not hotplug_unplug:
                    for fs in fs_devs:
                        vmxml.add_device(fs)
                        vmxml.sync()
            logging.debug(vmxml)
            libvirt_pcicontr.reset_pci_num(vm_names[index])
            result = virsh.start(vm_names[index], debug=True)
            if hotplug_unplug:
                if stdio_handler_file:
                    qemu_config = LibvirtQemuConfig()
                    qemu_config.stdio_handler = "file"
                    utils_libvirtd.Libvirtd().restart()
                for fs_dev in fs_devs:
                    ret = virsh.attach_device(vm_names[index],
                                              fs_dev.xml,
                                              ignore_status=True,
                                              debug=True)
                    libvirt.check_exit_status(ret, status_error)

                if status_error:
                    return
            if status_error and not managedsave:
                expected_error = error_msg_start
                utils_test.libvirt.check_exit_status(result, expected_error)
                return
            else:
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            expected_results = generate_expected_process_option(
                expected_results)
            if launched_mode == "auto":
                cmd = 'ps aux | grep virtiofsd | head -n 1'
                utils_test.libvirt.check_cmd_output(cmd,
                                                    content=expected_results)

        if managedsave:
            expected_error = error_msg_save
            result = virsh.managedsave(vm_names[0],
                                       ignore_status=True,
                                       debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        else:
            shared_data(vm_names, fs_devs)
            if suspend_resume:
                virsh.suspend(vm_names[0], debug=True, ignore_status=False)
                time.sleep(30)
                virsh.resume(vm_names[0], debug=True, ignore_status=False)
            elif destroy_start:
                session = vm.wait_for_login(timeout=120)
                # Prepare the guest test script
                script_path = os.path.join(fs_devs[0].source["dir"], "test.py")
                script_content %= (fs_devs[0].source["dir"],
                                   fs_devs[0].source["dir"])
                prepare_stress_script(script_path, script_content)
                # Run guest stress script
                stress_script_thread = threading.Thread(
                    target=run_stress_script, args=(session, script_path))
                stress_script_thread.daemon = True
                stress_script_thread.start()
                # Create a lot of unlink files
                time.sleep(60)
                virsh.destroy(vm_names[0], debug=True, ignore_status=False)
                ret = virsh.start(vm_names[0], debug=True)
                libvirt.check_exit_status(ret)
            elif edit_start:
                vmxml_virtio_backup = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_names[0])
                if vm.is_alive():
                    virsh.destroy(vm_names[0])
                    cmd = "virt-xml %s --edit --qemu-commandline '\-foo'" % vm_names[
                        0]
                    cmd_result = process.run(cmd,
                                             ignore_status=True,
                                             shell=True)
                    logging.debug(virsh.dumpxml(vm_names[0]))
                    if cmd_result.exit_status:
                        test.error("virt-xml edit guest failed: %s" %
                                   cmd_result)
                    result = virsh.start(vm_names[0],
                                         ignore_status=True,
                                         debug=True)
                    if error_msg_start:
                        expected_fails_msg.append(error_msg_start)
                    utils_test.libvirt.check_result(
                        result, expected_fails=expected_fails_msg)
                    if not libvirt_version.version_compare(6, 10, 0):
                        # Because of bug #1897105, it was fixed in libvirt-6.10.0,
                        # before this version, need to recover the env manually.
                        cmd = "pkill virtiofsd"
                        process.run(cmd, shell=True)
                    if not vm.is_alive():
                        # Restoring vm and check if vm can start successfully
                        vmxml_virtio_backup.sync()
                        virsh.start(vm_names[0],
                                    ignore_status=False,
                                    shell=True)
            elif socket_file_checking:
                result = virsh.domid(vm_names[0])
                domid = result.stdout.strip()
                domain_dir = "var/lib/libvirt/qemu/domain-" + domid + '-' + vm_names[
                    0]
                if result.exit_status:
                    test.fail("Get domid failed.")
                    for fs_dev in fs_devs:
                        alias = fs_dev.alias['name']
                        expected_pid = domain_dir + alias + '-fs.pid'
                        expected_sock = alias + '-fs.sock'
                        status1 = process.run('ls -l %s' % expected_pid,
                                              shell=True).exit_status
                        status2 = process.run('ls -l %s' % expected_sock,
                                              shell=True).exit_status
                        if not (status1 and status2):
                            test.fail(
                                "The socket and pid file is not as expected")
            elif hotplug_unplug:
                for vm in vms:
                    umount_fs(vm)
                    for fs_dev in fs_devs:
                        if detach_device_alias:
                            alias = fs_dev.alias['name']
                            cmd = 'lsof /var/log/libvirt/qemu/%s-%s-virtiofsd.log' % (
                                vm.name, alias)
                            output = process.run(cmd).stdout_text.splitlines()
                            for item in output[1:]:
                                if stdio_handler_file:
                                    if item.split()[0] != "virtiofsd":
                                        test.fail(
                                            "When setting stdio_handler as "
                                            "file, the command writing the "
                                            "log should be virtiofsd!")
                                else:
                                    if item.split()[0] != "virtlogd":
                                        test.fail(
                                            "When setting stdio_handler as "
                                            "logd, the command writing the "
                                            "log should be virtlogd!")
                            ret = virsh.detach_device_alias(
                                vm.name,
                                alias,
                                ignore_status=True,
                                debug=True,
                                wait_for_event=True)
                        else:
                            ret = virsh.detach_device(vm.name,
                                                      fs_dev.xml,
                                                      ignore_status=True,
                                                      debug=True,
                                                      wait_for_event=True)
                        libvirt.check_exit_status(ret, status_error)
                        check_filesystem_in_guest(vm, fs_dev)
                    check_detached_xml(vm)
    finally:
        for vm in vms:
            if vm.is_alive():
                umount_fs(vm)
                vm.destroy(gracefully=False)
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
        for index in range(fs_num):
            process.run('rm -rf %s' % ('/var/tmp/' + str(dir_prefix) +
                                       str(index)),
                        ignore_status=False)
            process.run('rm -rf %s' % source_socket,
                        ignore_status=False,
                        shell=True)
        if launched_mode == "externally":
            process.run('restorecon %s' % path,
                        ignore_status=False,
                        shell=True)
        utils_memory.set_num_huge_pages(backup_huge_pages_num)
        if stdio_handler_file:
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
Example #3
def run(test, params, env):
    """
    Test vsock device:

    1. Edit/start guest with vsock device.
    2. Hotplug/hotunplug vsock device.
    3. Coldplug/coldunplug vsock device.
    4. Check if the hotplugged vsock device communicates.
    """
    def remove_all_vsocks(vm, vmxml):
        """
        Removes all vsock devices from current domain
        to avoid failures due to original domain definition
        which must have been backed up earlier for correct
        restore.

        :param vm: the current test domain
        :param vmxml: VMXML of the current test domain
        :return: None
        """

        vmxml.remove_all_device_by_type('vsock')
        was_alive = vm.is_alive()
        vmxml.sync()
        if was_alive:
            vm.start()

    def env_setup():
        """
        1. Install build dependency for nc-vsock both on guest
        and host.
        2. Get nc-vsock code
        3. Build nc-vsock on both guest and host.
        :return: None
        """

        session = vm.wait_for_login()
        cmds = [
            'rpm -q lsof || yum -y install lsof',
            'rpm -q git || yum -y install git',
            'rpm -q make || yum -y install make',
            'rpm -q gcc && yum -y reinstall gcc || yum -y install gcc',
            'rm -rf %s' % NC_VSOCK_DIR,
            'git clone %s %s' % (git_repo, NC_VSOCK_DIR),
            'cd %s && make' % NC_VSOCK_DIR,
        ]
        for cmd in cmds:
            for where in ["guest", "host"]:
                session_arg = session if where == "guest" else None
                status, output = utils_misc.cmd_status_output(
                    cmd, shell=True, timeout=CMD_TIMEOUT, session=session_arg)
                cancel_if_failed(cmd, status, output, where)

        session.close()

    def cancel_if_failed(cmd, status, output, where):
        """
        Cancel test execution as soon as a command fails, reporting its output

        :param cmd: Command that was executed
        :param status: Exit status of command
        :param output: Output of command
        :param where: "guest" or "host"
        :return:
        """

        if status:
            test.cancel("Failed to run %s on %s output: %s" %
                        (cmd, where, output))

    def write_from_host_to_guest():
        """
        1. Create file for stdin to nc-vsock
        2. Invoke nc-vsock as client writing to guest cid
        :return msg: The message sent to the server
        """

        msg = "message from client"
        process.run('echo %s > %s' % (msg, NC_VSOCK_CLI_TXT), shell=True)
        output = process.run(
            "%s %d %s < %s" %
            (NC_VSOCK_CMD, int(cid), VSOCK_PORT, NC_VSOCK_CLI_TXT),
            shell=True).stdout_text
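        # Illustrative expansion (NC_VSOCK_* constants are defined at module
        # level, not shown here): ./nc-vsock <cid> <port> < client_input.txt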
        logging.debug(output)
        process.system_output('cat %s' % NC_VSOCK_CLI_TXT)
        return msg

    def wait_for_guest_to_receive(server):
        """
        nc-vsock server finishes as soon as it receives data.
        We report if it's still running.

        :param server: The started server instance accepting requests
        :return: Nothing
        """

        server.join(5)
        if server.is_alive():
            logging.debug("The server thread is still running in the guest.")

    def validate_data_transfer_by_vsock():
        """
        1. Setup nc-vsock on host and guest
        2. Start vsock server on guest (wait a bit)
        3. Send message from host to guest using correct cid
        4. Get received message from vsock server
        5. Verify message was sent
        """

        env_setup()

        session = vm.wait_for_login()

        def _start_vsock_server_in_guest():
            """
            Starts the nc-vsock server to listen on VSOCK_PORT in the guest.
            The server will stop as soon as it receives a message from the host.
            :return:
            """

            session.cmd("%s -l %s > %s" %
                        (NC_VSOCK_CMD, VSOCK_PORT, NC_VSOCK_SRV_OUT))

        server = Thread(target=_start_vsock_server_in_guest)
        server.start()
        time.sleep(5)

        sent_data = write_from_host_to_guest()
        wait_for_guest_to_receive(server)
        received_data = session.cmd_output('cat %s' % NC_VSOCK_SRV_OUT).strip()

        if not sent_data == received_data:
            test.fail("Data transfer error with vsock device\n"
                      "Sent: '%s'\n"
                      "Received:'%s'" % (sent_data, received_data))

        session.close()

    def managedsave_restore():
        """
        Check that vm can be saved and restarted with current configuration
        """

        result = virsh.managedsave(vm_name, debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        result = virsh.start(vm_name)
        utils_test.libvirt.check_exit_status(result, expect_error=False)

    start_vm = params.get("start_vm", "no")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    auto_cid = params.get("auto_cid", "no")
    status_error = params.get("status_error", "no") == "yes"
    edit_xml = params.get("edit_xml", "no") == "yes"
    option = params.get("option", "")
    git_repo = params.get("git_repo", "")
    invalid_cid = params.get("invalid_cid", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    no_vsock = params.get("no_vsock", "no") == "yes"
    vsock_num = params.get("num")
    communication = params.get("communication", "no") == "yes"
    detach_device_alias = params.get("detach_device_alias", "no") == "yes"
    # Backup xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    remove_all_vsocks(vm, vmxml)

    # Define vsock device xml
    vsock_dev = Vsock()
    vsock_dev.model_type = "virtio"
    if process.run("modprobe vhost_vsock").exit_status != 0:
        test.fail("Failed to load vhost_vsock module")
    if invalid_cid:
        cid = "-1"
    else:
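        # CIDs 0-2 are reserved (0: hypervisor, 1: loopback, 2: host),
        # so pick a small guest CID above 2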
        cid = random.randint(3, 10)
        vsock_dev.cid = {'auto': auto_cid, 'address': cid}
        vsock_dev.alias = {'name': 'ua-' + str(uuid.uuid1())}
    logging.debug(vsock_dev)

    if start_vm == "no" and vm.is_alive():
        virsh.destroy(vm_name)

    try:
        if edit_xml:
            edit_status1 = libvirt.exec_virsh_edit(
                vm_name, [(r":/<devices>/s/$/%s" %
                           re.findall(r"<vsock.*<\/vsock>", str(vsock_dev),
                                      re.M)[0].replace("/", r"\/"))])
            edit_status2 = True
            if vsock_num and int(vsock_num) == 2:
                edit_status2 = libvirt.exec_virsh_edit(
                    vm_name, [(r":/<devices>/s/$/%s" %
                               re.findall(r"<vsock.*<\/vsock>", str(vsock_dev),
                                          re.M)[0].replace("/", r"\/"))])
                logging.debug(vm_xml.VMXML.new_from_inactive_dumpxml(vm_name))

            if status_error:
                if edit_status1 or edit_status2:
                    test.fail("virsh edit should fail\n")
            else:
                if not edit_status1:
                    test.fail("Failed to edit vm xml with vsock device\n")
                else:
                    result = virsh.start(vm_name, debug=True)
                    utils_test.libvirt.check_exit_status(result,
                                                         expect_error=False)
        else:
            session = vm.wait_for_login()
            session.close()
            result = virsh.attach_device(vm_name,
                                         vsock_dev.xml,
                                         flagstr=option,
                                         debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            if option == "--config":
                result = virsh.start(vm_name, debug=True)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug(vmxml)
            vsock_list = vmxml.devices.by_device_tag("vsock")
            if not vsock_list:
                test.fail("No vsock device found in live xml\n")
            cid = vsock_list[0].cid['address']
            if communication:
                validate_data_transfer_by_vsock()
            if managedsave and not no_vsock:
                managedsave_restore()

            def _detach_completed():
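                """
                The detach is considered complete once no process holds
                /dev/vhost-vsock open any more (lsof exits with 1 when it
                finds nothing).
                """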
                status = process.run("lsof /dev/vhost-vsock",
                                     ignore_status=True,
                                     shell=True).exit_status
                return status == 1

            if detach_device_alias:
                result = virsh.detach_device_alias(vm.name,
                                                   vsock_dev.alias['name'],
                                                   ignore_status=False,
                                                   debug=True,
                                                   wait_for_event=True,
                                                   event_timeout=20)
            else:
                result = virsh.detach_device(vm_name,
                                             vsock_dev.xml,
                                             debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            utils_misc.wait_for(_detach_completed, timeout=20)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            vsock_list = vmxml.get_devices("vsock")
            if vsock_list:
                test.fail(
                    "Still find vsock device in live xml after hotunplug\n")
            if managedsave and no_vsock:
                managedsave_restore()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
Example #4
def run(test, params, env):
    """
    Test rng device options.

    1.Prepare test environment, destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def check_rng_xml(xml_set, exists=True):
        """
        Check rng xml in/not in domain xml
        :param xml_set: rng xml object for setting
        :param exists: Check xml exists or not in domain xml

        :return: boolean
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Get all current xml rng devices
        xml_devices = vmxml.devices
        rng_devices = xml_devices.by_device_tag("rng")
        logging.debug("rng_devices is %s", rng_devices)

        # check if xml attr same with checking
        try:
            rng_index = xml_devices.index(rng_devices[0])
            xml_get = xml_devices[rng_index]

            if not exists:
                # should be detach device check
                return False
        except IndexError:
            if exists:
                # should be attach device check
                return False
            else:
                logging.info("Can not find rng xml as expected")
                return True

        def get_compare_values(xml_set, xml_get, rng_attr):
            """
            Get set and get value to compare

            :param xml_set: setting xml object
            :param xml_get: getting xml object
            :param rng_attr: attribute of rng device
            :return: set and get value in xml
            """
            try:
                set_value = xml_set[rng_attr]
            except xcepts.LibvirtXMLNotFoundError:
                set_value = None
            try:
                get_value = xml_get[rng_attr]
            except xcepts.LibvirtXMLNotFoundError:
                get_value = None
            logging.debug("get xml_set value(%s) is %s, get xml_get value is %s",
                          rng_attr, set_value, get_value)
            return (set_value, get_value)

        match = True
        for rng_attr in xml_set.__slots__:
            set_value, get_value = get_compare_values(xml_set, xml_get, rng_attr)
            logging.debug("rng_attr=%s, set_value=%s, get_value=%s", rng_attr, set_value, get_value)
            if set_value and set_value != get_value:
                if rng_attr == 'backend':
                    for bak_attr in xml_set.backend.__slots__:
                        set_backend, get_backend = get_compare_values(xml_set.backend, xml_get.backend, bak_attr)
                        if set_backend and set_backend != get_backend:
                            if bak_attr == 'source':
                                set_source = xml_set.backend.source
                                get_source = xml_get.backend.source
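                                # every configured source dict must appear
                                # as a subset of at least one source read
                                # back from the live xml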
                                for i in range(len(set_source)):
                                    find = False
                                    for j in get_source:
                                        if set(set_source[i].items()).issubset(j.items()):
                                            find = True
                                            break
                                    if not find:
                                        logging.debug("set source(%s) not in get source(%s)",
                                                      set_source[i], get_source)
                                        match = False
                                        break
                            else:
                                logging.debug("set backend(%s)- %s not equal to get backend-%s",
                                              rng_attr, set_backend, get_backend)
                                match = False
                                break
                        else:
                            continue
                        if not match:
                            break
                else:
                    logging.debug("set value(%s)-%s not equal to get value-%s",
                                  rng_attr, set_value, get_value)
                    match = False
                    break
            else:
                continue
            if not match:
                break

        if match:
            logging.info("Found the same rng xml as hotplugged")
        else:
            test.fail("Rng xml in VM does not match the attached xml")

        return True

    def modify_rng_xml(dparams, sync=True, get_xml=False):
        """
        Modify rng xml options

        :params dparams: parameters for organize xml
        :params sync: whether sync to domain xml, if get_xml is True,
                      then sync will not take effect
        :params get_xml: whether get device xml
        :return: if get_xml=True, return xml file
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_model = dparams.get("backend_model", "random")
        backend_type = dparams.get("backend_type")
        backend_dev = dparams.get("backend_dev", "")
        backend_source_list = dparams.get("backend_source",
                                          "").split()
        backend_protocol = dparams.get("backend_protocol")
        rng_alias = dparams.get("rng_alias")
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        rng_xml = rng.Rng()
        rng_xml.rng_model = rng_model
        if rng_rate:
            rng_xml.rate = ast.literal_eval(rng_rate)
        backend = rng.Rng.Backend()
        backend.backend_model = backend_model
        if backend_type:
            backend.backend_type = backend_type
        if backend_dev:
            backend.backend_dev = backend_dev
        if backend_source_list:
            source_list = [ast.literal_eval(source) for source in
                           backend_source_list]
            backend.source = source_list
        if backend_protocol:
            backend.backend_protocol = backend_protocol
        rng_xml.backend = backend
        if detach_alias:
            rng_xml.alias = dict(name=rng_alias)

        logging.debug("Rng xml: %s", rng_xml)
        if get_xml:
            return rng_xml
        if sync:
            vmxml.add_device(rng_xml)
            vmxml.xmltreefile.write()
            vmxml.sync()
        else:
            status = libvirt.exec_virsh_edit(
                vm_name, [(r":/<devices>/s/$/%s" %
                           re.findall(r"<rng.*<\/rng>",
                                      str(rng_xml), re.M
                                      )[0].replace("/", r"\/"))])
            if not status:
                test.fail("Failed to edit vm xml")

    def check_qemu_cmd(dparams):
        """
        Verify qemu-kvm command line.
        """
        rng_model = dparams.get("rng_model", "virtio")
        rng_rate = dparams.get("rng_rate")
        backend_type = dparams.get("backend_type")
        backend_source_list = dparams.get("backend_source",
                                          "").split()
        cmd = ("ps -ef | grep %s | grep -v grep" % vm_name)
        chardev = src_host = src_port = None
        if backend_type == "tcp":
            chardev = "socket"
        elif backend_type == "udp":
            chardev = "udp"
        for bc_source in backend_source_list:
            source = ast.literal_eval(bc_source)
            if "mode" in source and source['mode'] == "connect":
                src_host = source['host']
                src_port = source['service']

        if chardev and src_host and src_port:
            cmd += (" | grep 'chardev %s,.*host=%s,port=%s'"
                    % (chardev, src_host, src_port))
        if rng_model == "virtio":
            cmd += (" | grep 'device %s'" % dparams.get("rng_device"))
        if rng_rate:
            rate = ast.literal_eval(rng_rate)
            cmd += (" | grep 'max-bytes=%s,period=%s'"
                    % (rate['bytes'], rate['period']))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Can't see rng option"
                      " in command line")

    def check_host():
        """
        Check random device on host
        """
        backend_dev = params.get("backend_dev")
        if backend_dev:
            cmd = "lsof |grep %s" % backend_dev
            ret = process.run(cmd, ignore_status=True, shell=True)
            if ret.exit_status or not ret.stdout_text.count("qemu"):
                test.fail("Failed to check random device"
                          " on host, command output: %s" %
                          ret.stdout_text)

    def check_snapshot(bgjob=None):
        """
        Do snapshot operation and check the results
        """
        snapshot_name1 = "snap.s1"
        snapshot_name2 = "snap.s2"
        if not snapshot_vm_running:
            vm.destroy(gracefully=False)
        ret = virsh.snapshot_create_as(vm_name, snapshot_name1, debug=True)
        libvirt.check_exit_status(ret)
        snap_lists = virsh.snapshot_list(vm_name, debug=True)
        if snapshot_name not in snap_lists:
            test.fail("Snapshot %s doesn't exist"
                      % snapshot_name)

        if snapshot_vm_running:
            options = "--force"
        else:
            options = ""
        ret = virsh.snapshot_revert(
            vm_name, ("%s %s" % (snapshot_name, options)), debug=True)
        libvirt.check_exit_status(ret)
        ret = virsh.dumpxml(vm_name, debug=True)
        if ret.stdout.strip().count("<rng model="):
            test.fail("Found rng device in xml")

        if snapshot_with_rng:
            if vm.is_alive():
                vm.destroy(gracefully=False)
            if bgjob:
                bgjob.kill_func()
            modify_rng_xml(params, False)

        # Start the domain before disk-only snapshot
        if vm.is_dead():
            # Add random server
            if params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils_misc.AsyncJob(cmd)
            vm.start()
            vm.wait_for_login().close()
        err_msgs = ("live disk snapshot not supported"
                    " with this QEMU binary")
        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only"
                                       % snapshot_name2, debug=True)
        if ret.exit_status:
            if ret.stderr.count(err_msgs):
                test.skip(err_msgs)
            else:
                test.fail("Failed to create external snapshot")
        snap_lists = virsh.snapshot_list(vm_name, debug=True)
        if snapshot_name2 not in snap_lists:
            test.fail("Failed to check snapshot list")

        ret = virsh.domblklist(vm_name, debug=True)
        if not ret.stdout.strip().count(snapshot_name2):
            test.fail("Failed to find snapshot disk")

    def check_guest_dump(session, exists=True):
        """
        Check guest with hexdump

        :param session: ssh session to guest
        :param exists: check rng device exists/not exists
        """
        check_cmd = "hexdump /dev/hwrng"
        try:
            status = session.cmd_status(check_cmd, 5)

            if status != 0 and exists:
                test.fail("Fail to check hexdump in guest")
            elif not exists:
                logging.info("hexdump cmd failed as expected")
        except aexpect.exceptions.ShellTimeoutError:
            if not exists:
                test.fail("Still can find rng device in guest")
            else:
                logging.info("Hexdump do not fail with error")

    def check_guest(session, expect_fail=False):
        """
        Check random device on guest

        :param session: ssh session to guest
        :param expect_fail: expect the dd cmd pass or fail
        """
        rng_files = (
            "/sys/devices/virtual/misc/hw_random/rng_available",
            "/sys/devices/virtual/misc/hw_random/rng_current")
        rng_avail = session.cmd_output("cat %s" % rng_files[0],
                                       timeout=timeout).strip()
        rng_currt = session.cmd_output("cat %s" % rng_files[1],
                                       timeout=timeout).strip()
        logging.debug("rng avail:%s, current:%s", rng_avail, rng_currt)
        if not rng_currt.count("virtio") or rng_currt not in rng_avail:
            test.fail("Failed to check rng file on guest")

        # Read the random device
        rng_rate = params.get("rng_rate")
        # For the rng rate test this command returns in a short time,
        # but for other tests it will hang
        cmd = ("dd if=/dev/hwrng of=rng.test count=100"
               " && rm -f rng.test")
        try:
            ret, output = session.cmd_status_output(cmd, timeout=timeout)
            if ret and expect_fail:
                logging.info("dd cmd failed as expected")
            elif ret:
                test.fail("Failed to read the random device")
        except aexpect.exceptions.ShellTimeoutError:
            logging.info("dd cmd timeout")
            # Close session as the current session still hang on last cmd
            session.close()
            session = vm.wait_for_login()

            if expect_fail:
                test.fail("Still can find rng device in guest")
            else:
                logging.info("dd cmd do not fail with error")
                # Check if file have data
                size = session.cmd_output("wc -c rng.test").split()[0]
                if int(size) > 0:
                    logging.info("/dev/hwrng is not empty, size %s", size)
                else:
                    test.fail("/dev/hwrng is empty")
        finally:
            session.cmd("rm -f rng.test")

        if rng_rate:
            rate_bytes, rate_period = list(ast.literal_eval(rng_rate).values())
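            # convert the configured limit (rate_bytes per rate_period
            # milliseconds) into bytes per second for comparison with the
            # rate measured by dd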
            rate_conf = float(rate_bytes) / (float(rate_period)/1000)
            ret = re.search(r"(\d+) bytes.*copied, (\d+.\d+) s",
                            output, re.M)
            if not ret:
                test.fail("Can't find rate from output")
            rate_real = float(ret.group(1)) / float(ret.group(2))
            logging.debug("Find rate: %s, config rate: %s",
                          rate_real, rate_conf)
            if rate_real > rate_conf * 1.2:
                test.fail("The rate of reading exceed"
                          " the limitation of configuration")
        if device_num > 1:
            rng_dev = rng_avail.split()
            if len(rng_dev) != device_num:
                test.cancel("Multiple virtio-rng devices are not"
                            " supported on this guest kernel. "
                            "Bug: https://bugzilla.redhat.com/"
                            "show_bug.cgi?id=915335")
            session.cmd("echo -n %s > %s" % (rng_dev[1], rng_files[1]))
            # Read the random device
            if session.cmd_status(cmd, timeout=timeout):
                test.fail("Failed to read the random device")

    def get_rng_device(guest_arch, rng_model):
        """
        Return the expected rng device in qemu cmd
        :param guest_arch: e.g. x86_64
        :param rng_model: the value for //rng@model, e.g. "virtio"
        :return: expected device type in qemu cmd
        """
        if "virtio" in rng_model:
            return "virtio-rng-pci" if "s390x" not in guest_arch else "virtio-rng-ccw"
        else:
            test.fail("Unknown rng model %s" % rng_model)

    start_error = "yes" == params.get("start_error", "no")
    status_error = "yes" == params.get("status_error", "no")

    test_host = "yes" == params.get("test_host", "no")
    test_guest = "yes" == params.get("test_guest", "no")
    test_guest_dump = "yes" == params.get("test_guest_dump", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    snapshot_vm_running = "yes" == params.get("snapshot_vm_running",
                                              "no")
    snapshot_with_rng = "yes" == params.get("snapshot_with_rng", "no")
    snapshot_name = params.get("snapshot_name")
    device_num = int(params.get("device_num", 1))
    detach_alias = "yes" == params.get("rng_detach_alias", "no")
    detach_alias_options = params.get("rng_detach_alias_options")
    attach_rng = "yes" == params.get("rng_attach_device", "no")
    attach_options = params.get("rng_attach_options", "")
    random_source = "yes" == params.get("rng_random_source", "yes")
    timeout = int(params.get("timeout", 600))
    wait_timeout = int(params.get("wait_timeout", 60))

    if device_num > 1 and not libvirt_version.version_compare(1, 2, 7):
        test.skip("Multiple virtio-rng devices not "
                  "supported on this libvirt version")

    guest_arch = params.get("vm_arch_name", "x86_64")

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("vm xml is %s", vmxml_backup)

    # Try to install rng-tools on the host; it can speed up the random rate.
    # If installation fails, ignore the error and continue the test.
    if utils_package.package_install(["rng-tools"]):
        rngd_conf = "/etc/sysconfig/rngd"
        rngd_srv = "/usr/lib/systemd/system/rngd.service"
        if os.path.exists(rngd_conf):
            # For rhel6 host, add extraoptions
            with open(rngd_conf, 'w') as f_rng:
                f_rng.write('EXTRAOPTIONS="--rng-device /dev/urandom"')
        elif os.path.exists(rngd_srv):
            # For rhel7 host, modify start options
            rngd_srv_conf = "/etc/systemd/system/rngd.service"
            if not os.path.exists(rngd_srv_conf):
                shutil.copy(rngd_srv, rngd_srv_conf)
            process.run("sed -i -e 's#^ExecStart=.*#ExecStart=/sbin/rngd"
                        " -f -r /dev/urandom -o /dev/random#' %s"
                        % rngd_srv_conf, shell=True)
            process.run('systemctl daemon-reload')
        process.run("service rngd start")

    # Build the xml and run test.
    try:
        bgjob = None

        # Prepare xml, make sure no extra rng dev.
        vmxml = vmxml_backup.copy()
        vmxml.remove_all_device_by_type('rng')
        vmxml.sync()
        logging.debug("Prepared vm xml without rng dev is %s", vmxml)

        # Take snapshot if needed
        if snapshot_name:
            if snapshot_vm_running:
                vm.start()
                vm.wait_for_login().close()
            ret = virsh.snapshot_create_as(vm_name, snapshot_name, debug=True)
            libvirt.check_exit_status(ret)

        # Destroy VM first
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Build vm xml.
        dparams = {}
        if device_num > 1:
            for i in xrange(device_num):
                rng_model = params.get("rng_model_%s" % i, "virtio")
                dparams[i] = {"rng_model": rng_model}
                dparams[i].update({"backend_model": params.get(
                    "backend_model_%s" % i, "random")})
                dparams[i].update({"rng_device": get_rng_device(
                    guest_arch, rng_model)})
                bk_type = params.get("backend_type_%s" % i)
                if bk_type:
                    dparams[i].update({"backend_type": bk_type})
                bk_dev = params.get("backend_dev_%s" % i)
                if bk_dev:
                    dparams[i].update({"backend_dev": bk_dev})
                bk_src = params.get("backend_source_%s" % i)
                if bk_src:
                    dparams[i].update({"backend_source": bk_src})
                bk_pro = params.get("backend_protocol_%s" % i)
                if bk_pro:
                    dparams[i].update({"backend_protocol": bk_pro})
                modify_rng_xml(dparams[i], False)
        else:
            params.update({"rng_device": get_rng_device(
                guest_arch, params.get("rng_model", "virtio"))})

            if detach_alias:
                device_alias = "ua-" + str(uuid.uuid4())
                params.update({"rng_alias": device_alias})

            rng_xml = modify_rng_xml(params, not test_snapshot, attach_rng)

        try:
            # Add random server
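            # "nc -4 -l localhost 1024" feeds /dev/random through a local TCP
            # listener so the tcp-backed rng device has a source to connect to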
            if random_source and params.get("backend_type") == "tcp":
                cmd = "cat /dev/random | nc -4 -l localhost 1024"
                bgjob = utils_misc.AsyncJob(cmd)

            vm.start()
            if attach_rng:
                ret = virsh.attach_device(vm_name, rng_xml.xml,
                                          flagstr=attach_options,
                                          debug=True, ignore_status=True)
                libvirt.check_exit_status(ret, status_error)
                if status_error:
                    return
                if not check_rng_xml(rng_xml, True):
                    test.fail("Can not find rng device in xml")

            else:
                # Start the VM.
                if start_error:
                    test.fail("VM started unexpectedly")

            if test_qemu_cmd and not attach_rng:
                if device_num > 1:
                    for i in xrange(device_num):
                        check_qemu_cmd(dparams[i])
                else:
                    check_qemu_cmd(params)
            if test_host:
                check_host()
            session = vm.wait_for_login()
            if test_guest:
                check_guest(session)
            if test_guest_dump:
                check_guest_dump(session, True)
            if test_snapshot:
                check_snapshot(bgjob)

            if detach_alias:
                result = virsh.detach_device_alias(vm_name, device_alias,
                                                   detach_alias_options, debug=True)
                if "--config" in detach_alias_options:
                    vm.destroy()

                def have_rng_xml():
                    """
                    check if xml have rng item
                    """
                    output = virsh.dumpxml(vm_name)
                    return not output.stdout.strip().count("<rng model=")

                if utils_misc.wait_for(have_rng_xml, wait_timeout):
                    logging.info("Cannot find rng device in xml after detach")
                else:
                    test.fail("Found rng device in xml after detach")

            # Detach after attach
            if attach_rng:
                ret = virsh.detach_device(vm_name, rng_xml.xml,
                                          flagstr=attach_options,
                                          debug=True, ignore_status=True)
                libvirt.check_exit_status(ret, status_error)
                if utils_misc.wait_for(lambda: check_rng_xml(rng_xml, False), wait_timeout):
                    logging.info("Find same rng xml as hotpluged")
                else:
                    test.fail("Rng device still exists after detach!")

                if test_guest_dump:
                    check_guest_dump(session, False)

            session.close()
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not start_error:
                test.fail('VM failed to start, '
                          'please refer to https://bugzilla.'
                          'redhat.com/show_bug.cgi?id=1220252:'
                          '\n%s' % details)
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name, debug=True)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snapshot in snapshot_lists:
                virsh.snapshot_delete(vm_name, snapshot, "--metadata", debug=True)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if bgjob:
            bgjob.kill_func()
def run(test, params, env):
    """
    Test for basic detach serial/console device by alias function.

    1) Define the VM with specified serial type device.
    2) Start the guest and check if start result meets expectation
    3) Hot unplug serial device and check if meets expectation
    4) Cold unplug serial device and check if meets expectation
    5) Shutdown the VM and clean up environment
    """
    def set_targets(serial):
        """
        Set a serial device target attributes.

        :param serial: one serial target
        """
        machine = platform.machine()
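        # ppc and aarch64 guests need architecture-specific serial targets;
        # other architectures use the target_type from the test parameters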
        if "ppc" in machine:
            serial.target_model = 'spapr-vty'
            serial.target_type = 'spapr-vio-serial'
        elif "aarch" in machine:
            serial.target_model = 'pl011'
            serial.target_type = 'system-serial'
        else:
            serial.target_model = target_type
            serial.target_type = target_type

    def prepare_serial_device():
        """
        Prepare a serial device XML
        """
        local_serial_type = serial_type
        serial = librarian.get('serial')(local_serial_type)
        serial.target_port = "0"
        serial.alias = {'name': alias_name}

        set_targets(serial)

        sources = []
        logging.debug(sources_str)
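        # each whitespace-separated token describes one source element as
        # comma-separated key:value pairs, e.g. "mode:bind,path:/tmp/sock"
        # (illustrative value; actual values come from the test parameters)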
        for source_str in sources_str.split():
            source_dict = {}
            for att in source_str.split(','):
                key, val = att.split(':')
                source_dict[key] = val
            sources.append(source_dict)
        serial.sources = sources
        return serial

    def check_vm_xml(existed=True, inactive=False):
        """
        Check the VM xml file to validate whether serial and console elements exist.

        :param existed: Default is True; indicates whether the element should exist
        :param inactive: indicates whether the VM xml is taken from the inactive definition
        """
        # Get current serial and console XML
        current_xml = VMXML.new_from_dumpxml(vm_name)
        if inactive:
            current_xml = VMXML.new_from_inactive_dumpxml(vm_name)
        serial_elem = current_xml.xmltreefile.find('devices/serial')
        console_elem = current_xml.xmltreefile.find('devices/console')
        if existed:
            if serial_elem is None:
                test.fail("Expected a serial device to be generated, "
                          "but found none.")
            if target_type != 'pci-serial' and console_elem is None:
                test.fail("Expected a console to be generated automatically, "
                          "but found none.")

    def cleanup():
        """
        Clean up test environment
        """
        if serial_type == 'file':
            if os.path.exists('/var/log/libvirt/virt-test'):
                os.remove('/var/log/libvirt/virt-test')

    serial_type = params.get('serial_type', 'pty')
    target_type = params.get('target_type', 'isa-serial')
    sources_str = params.get('serial_sources', '')
    hot_plug_support = "yes" == params.get('hot_plugging_support')

    # Customize alias name string.
    chars = string.ascii_letters + string.digits + '-_'
    alias_name = 'ua-' + ''.join(random.choice(chars) for _ in list(range(64)))

    vm_name = params.get('main_vm', 'alias-vm-tests-vm1')
    vm = env.get_vm(vm_name)

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        if not libvirt_version.version_compare(4, 5, 0):
            test.cancel(
                "virsh detach-device-alias is only supported since libvirt 4.5.0"
            )
        vm_xml.remove_all_device_by_type('serial')
        vm_xml.remove_all_device_by_type('console')

        serial_dev = prepare_serial_device()
        logging.debug('Serial device:\n%s', serial_dev)
        vm_xml.add_device(serial_dev)
        vm_xml.sync()

        vm.start()
        check_vm_xml()

        # Hot detach device by its alias name.
        # If hotplug not supported, serial should be still there.
        res = virsh.detach_device_alias(vm_name, alias_name, "--live")
        libvirt.check_exit_status(res, not hot_plug_support)
        check_vm_xml(existed=not hot_plug_support)

        # Cold detach device by its alias name.
        # After detach, serial should not be there.
        res = virsh.detach_device_alias(vm_name, alias_name, "--config")
        libvirt.check_exit_status(res)
        check_vm_xml(existed=False, inactive=True)
        debug_xml = VMXML.new_from_inactive_dumpxml(vm_name)
        logging.debug("After VM cold detached:%s", debug_xml)
        vm.destroy()
    finally:
        cleanup()
        vm_xml_backup.sync()
Example #6
def run(test, params, env):
    """
    Test detach-device-alias command with
    --config, --live, --current

    1. Test hostdev device detach
    2. Test scsi controller device detach
    3. Test redirect device detach
    4. Test channel devices detach
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    detach_options = params.get("detach_alias_options", "")
    detach_check_xml = params.get("detach_check_xml")
    # hostdev device params
    hostdev_type = params.get("detach_hostdev_type", "")
    hostdev_managed = params.get("detach_hostdev_managed")
    # controller params
    contr_type = params.get("detach_controller_type")
    contr_model = params.get("detach_controller_mode")
    # redirdev params
    redir_type = params.get("detach_redirdev_type")
    redir_bus = params.get("detach_redirdev_bus")
    # channel params
    channel_type = params.get("detach_channel_type")
    channel_target = eval(params.get("detach_channel_target", "{}"))

    device_alias = "ua-" + str(uuid.uuid4())

    def get_usb_info():
        """
        Get local host usb info

        :return: usb vendor and product id
        """
        install_cmd = process.run("yum install usbutils* -y", shell=True)
        result = process.run("lsusb|awk '{print $6\":\"$2\":\"$4}'",
                             shell=True)
        if not result.exit_status:
            return result.stdout_text.rstrip(':')
        else:
            test.error("Can not get usb hub info for testing")

    # backup xml
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    if not vm.is_alive():
        vm.start()
    # wait for vm start successfully
    vm.wait_for_login()

    if hostdev_type:
        if hostdev_type in ["usb", "scsi"]:
            if hostdev_type == "usb":
                pci_id = get_usb_info()
            elif hostdev_type == "scsi":
                source_disk = libvirt.create_scsi_disk(scsi_option="",
                                                       scsi_size="8")
                pci_id = get_scsi_info(source_disk)
            device_xml = libvirt.create_hostdev_xml(pci_id=pci_id,
                                                    dev_type=hostdev_type,
                                                    managed=hostdev_managed,
                                                    alias=device_alias)
        else:
            test.error("Hostdev type %s not handled by test."
                       " Please check code." % hostdev_type)
    if contr_type:
        controllers = vmxml.get_controllers(contr_type)
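        # pick an index just past the existing controllers of this type so
        # the new controller does not clash with an already defined one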
        contr_index = len(controllers) + 1
        contr_dict = {
            "controller_type": contr_type,
            "controller_model": contr_model,
            "controller_index": contr_index,
            "contr_alias": device_alias
        }
        device_xml = libvirt.create_controller_xml(contr_dict)
        detach_check_xml = detach_check_xml % contr_index

    if redir_type:
        device_xml = libvirt.create_redirdev_xml(redir_type, redir_bus,
                                                 device_alias)

    if channel_type:
        channel_params = {'channel_type_name': channel_type}
        channel_params.update(channel_target)
        device_xml = libvirt.create_channel_xml(channel_params,
                                                device_alias).xml

    try:
        dump_option = ""
        if "--config" in detach_options:
            dump_option = "--inactive"

        # Attach xml to domain
        logging.info("Attach xml is %s" %
                     process.run("cat %s" % device_xml).stdout_text)
        virsh.attach_device(vm_name,
                            device_xml,
                            flagstr=detach_options,
                            debug=True,
                            ignore_status=False)
        domxml_at = virsh.dumpxml(vm_name, dump_option,
                                  debug=True).stdout.strip()
        if detach_check_xml not in domxml_at:
            test.error("Can not find %s in domxml after attach" %
                       detach_check_xml)

        # Detach xml with alias
        result = virsh.detach_device_alias(vm_name,
                                           device_alias,
                                           detach_options,
                                           debug=True)
        time.sleep(10)
        libvirt.check_exit_status(result)
        domxml_dt = virsh.dumpxml(vm_name, dump_option,
                                  debug=True).stdout.strip()
        if detach_check_xml in domxml_dt:
            test.fail("Still can find %s in domxml" % detach_check_xml)
    finally:
        backup_xml.sync()
        if hostdev_type == "scsi":
            libvirt.delete_scsi_disk()
Example #7
def run(test, params, env):
    """
    Test virsh detach-device command.

    The command can detach disk.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh detach-device operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    def create_device_file(device_source="/tmp/attach.img"):
        """
        Create a device source file.

        :param device_source: Device source file.
        """
        try:
            with open(device_source, 'wb') as device_file:
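                # seek to the last byte and write a single byte so the result
                # is a 512 MiB sparse image file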
                device_file.seek((512 * 1024 * 1024) - 1)
                device_file.write(str(0).encode())
        except IOError:
            logging.error("Image file %s created failed.", device_source)

    def check_vm_partition(vm, device, os_type, target_name):
        """
        Check VM disk's partition.

        :param vm. VM guest.
        :param device. Device type to check (disk, cdrom or iface).
        :param os_type. VM's operating system type.
        :param target_name. Device target name.
        :return: True if the check succeeds.
        """
        logging.info("Checking VM partittion...")
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                if device == "disk":
                    s, o = session.cmd_status_output(
                        "grep %s /proc/partitions" % target_name)
                    logging.info("Virtio devices in VM:\n%s", o)
                elif device == "cdrom":
                    s, o = session.cmd_status_output("ls /dev/cdrom")
                    logging.info("CDROM in VM:\n%s", o)
                elif device == "iface":
                    s, o = session.cmd_status_output("ls /")
                session.close()
                if s != 0:
                    return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def acpiphp_module_modprobe(vm, os_type):
        """
        Add acpiphp module if VM's os type is rhel5.*

        :param vm. VM guest.
        :param os_type. VM's operating system type.
        :return: True if the operation succeeds.
        """
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                s_rpm, _ = session.cmd_status_output("rpm --version")
                # If status is different from 0, this
                # guest OS doesn't support the rpm package
                # manager
                if s_rpm:
                    session.close()
                    return True
                _, o_vd = session.cmd_status_output("rpm -qa | grep"
                                                    " redhat-release")
                if o_vd.find("5Server") != -1:
                    s_mod, _ = session.cmd_status_output("modprobe acpiphp")
                    if s_mod != 0:
                        session.close()
                        return False
                session.close()
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def create_device_xml(params, xml_path, device_source):
        """
        Create a xml file for device
        """
        device_xml_name = params.get("dt_device_xml", "device.xml")
        device_xml_file = os.path.join(xml_path, device_xml_name)
        device_type = params.get("dt_device_device", "disk")
        if device_type in ["disk", 'cdrom']:
            disk_class = vm_xml.VMXML.get_device_class('disk')
            if test_block_dev:
                disk = disk_class(type_name='block')
                stype = 'dev'
            else:
                disk = disk_class(type_name='file')
                stype = 'file'
            disk.device = device_type
            disk.driver = dict(name='qemu', type='raw')
            disk.source = disk.new_disk_source(attrs={stype: device_source})
            disk.target = dict(bus=device_bus, dev=device_target)
            if detach_alias:
                disk.alias = dict(name=device_alias)
            disk.xmltreefile.write()
            shutil.copyfile(disk.xml, device_xml_file)
        else:
            iface_class = vm_xml.VMXML.get_device_class('interface')
            iface = iface_class(type_name='network')
            iface.mac_address = iface_mac_address
            iface.source = dict(network=iface_network)
            iface.model = iface_model_type
            if detach_alias:
                iface.alias = dict(name=device_alias)
            iface.xmltreefile.write()
            shutil.copyfile(iface.xml, device_xml_file)
        return device_xml_file

    def handle_detach_mix():
        """
        Run detach_device and detach_device_alias in parallel multiple times
        """
        detach_times = int(params.get("detach_times"))
        thread_list = []

        def _detach_device(by_alias, detach_target):
            """
            Route the invoke for suitable detach functions

            :param by_alias: True to use detach_device_alias
            :param detach_target: Device xml or device alias
            :return: None
            """
            func_name = virsh.detach_device_alias if by_alias else virsh.detach_device
            func_name(vm_ref,
                      detach_target,
                      extra=dt_options,
                      wait_remove_event=True,
                      event_timeout=7,
                      debug=True)

        for count in range(0, detach_times):
            dt_thread = threading.Thread(target=_detach_device,
                                         args=(False, device_xml))

            dt_alias_thread = threading.Thread(target=_detach_device,
                                               args=(True, device_alias))

            dt_thread.start()
            thread_list.append(dt_thread)
            logging.debug("Start thread using detach_device %s",
                          dt_thread.name)

            dt_alias_thread.start()
            thread_list.append(dt_alias_thread)
            logging.debug("Start thread using detach_device_alias %s",
                          dt_alias_thread.name)

        for one_thread in thread_list:
            logging.debug("Ensure thread '{}' to exist".format(
                one_thread.name))
            one_thread.join()

    vm_ref = params.get("dt_device_vm_ref", "name")
    dt_options = params.get("dt_device_options", "")
    pre_vm_state = params.get("dt_device_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = "yes" == params.get("dt_device_no_attach", 'no')
    os_type = params.get("os_type", "linux")
    device = params.get("dt_device_device", "disk")
    readonly = "yes" == params.get("detach_readonly", "no")
    tmp_dir = data_dir.get_tmp_dir()
    test_cmd = "detach-device"
    detach_mixed = "yes" == params.get("detach_mixed", "no")
    detach_alias = "yes" == params.get("dt_device_alias", "no")
    if detach_alias:
        test_cmd = "detach-device-alias"
        device_alias = "ua-" + str(uuid.uuid4())
    else:
        test_cmd = "detach-device"
    if not virsh.has_command_help_match(test_cmd, dt_options) and\
       not status_error:
        test.cancel("Current libvirt version doesn't support '%s'"
                    " for %s" % (dt_options, test_cmd))

    # Disk specific attributes.
    device_source_name = params.get("dt_device_device_source", "attach.img")
    device_target = params.get("dt_device_device_target", "vdd")
    device_bus = params.get("dt_device_bus_type")
    test_block_dev = "yes" == params.get("dt_device_iscsi_device", "no")

    # interface specific attributes.
    iface_network = params.get("dt_device_iface_network")
    iface_model_type = params.get("dt_device_iface_model_type")
    iface_mac_address = params.get("dt_device_iface_mac_address")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    device_source = os.path.join(tmp_dir, device_source_name)

    # Create virtual device file.
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Can not get iscsi device name in host")
    else:
        create_device_file(device_source)

    try:
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # If we are testing cdrom device, we need to detach hdc in VM first.
        if device == "cdrom":
            virsh.detach_disk(vm_name,
                              device_target,
                              "--config",
                              ignore_status=True)

        device_xml = create_device_xml(params, tmp_dir, device_source)
        if not no_attach:
            s_attach = virsh.attach_device(vm_name,
                                           device_xml,
                                           flagstr="--config",
                                           debug=True).exit_status
            if s_attach != 0:
                logging.error("Attach device failed before testing "
                              "detach-device")

        vm.start()
        vm.wait_for_serial_login()

        # Add acpiphp module before testing if VM's os type is rhel5.*
        if device in ['disk', 'cdrom']:
            if not acpiphp_module_modprobe(vm, os_type):
                test.error("Add acpiphp module failed before test.")

        # Turn VM into certain state.
        if pre_vm_state == "paused":
            logging.info("Suspending %s...", vm_name)
            if vm.is_alive():
                vm.pause()
        elif pre_vm_state == "shut off":
            logging.info("Shutting down %s...", vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)

        # Get disk count before test.
        if device in ['disk', 'cdrom']:
            device_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)
        else:
            vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device_count_before_cmd = len(vm_cls.devices)

        # Test.
        domid = vm.get_id()
        domuuid = vm.get_uuid()

        # Confirm how to reference a VM.
        if vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref == "uuid":
            vm_ref = domuuid
        else:
            vm_ref = ""
        if detach_mixed:
            handle_detach_mix()
            status = 0  # We do not care the virsh command return result
        elif detach_alias:
            status = virsh.detach_device_alias(vm_ref,
                                               device_alias,
                                               dt_options,
                                               True,
                                               7,
                                               readonly=readonly,
                                               debug=True).exit_status
        else:
            status = virsh.detach_device(vm_ref,
                                         device_xml,
                                         readonly=readonly,
                                         flagstr=dt_options,
                                         debug=True).exit_status
        # If status_error is False, wait a few seconds to let the detach operation complete.
        if not status_error:
            utils_misc.wait_for(
                lambda: not libvirt.device_exists(vm, device_target),
                timeout=40)
        time.sleep(2)
        # Resume the guest after the command. On newer libvirt this is fixed
        # (it used to be a bug): the change in the xml file is applied after
        # the guest is resumed.
        if pre_vm_state == "paused":
            vm.resume()

        # Check disk count after command.
        check_count_after_cmd = True
        if device in ['disk', 'cdrom']:
            logging.debug("Checking disk count via VM xml...")
            logging.debug(vm_xml.VMXML.new_from_dumpxml(vm_name))
            device_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
        else:
            vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device_count_after_cmd = len(vm_cls.devices)
        logging.debug("device_count_after_cmd=%d, device_count_before_cmd=%d",
                      device_count_after_cmd, device_count_before_cmd)
        if device_count_after_cmd < device_count_before_cmd:
            check_count_after_cmd = False

        # Recover VM state.
        if pre_vm_state == "shut off" and device in ['disk', 'cdrom']:
            vm.start()

        # Check in VM after command.
        check_vm_after_cmd = True
        if device in ['disk', 'cdrom']:
            check_vm_after_cmd = check_vm_partition(vm, device, os_type,
                                                    device_target)

        # Destroy VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        if device in ['disk', 'cdrom']:
            device_count_after_shutdown = vm_xml.VMXML.get_disk_count(vm_name)
        else:
            vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device_count_after_shutdown = len(vm_cls.devices)
        if device_count_after_shutdown < device_count_before_cmd:
            check_count_after_shutdown = False
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if test_block_dev:
            libvirt.setup_or_cleanup_iscsi(False)
        elif os.path.exists(device_source):
            os.remove(device_source)
        elif os.path.exists(tmp_dir):
            os.remove(tmp_dir)

    # Check results.
    if status_error:
        if not status:
            test.fail("detach-device exit with unexpected value.")
    else:
        if status:
            test.fail("virsh detach-device failed.")
        if dt_options.count("config"):
            if check_count_after_shutdown:
                test.fail("Device detached with '--config' is still present"
                          " in xml after VM shutdown.")
            if pre_vm_state == "shut off":
                if check_count_after_cmd:
                    test.fail("See device in xml after detach with"
                              " --config option")
            elif pre_vm_state == "running":
                if not check_vm_after_cmd and device in ['disk', 'cdrom']:
                    test.fail("Cannot see device in VM after"
                              " detach with '--config' option"
                              " when VM is running.")

        elif dt_options.count("live"):
            if check_count_after_cmd:
                test.fail("Still see device in xml after detach with"
                          " '--live' option")
            if not check_count_after_shutdown:
                test.fail("Device detached with '--live' is missing from the"
                          " persistent xml after VM shutdown.")
            if check_vm_after_cmd and device in ['disk', 'cdrom']:
                test.fail("See device in VM with '--live' option"
                          " when VM is running")
        elif dt_options.count("current"):
            if check_count_after_cmd:
                test.fail("See device in xml after detach with"
                          " --current option")
            if pre_vm_state == "running":
                if not check_count_after_shutdown:
                    test.fail("Device detached with '--current' on a running"
                              " VM is missing from the persistent xml after"
                              " VM shutdown.")
                if check_vm_after_cmd and device in ['disk', 'cdrom']:
                    test.fail("See device in VM with '--current'"
                              " option when VM is running")
        elif dt_options.count("persistent"):
            if check_count_after_shutdown:
                test.fail("Device detached with '--persistent' is still"
                          " present in xml after VM shutdown.")
def run(test, params, env):
    """
    Test detach-device-alias command with
    --config, --live, --current

    1. Test hostdev device detach
    2. Test scsi controller device detach
    3. Test redirect device detach
    4. Test channel devices detach
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    detach_options = params.get("detach_alias_options", "")
    detach_check_xml = params.get("detach_check_xml")
    # hostdev device params
    hostdev_type = params.get("detach_hostdev_type", "")
    hostdev_managed = params.get("detach_hostdev_managed")
    # controller params
    contr_type = params.get("detach_controller_type")
    contr_model = params.get("detach_controller_mode")
    # redirdev params
    redir_type = params.get("detach_redirdev_type")
    redir_bus = params.get("detach_redirdev_bus")
    # channel params
    channel_type = params.get("detach_channel_type")
    channel_target = eval(params.get("detach_channel_target", "{}"))
    # watchdog params
    watchdog_type = params.get("detach_watchdog_type")
    watchdog_dict = eval(params.get('watchdog_dict', '{}'))

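    # libvirt only honors user-defined device aliases that carry the "ua-" prefix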
    device_alias = "ua-" + str(uuid.uuid4())

    def check_detached_xml_noexist():
        """
        Check detached xml does not exist in the guest dumpxml

        :return: True if it does not exist, False if still exists
        """
        domxml_dt = virsh.dumpxml(vm_name, dump_option).stdout_text.strip()
        return detach_check_xml not in domxml_dt

    def get_usb_info():
        """
        Get local host usb info

        :return: usb vendor and product id
        """
        # Install usbutils so the lsusb command is available on the host
        process.run("yum install usbutils* -y", shell=True)
        result = process.run("lsusb|awk '{print $6\":\"$2\":\"$4}'",
                             shell=True)
        if not result.exit_status:
            return result.stdout_text.rstrip(':')
        else:
            test.error("Can not get usb hub info for testing")

    # backup xml
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    device_xml = None
    attach_device = True

    if not vm.is_alive():
        vm.start()
    # wait for vm start successfully
    vm.wait_for_login()

    if hostdev_type:
        if hostdev_type in ["usb", "scsi"]:
            if hostdev_type == "usb":
                pci_id = get_usb_info()
            elif hostdev_type == "scsi":
                source_disk = libvirt.create_scsi_disk(scsi_option="",
                                                       scsi_size="8")
                pci_id = get_scsi_info(source_disk)
            device_xml = libvirt.create_hostdev_xml(pci_id=pci_id,
                                                    dev_type=hostdev_type,
                                                    managed=hostdev_managed,
                                                    alias=device_alias)
        else:
            test.error("Hostdev type %s not handled by test."
                       " Please check code." % hostdev_type)
    if contr_type:
        controllers = vmxml.get_controllers(contr_type)
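        # Use an index one past the existing controllers of this type to avoid a clash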
        contr_index = len(controllers) + 1
        contr_dict = {
            "controller_type": contr_type,
            "controller_model": contr_model,
            "controller_index": contr_index,
            "contr_alias": device_alias
        }
        device_xml = libvirt.create_controller_xml(contr_dict)
        detach_check_xml = detach_check_xml % contr_index

    if redir_type:
        device_xml = libvirt.create_redirdev_xml(redir_type, redir_bus,
                                                 device_alias)

    if channel_type:
        channel_params = {'channel_type_name': channel_type}
        channel_params.update(channel_target)
        device_xml = libvirt.create_channel_xml(channel_params, device_alias)

    if watchdog_type:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.remove_all_device_by_type('watchdog')

        device_xml_file = Watchdog()
        device_xml_file.update({"alias": {"name": device_alias}})
        device_xml_file.setup_attrs(**watchdog_dict)
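        # VMXMLDevices.append() is expected to return the updated list,
        # which is why the reassignment below works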
        vmxml.devices = vmxml.devices.append(device_xml_file)
        vmxml.xmltreefile.write()
        vmxml.sync()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug('The vmxml after attaching the watchdog is: %s', vmxml)

        if not vm.is_alive():
            vm.start()
        vm.wait_for_login().close()

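        # The watchdog was cold-plugged through the persistent XML above,
        # so skip the generic hot attach step later on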
        attach_device = False

    try:
        dump_option = ""
        wait_event = True
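        # A --config detach only touches the persistent definition, so compare
        # against the inactive XML and do not wait for a live device-removed event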
        if "--config" in detach_options:
            dump_option = "--inactive"
            wait_event = False

        # Attach xml to domain
        if attach_device:
            logging.info("Attach xml is %s" %
                         process.run("cat %s" % device_xml.xml).stdout_text)
            virsh.attach_device(vm_name,
                                device_xml.xml,
                                flagstr=detach_options,
                                debug=True,
                                ignore_status=False)

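        # Confirm the device shows up in the dumped XML before detaching it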
        domxml_at = virsh.dumpxml(vm_name, dump_option,
                                  debug=True).stdout_text.strip()
        if detach_check_xml not in domxml_at:
            test.error("Can not find %s in domxml after attach" %
                       detach_check_xml)

        # Detach xml with alias
        result = virsh.detach_device_alias(vm_name,
                                           device_alias,
                                           detach_options,
                                           wait_for_event=wait_event,
                                           event_timeout=20,
                                           debug=True)
        libvirt.check_exit_status(result)
        if not utils_misc.wait_for(
                check_detached_xml_noexist,
                60,
                step=2,
                text="Repeatedly search guest dumpxml with detached xml"):
            test.fail("Still can find %s in domxml" % detach_check_xml)
    finally:
        backup_xml.sync()
        if hostdev_type == "scsi":
            libvirt.delete_scsi_disk()