Example #1
def run(test, params, env):
    """
    Test multiple disks attachment.

    1. Prepare the test environment; destroy or suspend the VM.
    2. Prepare a disk image.
    3. Edit the disk XML and start the domain.
    4. Perform the test operation.
    5. Recover the test environment.
    6. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    gluster_server_name = params.get("gluster_server_name")
    # If gluster_server is specified in the config file, just use that gluster server.
    if 'EXAMPLE' not in gluster_server_name:
        params.update({'gluster_server_ip': gluster_server_name})

    def prepare_gluster_disk(disk_img, disk_format):
        """
        Setup glusterfs and prepare disk image.
        """
        # Get the image path
        image_source = vm.get_first_disk_devices()['source']

        # Setup gluster
        host_ip = gluster.setup_or_cleanup_gluster(True,
                                                   brick_path=brick_path,
                                                   **params)
        logging.debug("host ip: %s ", host_ip)
        image_info = utils_misc.get_image_info(image_source)
        image_dest = "/mnt/%s" % disk_img

        if image_info["format"] == disk_format:
            disk_cmd = ("cp -f %s %s" % (image_source, image_dest))
        else:
            # Convert the disk format
            disk_cmd = (
                "qemu-img convert -f %s -O %s %s %s" %
                (image_info["format"], disk_format, image_source, image_dest))

        # Mount the gluster disk and create the image.
        process.run("mount -t glusterfs %s:%s /mnt && "
                    "%s && chmod a+rw /mnt/%s && umount /mnt" %
                    (host_ip, vol_name, disk_cmd, disk_img),
                    shell=True)

        return host_ip

    def build_disk_xml(disk_img, disk_format, host_ip):
        """
        Try to rebuild disk xml
        """
        if default_pool:
            disk_xml = Disk(type_name="file")
        else:
            disk_xml = Disk(type_name="network")
        disk_xml.device = "disk"
        driver_dict = {"name": "qemu", "type": disk_format, "cache": "none"}
        if driver_iothread:
            driver_dict.update({"iothread": driver_iothread})
        disk_xml.driver = driver_dict
        disk_xml.target = {"dev": "vdb", "bus": "virtio"}
        if default_pool:
            utils_misc.mount("%s:%s" % (host_ip, vol_name), default_pool,
                             "glusterfs")
            process.run("setsebool virt_use_fusefs on", shell=True)
            source_dict = {"file": "%s/%s" % (default_pool, disk_img)}
            disk_xml.source = disk_xml.new_disk_source(
                **{"attrs": source_dict})
        else:
            source_dict = {
                "protocol": "gluster",
                "name": "%s/%s" % (vol_name, disk_img)
            }
            host_dict = [{"name": host_ip, "port": "24007"}]
            # If multiple_hosts is True, attempt to add multiple hosts.
            if multiple_hosts:
                host_dict.append({
                    "name": params.get("dummy_host1"),
                    "port": "24007"
                })
                host_dict.append({
                    "name": params.get("dummy_host2"),
                    "port": "24007"
                })
            if transport:
                host_dict[0]['transport'] = transport
            disk_xml.source = disk_xml.new_disk_source(**{
                "attrs": source_dict,
                "hosts": host_dict
            })
        return disk_xml

    def test_pmsuspend(vm_name):
        """
        Test pmsuspend command.
        """
        if vm.is_dead():
            vm.start()
            vm.wait_for_login()
        # Create a swap partition if necessary.
        if not vm.has_swap():
            swap_path = os.path.join(data_dir.get_tmp_dir(), 'swap.img')
            vm.create_swap_partition(swap_path)
        ret = virsh.dompmsuspend(vm_name, "disk", **virsh_dargs)
        libvirt.check_exit_status(ret)
        # Wait for the vm to shut down

        if not utils_misc.wait_for(lambda: vm.state() == "shut off", 60):
            test.fail("vm is still alive after S4 operation")

        # Wait for vm and qemu-ga service to start
        vm.start()
        # Prepare guest agent and start guest
        try:
            vm.prepare_guest_agent()
        except (remote.LoginError, virt_vm.VMError) as detail:
            test.fail("failed to prepare agent:\n%s" % detail)

        # TODO: This step may hang for rhel6 guests
        ret = virsh.dompmsuspend(vm_name, "mem", **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Check vm state
        if not utils_misc.wait_for(lambda: vm.state() == "pmsuspended", 60):
            test.fail("vm isn't suspended after S3 operation")

        ret = virsh.dompmwakeup(vm_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if not vm.is_alive():
            test.fail("vm is not alive after dompmwakeup")

    # Disk specific attributes.
    pm_enabled = "yes" == params.get("pm_enabled", "no")
    gluster_disk = "yes" == params.get("gluster_disk", "no")
    disk_format = params.get("disk_format", "qcow2")
    vol_name = params.get("vol_name")
    transport = params.get("transport", "")
    default_pool = params.get("default_pool", "")
    pool_name = params.get("pool_name")
    driver_iothread = params.get("driver_iothread")
    dom_iothreads = params.get("dom_iothreads")
    brick_path = os.path.join(test.virtdir, pool_name)
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")

    # Gluster server multiple hosts flag.
    multiple_hosts = "yes" == params.get("multiple_hosts", "no")

    pre_vm_state = params.get("pre_vm_state", "running")

    # Destroy VM first.
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml = vmxml_backup.copy()
    mnt_src = ""

    # This is due to the new block-dev feature
    if transport == "rdma":
        test.cancel("transport protocol 'rdma' is not yet supported")
    try:
        # Build new vm xml.
        if pm_enabled:
            vm_xml.VMXML.set_pm_suspend(vm_name)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            logging.debug("Attempting to set guest agent channel")
            vmxml.set_agent_channel()
            vmxml.sync()

        if gluster_disk:
            # Setup glusterfs and disk xml.
            disk_img = "gluster.%s" % disk_format
            host_ip = prepare_gluster_disk(disk_img, disk_format)
            mnt_src = "%s:%s" % (host_ip, vol_name)
            global custom_disk
            custom_disk = build_disk_xml(disk_img, disk_format, host_ip)

        start_vm = "yes" == params.get("start_vm", "yes")

        # set domain options
        if dom_iothreads:
            try:
                vmxml.iothreads = int(dom_iothreads)
                vmxml.sync()
            except ValueError:
                # 'iothreads' may be an invalid number in negative tests
                logging.debug("Can't convert '%s' to integer type",
                              dom_iothreads)
        if default_pool:
            disks_dev = vmxml.get_devices(device_type="disk")
            for disk in disks_dev:
                vmxml.del_device(disk)
            vmxml.sync()

        # If hot plug, start VM first, otherwise stop VM if running.
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()

        # If gluster_disk is True, use attach_device.
        attach_option = params.get("attach_option", "")
        if gluster_disk:
            cmd_result = virsh.attach_device(domainarg=vm_name,
                                             filearg=custom_disk.xml,
                                             flagstr=attach_option,
                                             dargs=virsh_dargs,
                                             debug=True)
            libvirt.check_exit_status(cmd_result)

        # Turn VM into certain state.
        if pre_vm_state == "running":
            logging.info("Starting %s...", vm_name)
            if vm.is_dead():
                vm.start()
        elif pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.skip("can't create the domain")

        # Run the tests.
        if pm_enabled:
            # Make sure the guest agent is started
            try:
                vm.prepare_guest_agent()
            except (remote.LoginError, virt_vm.VMError) as detail:
                test.fail("failed to prepare agent: %s" % detail)
            # Run dompmsuspend command.
            test_pmsuspend(vm_name)

        # Since block-dev was introduced in libvirt 6.0.0, the
        # gluster+%s.*format information is no longer provided in qemu output
        if libvirt_version.version_compare(6, 0, 0):
            test_qemu_cmd = False

        if test_qemu_cmd:
            # Check qemu-kvm command line
            cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
            if transport == "rdma":
                cmd += " | grep gluster+%s.*format=%s" % (transport,
                                                          disk_format)
            else:
                cmd += " | grep gluster.*format=%s" % disk_format
            if driver_iothread:
                cmd += " | grep iothread=iothread%s" % driver_iothread
            if process.run(cmd, ignore_status=True, shell=True).exit_status:
                test.fail("Can't see gluster option '%s' "
                          "in command line" % cmd)
        # Detach hot plugged device.
        if start_vm and not default_pool:
            if gluster_disk:
                ret = virsh.detach_device(vm_name,
                                          custom_disk.xml,
                                          flagstr=attach_option,
                                          dargs=virsh_dargs)
                libvirt.check_exit_status(ret)

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        if utils_misc.is_mounted(mnt_src,
                                 default_pool,
                                 'fuse.glusterfs',
                                 verbose=True):
            process.run("umount %s" % default_pool,
                        ignore_status=True,
                        shell=True)

        if gluster_disk:
            gluster.setup_or_cleanup_gluster(False,
                                             brick_path=brick_path,
                                             **params)
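
For reference, in the non-default-pool branch, build_disk_xml() produces a <disk type='network'> element with a gluster protocol source, as described in the libvirt domain XML documentation. Below is a minimal standalone sketch of that shape using only the standard library; the avocado-vt Disk class above generates equivalent XML, and the host, volume, and image names are placeholders.

import xml.etree.ElementTree as ET

def sketch_gluster_disk_xml(host_ip, vol_name, disk_img, disk_format="qcow2"):
    # Mirrors the non-default-pool branch of build_disk_xml() above.
    disk = ET.Element("disk", type="network", device="disk")
    ET.SubElement(disk, "driver", name="qemu", type=disk_format, cache="none")
    source = ET.SubElement(disk, "source", protocol="gluster",
                           name="%s/%s" % (vol_name, disk_img))
    ET.SubElement(source, "host", name=host_ip, port="24007")
    ET.SubElement(disk, "target", dev="vdb", bus="virtio")
    return ET.tostring(disk, encoding="unicode")

# e.g. sketch_gluster_disk_xml("192.168.122.10", "gluster-vol1", "gluster.qcow2")
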
Example #2
def run(test, params, env):
    """
    Test command: virt-admin server-update-tls.

    1) When TLS-related files change, notify the server to update them
        online, without restarting the daemon
    """
    def add_remote_firewall_port(port, params):
        """
        Add the port on remote host

        :param port: port to add
        :param params: Dictionary with the test parameters
        """
        server_ip = params.get("server_ip")
        server_user = params.get("server_user")
        server_pwd = params.get("server_pwd")
        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        firewall_cmd = utils_iptables.Firewall_cmd(remote_session)
        firewall_cmd.add_port(port, 'tcp', permanent=True)
        remote_session.close()

    def remove_remote_firewall_port(port, params):
        """
        Remove the port on remote host

        :param port: port to remove
        :param params: Dictionary with the test parameters
        """
        server_ip = params.get("server_ip")
        server_user = params.get("server_user")
        server_pwd = params.get("server_pwd")
        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        firewall_cmd = utils_iptables.Firewall_cmd(remote_session)
        firewall_cmd.remove_port(port, 'tcp', permanent=True)
        remote_session.close()

    def update_server_pem(cert_saved_dir, remote_libvirt_pki_dir):
        """
        Update the server info and re-build servercert

        :param cert_saved_dir: The directory where cert files are saved
        :param remote_libvirt_pki_dir: Directory to store pki on remote
        """
        logging.debug("Update serverinfo")
        serverinfo = os.path.join(cert_saved_dir, "server.info")
        with open(serverinfo, "r") as f1:
            lines = f1.readlines()
        with open(os.path.join(cert_saved_dir, "server2.info"), "w") as f2:
            for line in lines:
                if fake_ip in line:
                    line = line.replace(fake_ip, server_ip)
                f2.write(line)

        cmd = ("certtool --generate-certificate --load-privkey "
               "{0}/serverkey.pem --load-ca-certificate {0}/cacert.pem "
               "--load-ca-privkey {0}/cakey.pem --template {0}/server2.info "
               "--outfile {0}/servercert.pem".format(cert_saved_dir))
        servercert_pem = os.path.join(cert_saved_dir, "servercert.pem")
        process.run(cmd, shell=True, verbose=True)
        remote.copy_files_to(server_ip, 'scp', server_user, server_pwd, '22',
                             servercert_pem, remote_libvirt_pki_dir)

    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    tls_port = params.get("tls_port", "16514")
    uri = "qemu+tls://%s:%s/system" % (server_ip, tls_port)

    remote_virt_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }
    tls_obj = None

    if not libvirt_version.version_compare(6, 2, 0):
        test.cancel("This libvirt version doesn't support "
                    "virt-admin server-update-tls.")
    remote_session = remote.wait_for_login('ssh', server_ip, '22', server_user,
                                           server_pwd, r"[\#\$]\s*$")
    remote_daemon = utils_libvirtd.Libvirtd("virtproxyd",
                                            session=remote_session)
    if not remote_daemon.is_running():
        remote_daemon.start()
    remote_session.close()

    try:
        vp = virt_admin.VirtadminPersistent(**remote_virt_dargs)

        add_remote_firewall_port(tls_port, params)

        # Generate a fake ip for testing
        repl = str((int(server_ip.strip().split('.')[-1]) + 1) % 255)
        fake_ip = re.sub(r"([0-9]+)$", repl, server_ip)
        params.update({"server_info_ip": fake_ip})

        tls_obj = TLSConnection(params)
        tls_obj.conn_setup()
        tls_obj.auto_recover = True

        # Connection should fail because TLS is set incorrectly
        ret, output = libvirt.connect_libvirtd(uri)
        if ret:
            test.fail(
                "Connection should have failed but succeeded. "
                "ret: {}, output: {}".format(ret, output))
        if "authentication failed" not in output:
            test.fail(
                "Unablee to find the expected error message. output: %s" %
                output)

        tmp_dir = tls_obj.tmp_dir
        remote_libvirt_pki_dir = tls_obj.libvirt_pki_dir
        update_server_pem(tmp_dir, remote_libvirt_pki_dir)

        serv_name = virt_admin.check_server_name()
        logging.debug("service name: %s", serv_name)
        result = vp.server_update_tls(serv_name, debug=True)
        libvirt.check_exit_status(result)

        # Re-connect to the server
        ret, output = libvirt.connect_libvirtd(uri)
        if not ret:
            test.fail("Connection fails, ret: {}, output: {}".format(
                ret, output))
    finally:
        logging.info("Recover test environment")
        remove_remote_firewall_port(tls_port, params)
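
The fake-IP substitution above is easy to get wrong: without parentheses, int(x) + 1 % 255 evaluates as int(x) + (1 % 255), i.e. it never wraps. A self-contained sketch of the intended transformation (the address is a placeholder):

import re

def fake_ip_for(server_ip):
    # Increment the last octet, wrapping at 255, and substitute it back.
    last_octet = int(server_ip.strip().split('.')[-1])
    repl = str((last_octet + 1) % 255)
    return re.sub(r"([0-9]+)$", repl, server_ip)

assert fake_ip_for("192.168.122.50") == "192.168.122.51"
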
Example #3
def run(test, params, env):
    """
    Test guest numa setting
    """
    def replace_qemu_cmdline(cmdline_list):
        """
        Replace the expected qemu command line for new machine type

        :param cmdline_list: The list for expected qemu command lines
        :return: The list contains the updated qemu command lines if any
        """
        os_xml = getattr(vmxml, "os")
        machine_ver = getattr(os_xml, 'machine')
        if (machine_ver.startswith("pc-q35-rhel")
                and machine_ver > 'pc-q35-rhel8.2.0'
                and libvirt_version.version_compare(6, 4, 0)):
            # Replace 'node,nodeid=0,cpus=0-1,mem=512' with
            # 'node,nodeid=0,cpus=0-1,memdev=ram-node0'
            # Replace 'node,nodeid=1,cpus=2-3,mem=512' with
            # 'node,nodeid=1,cpus=2-3,memdev=ram-node1'
            for cmd in cmdline_list:
                line = cmd['cmdline']
                try:
                    node = line.split(',')[1][-1]
                    cmd['cmdline'] = line.replace(
                        'mem=512', 'memdev=ram-node{}'.format(node))
                # We can skip replacing when the cmdline parameter is empty.
                except IndexError:
                    pass

        return cmdline_list

    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.online_nodes
    arch = platform.machine()
    dynamic_node_replacement(params, host_numa_node, test)
    if 'ppc64' in arch:
        try:
            ppc_memory_nodeset = ""
            nodes = params['memory_nodeset']
            if '-' in nodes:
                for n in range(int(nodes.split('-')[0]),
                               int(nodes.split('-')[1])):
                    ppc_memory_nodeset += str(node_list[n]) + ','
                ppc_memory_nodeset += str(node_list[int(nodes.split('-')[1])])
            else:
                node_lst = nodes.split(',')
                for n in range(len(node_lst) - 1):
                    ppc_memory_nodeset += str(node_list[int(
                        node_lst[n])]) + ','
                ppc_memory_nodeset += str(node_list[int(node_lst[-1])])
            params['memory_nodeset'] = ppc_memory_nodeset
        except IndexError:
            test.cancel("No of numas in config does not match with no of "
                        "online numas in system")
        except utils_params.ParamNotFound:
            pass
        pkeys = ('memnode_nodeset', 'page_nodenum')
        for pkey in pkeys:
            for key in params.keys():
                if pkey in key:
                    params[key] = str(node_list[int(params[key])])
        # Modify qemu command line
        try:
            if params['qemu_cmdline_mem_backend_1']:
                memory_nodeset = sorted(params['memory_nodeset'].split(','))
                if len(memory_nodeset) > 1:
                    if int(memory_nodeset[1]) - int(memory_nodeset[0]) == 1:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s-%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    else:
                        qemu_cmdline = "memory-backend-ram,.*?id=ram-node1," \
                                       ".*?host-nodes=%s,.*?host-nodes=%s,policy=bind" % \
                                       (memory_nodeset[0], memory_nodeset[1])
                    params['qemu_cmdline_mem_backend_1'] = qemu_cmdline
        except utils_params.ParamNotFound:
            pass
        try:
            if params['qemu_cmdline_mem_backend_0']:
                qemu_cmdline = params['qemu_cmdline_mem_backend_0']
                params['qemu_cmdline_mem_backend_0'] = qemu_cmdline.replace(
                    ".*?host-nodes=1",
                    ".*?host-nodes=%s" % params['memnode_nodeset_0'])
        except utils_params.ParamNotFound:
            pass
    vcpu_num = int(params.get("vcpu_num", 2))
    max_mem = int(params.get("max_mem", 1048576))
    max_mem_unit = params.get("max_mem_unit", 'KiB')
    vcpu_placement = params.get("vcpu_placement", 'static')
    bug_url = params.get("bug_url", "")
    expect_cpus = params.get('expect_cpus')
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
    mode_dict = {
        'strict': 'bind',
        'preferred': 'prefer',
        'interleave': 'interleave'
    }

    cpu_num = cpu.get_cpu_info().get('CPU(s)')
    if vcpu_num > int(cpu_num):
        test.cancel('Number of vcpus (%s) is larger than the number of '
                    'cpus on the host (%s).' % (vcpu_num, cpu_num))

    # Prepare numatune memory parameter dict and list
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    memnode_tuple = ('memnode_cellid', 'memnode_mode', 'memnode_nodeset')
    numa_memnode = handle_param(memnode_tuple, params)

    if numa_memnode:
        if not libvirt_version.version_compare(1, 2, 7):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    # Prepare cpu numa cell parameter
    topology = {}
    topo_tuple = ('sockets', 'cores', 'threads')
    for key in topo_tuple:
        if params.get(key):
            topology[key] = params.get(key)

    cell_tuple = ('cell_id', 'cell_cpus', 'cell_memory')
    numa_cell = handle_param(cell_tuple, params)

    # Prepare qemu cmdline check parameter
    cmdline_tuple = ("qemu_cmdline", )
    cmdline_list = handle_param(cmdline_tuple, params)

    # Prepare hugepages parameter
    backup_list = []
    page_tuple = ('vmpage_size', 'vmpage_unit', 'vmpage_nodeset')
    page_list = handle_param(page_tuple, params)
    nr_pagesize_total = params.get("nr_pagesize_total")
    deallocate = False

    if page_list:
        if not libvirt_version.version_compare(1, 2, 5):
            test.cancel("Setting hugepages more specifically per "
                        "numa node not supported on current "
                        "version")

    hp_cl = test_setup.HugePageConfig(params)
    supported_hp_size = hp_cl.get_multi_supported_hugepage_size()
    mount_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    qemu_conf_restore = False

    def _update_qemu_conf():
        """
        Mount hugepage path, update qemu conf then restart libvirtd
        """
        size_dict = {'2048': '2M', '1048576': '1G', '16384': '16M'}
        for page in page_list:
            if page['size'] not in supported_hp_size:
                test.cancel("Hugepage size [%s] isn't supported, "
                            "please verify kernel cmdline configuration." %
                            page['size'])
            m_path = "/dev/hugepages%s" % size_dict[page['size']]
            hp_cl.hugepage_size = int(page['size'])
            hp_cl.hugepage_path = m_path
            hp_cl.mount_hugepage_fs()
            mount_path.append(m_path)
        if mount_path:
            qemu_conf.hugetlbfs_mount = mount_path
            libvirtd.restart()

    try:
        # Get host numa node list
        logging.debug("host node list is %s", node_list)
        used_node = []
        if numa_memory.get('nodeset'):
            used_node += cpu.cpus_parser(numa_memory['nodeset'])
        if numa_memnode:
            for i in numa_memnode:
                used_node += cpu.cpus_parser(i['nodeset'])
        if page_list:
            host_page_tuple = ("hugepage_size", "page_num", "page_nodenum")
            h_list = handle_param(host_page_tuple, params)
            h_nodenum = [
                h_list[p_size]['nodenum'] for p_size in range(len(h_list))
            ]
            for i in h_nodenum:
                used_node += cpu.cpus_parser(i)
        if used_node and not status_error:
            logging.debug("set node list is %s", used_node)
            used_node = list(set(used_node))
            for i in used_node:
                if i not in node_list:
                    test.cancel("%s in nodeset out of range" % i)
                mem_size = host_numa_node.read_from_node_meminfo(i, 'MemTotal')
                logging.debug("the memory total in the node %s is %s", i,
                              mem_size)
                if not int(mem_size):
                    test.cancel("node %s memory is empty" % i)

        # set hugepage with qemu.conf and mount path
        _update_qemu_conf()
        qemu_conf_restore = True

        # set hugepage with total number or per-node number
        if nr_pagesize_total:
            # Only set the total number of 2M hugepages, as runtime updates
            # of the total 1G hugepage count are not supported yet.
            deallocate = True
            hp_cl.target_hugepages = int(nr_pagesize_total)
            hp_cl.set_hugepages()
        if page_list:
            hp_size = [h_list[p_size]['size'] for p_size in range(len(h_list))]
            multi_hp_size = hp_cl.get_multi_supported_hugepage_size()
            for size in hp_size:
                if size not in multi_hp_size:
                    test.cancel("The hugepage size %s not "
                                "supported or not configured under"
                                " current running kernel." % size)
            # backup node page setting and set new value
            for i in h_list:
                node_val = hp_cl.get_node_num_huge_pages(
                    i['nodenum'], i['size'])
                # Set hugepages per node if the current value is not enough;
                # kernel runtime updates of 1G hugepage counts are supported now
                if int(i['num']) > node_val:
                    node_dict = i.copy()
                    node_dict['num'] = node_val
                    backup_list.append(node_dict)
                    hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                                  i['size'])
                    node_val_after_set = hp_cl.get_node_num_huge_pages(
                        i['nodenum'], i['size'])
                    if node_val_after_set < int(i['num']):
                        test.cancel("There is not enough memory to allocate.")

        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.vcpu = vcpu_num
        vmxml.max_mem = max_mem
        vmxml.max_mem_unit = max_mem_unit
        vmxml.current_mem = max_mem
        vmxml.current_mem_unit = max_mem_unit

        # numatune setting
        if numa_memnode:
            vmxml.numa_memory = numa_memory
            vmxml.numa_memnode = numa_memnode
            del vmxml.numa_memory
        if numa_memory:
            vmxml.numa_memory = numa_memory

        # vcpu placement setting
        vmxml.placement = vcpu_placement

        # guest numa cpu setting
        vmcpuxml = libvirt_xml.vm_xml.VMCPUXML()
        vmcpuxml.xml = "<cpu mode='host-model'><numa/></cpu>"
        if topology:
            vmcpuxml.topology = topology
        logging.debug(vmcpuxml.numa_cell)
        vmcpuxml.numa_cell = vmcpuxml.dicts_to_cells(numa_cell)
        logging.debug(vmcpuxml.numa_cell)
        vmxml.cpu = vmcpuxml

        # hugepages setting
        if page_list:
            membacking = libvirt_xml.vm_xml.VMMemBackingXML()
            hugepages = libvirt_xml.vm_xml.VMHugepagesXML()
            pagexml_list = []
            for i in range(len(page_list)):
                pagexml = hugepages.PageXML()
                pagexml.update(page_list[i])
                pagexml_list.append(pagexml)
            hugepages.pages = pagexml_list
            membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()

        try:
            vm.start()
            session = vm.wait_for_login()
            vmxml_new = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vm xml after start is %s", vmxml_new)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if status_error:
                return
            else:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        vm_pid = vm.get_pid()
        # numa hugepage check
        if page_list:
            with open("/proc/%s/numa_maps" % vm_pid) as numa_maps:
                numa_map_info = numa_maps.read()
            hugepage_info = re.findall(".*file=\S*hugepages.*", numa_map_info)
            if not hugepage_info:
                test.fail("Can't find hugepages usage info in vm " "numa maps")
            else:
                logging.debug("The hugepage info in numa_maps is %s" %
                              hugepage_info)
                map_dict = {}
                usage_dict = {}
                node_pattern = r"\s(\S+):(\S+)\s.*ram-node(\d+).*\s"
                node_pattern += "N(\d+)=(\d+)"
                for map_info in hugepage_info:
                    for (mem_mode, mem_num, cell_num, host_node_num,
                         vm_page_num) in re.findall(node_pattern, map_info):
                        usage_dict[mem_mode] = cpu.cpus_parser(mem_num)
                        usage_dict[host_node_num] = vm_page_num
                        map_dict[cell_num] = usage_dict.copy()
                logging.debug("huagepage info in vm numa maps is %s", map_dict)
                memnode_dict = {}
                usage_dict = {}
                if numa_memnode:
                    for i in numa_memnode:
                        node = cpu.cpus_parser(i['nodeset'])
                        mode = mode_dict[i['mode']]
                        usage_dict[mode] = node
                        memnode_dict[i['cellid']] = usage_dict.copy()
                    logging.debug("memnode setting dict is %s", memnode_dict)
                    for k in list(memnode_dict.keys()):
                        for mk in list(memnode_dict[k].keys()):
                            if memnode_dict[k][mk] != map_dict[k][mk]:
                                test.fail("vm pid numa map dict %s"
                                          " not expected" % map_dict)

        # qemu command line check
        with open("/proc/%s/cmdline" % vm_pid) as f_cmdline:
            q_cmdline_list = f_cmdline.read().split("\x00")
        logging.debug("vm qemu cmdline list is %s" % q_cmdline_list)
        cmdline_list = replace_qemu_cmdline(cmdline_list)
        for cmd in cmdline_list:
            logging.debug("checking '%s' in qemu cmdline", cmd['cmdline'])
            p_found = False
            for q_cmd in q_cmdline_list:
                if re.search(cmd['cmdline'], q_cmd):
                    p_found = True
                    break
                else:
                    continue
            if not p_found:
                test.fail("%s not found in vm qemu cmdline" % cmd['cmdline'])

        # vm inside check
        vm_cpu_info = cpu.get_cpu_info(session)
        logging.debug("lscpu output dict in vm is %s", vm_cpu_info)
        session.close()
        node_num = int(vm_cpu_info["NUMA node(s)"])
        if node_num != len(numa_cell):
            test.fail("node number %s in vm is not expected" % node_num)
        for i in range(len(numa_cell)):
            cpu_str = vm_cpu_info["NUMA node%s CPU(s)" % i]
            vm_cpu_list = cpu.cpus_parser(cpu_str)
            cpu_list = cpu.cpus_parser(numa_cell[i]["cpus"])
            if i == 0 and expect_cpus:
                cpu_list = cpu.cpus_parser(expect_cpus)
            if vm_cpu_list != cpu_list:
                test.fail("vm node %s cpu list %s not expected" %
                          (i, vm_cpu_list))
        if topology:
            vm_topo_tuple = ("Socket(s)", "Core(s) per socket",
                             "Thread(s) per core")
            for i in range(len(topo_tuple)):
                topo_info = vm_cpu_info[vm_topo_tuple[i]]
                if topo_info != topology[topo_tuple[i]]:
                    test.fail("%s in vm topology not expected." %
                              topo_tuple[i])
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if page_list:
            for i in backup_list:
                hp_cl.set_node_num_huge_pages(i['num'], i['nodenum'],
                                              i['size'])
        if deallocate:
            hp_cl.deallocate = deallocate
            hp_cl.cleanup()
        if qemu_conf_restore:
            qemu_conf.restore()
            libvirtd.restart()
            for mt_path in mount_path:
                try:
                    process.run("umount %s" % mt_path, shell=True)
                except process.CmdError:
                    logging.warning("umount %s failed" % mt_path)
Example #4
def run(test, params, env):
    """
    Test network/interface function on 2 vms:

        - Test settings on 2 vms
        - Run ping check on 2 vms including pinging each other
        ...

    """
    vms = params.get('vms').split()
    vm_list = [env.get_vm(v_name) for v_name in vms]
    if len(vm_list) != 2:
        test.cancel('More or fewer than 2 vms is currently unsupported')

    feature = params.get('feature', '')
    case = params.get('case', '')
    check_ping = 'yes' == params.get('check_ping')
    expect_ping_host = 'yes' == params.get('expect_ping_host', 'no')
    expect_ping_out = 'yes' == params.get('expect_ping_out', 'no')
    expect_ping_vm = 'yes' == params.get('expect_ping_vm', 'no')
    out_ip = params.get('out_ip', 'www.redhat.com')
    live_update = 'yes' == params.get('live_update', 'no')
    set_all = 'yes' == params.get('set_all', 'no')

    rand_id = '_' + utils_misc.generate_random_string(3)
    bridge_name = params.get('bridge_name', 'test_br0') + rand_id
    iface_name = utils_net.get_net_if(state="UP")[0]
    test_net = 'net_isolated' + rand_id
    bridge_created = False

    vmxml_backup_list = []
    for vm_i in vm_list:
        vmxml_backup_list.append(
            vm_xml.VMXML.new_from_inactive_dumpxml(vm_i.name))

    try:
        # Test feature: port isolated
        if feature == 'port_isolated':
            if not libvirt_version.version_compare(6, 2, 0):
                test.cancel('Libvirt version should be'
                            ' >= 6.2.0 to support port isolation')

            if case.startswith('set_iface'):
                create_bridge(bridge_name, iface_name)
                bridge_created = True
                iface_type = case.split('_')[-1]
                if iface_type == 'network':
                    net_dict = {
                        'net_forward': "{'mode': 'bridge'}",
                        'net_bridge': "{'name': '%s'}" % bridge_name
                    }
                    prepare_network(test_net, **net_dict)
                    updated_iface_dict = {
                        'type': iface_type,
                        'source': "{'network': '%s'}" % test_net,
                    }
                elif iface_type == 'bridge':
                    updated_iface_dict = {
                        'type': iface_type,
                        'source': "{'bridge': '%s'}" % bridge_name,
                    }
                else:
                    test.error('Unsupported iface type: %s' % iface_type)

                # Set 2 vms to isolated=yes or set one to 'yes', the other to 'no'
                isolated_settings = ['yes'] * 2 if set_all else ['yes', 'no']
                for i in (0, 1):
                    vm_i = vm_list[i]
                    new_iface_dict = dict(
                        list(updated_iface_dict.items()) +
                        [('port',
                          "{'isolated': '%s'}" % isolated_settings[i])])
                    libvirt.modify_vm_iface(vm_i.name, 'update_iface',
                                            new_iface_dict)
                    logging.debug(virsh.dumpxml(vm_i.name).stdout_text)

            if case == 'update_iface':
                if params.get('iface_port'):
                    iface_dict = {'port': params['iface_port']}
                    for vm_i in vm_list:
                        libvirt.modify_vm_iface(vm_i.name, 'update_iface',
                                                iface_dict)
                        logging.debug(virsh.dumpxml(vm_i.name).stdout_text)
                if live_update:
                    for vm_i in vm_list:
                        vm_i.start()

                # Test Update iface with new attrs
                new_iface_dict = {}
                if params.get('new_iface_port'):
                    new_iface_dict['port'] = params['new_iface_port']
                elif params.get('del_port') == 'yes':
                    new_iface_dict['del_port'] = True
                for vm_i in vm_list:
                    updated_iface = libvirt.modify_vm_iface(
                        vm_i.name, 'get_xml', new_iface_dict)
                    result = virsh.update_device(vm_i.name,
                                                 updated_iface,
                                                 debug=True)
                    libvirt.check_exit_status(result)

            if case == 'attach_iface':
                new_ifaces = {}
                for vm_i in vm_list:
                    # Create iface xml to be attached
                    new_iface = interface.Interface('network')
                    new_iface.xml = libvirt.modify_vm_iface(
                        vm_i.name, 'get_xml',
                        {'port': params.get('new_iface_port')})
                    new_ifaces[vm_i.name] = new_iface

                    # Remove current ifaces on vm
                    vmxml_i = vm_xml.VMXML.new_from_inactive_dumpxml(vm_i.name)
                    vmxml_i.remove_all_device_by_type('interface')
                    vmxml_i.sync()
                    logging.debug(virsh.dumpxml(vm_i.name).stdout_text)

                    # Start vm for hotplug
                    vm_i.start()
                    session = vm_i.wait_for_serial_login()

                    # Hotplug iface
                    virsh.attach_device(vm_i.name,
                                        new_iface.xml,
                                        debug=True,
                                        ignore_status=False)

                    # Wait a few seconds for interface to be fully attached
                    time.sleep(5)
                    ip_l_before = session.cmd_output('ip l')
                    logging.debug(ip_l_before)
                    session.close()

            if case == 'set_network':
                create_bridge(bridge_name, iface_name)
                bridge_created = True
                net_dict = {
                    'net_forward':
                    "{'mode': 'bridge'}",
                    'net_bridge':
                    "{'name': '%s'}" % bridge_name,
                    'net_port':
                    "{'isolated': '%s'}" % params.get('net_isolated', 'yes')
                }
                prepare_network(test_net, **net_dict)

                # Modify iface to connect to newly added network
                updated_iface_dict = {
                    'type': 'network',
                    'source': "{'network': '%s'}" % test_net
                }
                for vm_i in vm_list:
                    libvirt.modify_vm_iface(vm_i.name, 'update_iface',
                                            updated_iface_dict)
                    logging.debug(virsh.domiflist(vm_i.name).stdout_text)

        # Check ping result from vm session to host, outside, the other vm
        if check_ping:
            for vm_i in vm_list:
                if vm_i.is_dead():
                    vm_i.start()
            host_ip = utils_net.get_host_ip_address()
            ping_expect = {
                host_ip: expect_ping_host,
                out_ip: expect_ping_out,
            }

            # A map of vm session and vm's ip addr
            session_n_ip = {}
            for vm_i in vm_list:
                mac = vm_i.get_mac_address()
                sess = vm_i.wait_for_serial_login()
                vm_ip = utils_net.get_guest_ip_addr(sess, mac)
                session_n_ip[sess] = vm_ip
                logging.debug('Vm %s ip: %s', vm_i.name, vm_ip)

            # Check ping result from each vm's session
            for i in (0, 1):
                sess = list(session_n_ip.keys())[i]
                another_sess = list(session_n_ip.keys())[1 - i]
                ping_expect[session_n_ip[another_sess]] = expect_ping_vm
                if not ping_func(sess, **ping_expect):
                    test.fail('Ping check failed')
                # Remove the other session's ip from the ping map so that the
                # next round of ping checks does not ping the vm itself
                ping_expect.pop(session_n_ip[another_sess])

        # Some test steps after ping check
        if feature == 'port_isolated':
            if case == 'attach_iface':
                # Test detach of iface
                for vm_name_i in new_ifaces:
                    virsh.detach_device(vm_name_i,
                                        new_ifaces[vm_name_i].xml,
                                        wait_remove_event=True,
                                        debug=True,
                                        ignore_status=False)

                # Check whether the iface was successfully detached by comparing 'ip l' output
                for vm_i in vm_list:
                    session = vm_i.wait_for_serial_login()
                    ip_l_after = session.cmd_output('ip l')
                    session.close()
                    if len(ip_l_before.splitlines()) == len(
                            ip_l_after.splitlines()):
                        test.fail(
                            'Output of "ip l" is not changed afte detach, '
                            'interface not successfully detached')
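
ping_func above comes from the surrounding test module and runs the pings inside a vm serial session. A rough standalone equivalent of the "expected reachability map" idea, pinging from the local host instead (targets are placeholders):

import subprocess

def check_ping_map(ping_expect):
    """ping_expect maps target address -> whether it should be reachable."""
    for target, should_reach in ping_expect.items():
        reached = subprocess.call(
            ['ping', '-c', '3', '-W', '2', target],
            stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0
        if reached != should_reach:
            return False
    return True

# e.g. check_ping_map({'192.168.122.1': True, '192.168.122.77': False})
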
Example #5
def run(test, params, env):
    """
    Test virsh interface related commands.

    (1) If using a given existing interface for testing (e.g. lo or ethX):
        1.1 Dumpxml for the interface(with --inactive option)
        1.2 Destroy the interface
        1.3 Undefine the interface
    (2) Define an interface from XML file
    (3) List interfaces with '--inactive' option
    (4) Start the interface
    (5) List interfaces with no option
    (6) Dumpxml for the interface
    (7) Get interface MAC address by interface name
    (8) Get interface name by interface MAC address
    (9) Delete the interface if not using an existing interface for testing
        9.1 Destroy the interface
        9.2 Undefine the interface

    Caveat: this test may affect the host network, so it uses the loopback (lo)
    device by default. You can specify whichever interface you want, but be
    careful.
    """

    iface_name = params.get("iface_name", "ENTER.BRIDGE.NAME")
    iface_xml = params.get("iface_xml")
    iface_type = params.get("iface_type", "ethernet")
    iface_pro = params.get("iface_pro", "")
    iface_eth = params.get("iface_eth", "")
    iface_tag = params.get("iface_tag", "0")
    if iface_type == "vlan":
        iface_name = iface_eth + "." + iface_tag
    iface_eth_using = "yes" == params.get("iface_eth_using", "no")
    ping_ip = params.get("ping_ip", "localhost")
    use_exist_iface = "yes" == params.get("use_exist_iface", "no")
    status_error = "yes" == params.get("status_error", "no")
    net_restart = "yes" == params.get("iface_net_restart", "no")
    list_dumpxml_acl = "yes" == params.get("list_dumpxml_acl", "no")
    if ping_ip.count("ENTER"):
        test.cancel("Please input a valid ip address")
    if iface_name.count("ENTER"):
        test.cancel("Please input a existing bridge/ethernet name")

    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user', "EXAMPLE")
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_dargs = {'debug': True}
    list_dumpxml_dargs = {'debug': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        if not list_dumpxml_acl:
            virsh_dargs['uri'] = uri
            virsh_dargs['unprivileged_user'] = unprivileged_user
        else:
            list_dumpxml_dargs['uri'] = uri
            list_dumpxml_dargs['unprivileged_user'] = unprivileged_user
            list_dumpxml_dargs['ignore_status'] = False

    # acl api negative testing params
    write_save_status_error = "yes" == params.get("write_save_status_error",
                                                  "no")
    start_status_error = "yes" == params.get("start_status_error", "no")
    stop_status_error = "yes" == params.get("stop_status_error", "no")
    delete_status_error = "yes" == params.get("delete_status_error", "no")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm:
        xml_bak = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(data_dir.get_tmp_dir(), "iface-%s.bk" % iface_name)
    net_bridge = utils_net.Bridge()
    if use_exist_iface:
        if iface_type == "bridge":
            if iface_name not in net_bridge.list_br():
                test.error("Bridge '%s' not exists" % iface_name)
            ifaces = net_bridge.get_structure()[iface_name]
            if len(ifaces) < 1:
                # In this situation, dhcp may not be able to get an ip address;
                # unless static addressing is used, we'd better skip such a case
                test.cancel("Bridge '%s' has no bridged"
                            " interface; it perhaps cannot"
                            " get an ip address" % iface_name)
    net_iface = utils_net.Interface(name=iface_name)
    iface_is_up = True
    list_option = "--all"
    if use_exist_iface:
        if not libvirt.check_iface(iface_name, "exists", "--all"):
            test.error("Interface '%s' not exists" % iface_name)
        iface_xml = os.path.join(data_dir.get_tmp_dir(), "iface.xml.tmp")
        iface_is_up = net_iface.is_up()
    else:
        # Note: if not using an already existing interface, iface_name must
        # be equal to the value specified in the XML file
        if libvirt.check_iface(iface_name, "exists", "--all"):
            test.error("Interface '%s' already exists" % iface_name)
        if not iface_xml:
            test.error("XML file is needed.")
        iface_xml = os.path.join(data_dir.get_tmp_dir(), iface_xml)
        create_xml_file(iface_xml, params)

    # Stop NetworkManager, as it may conflict with virsh iface commands
    try:
        NM = utils_path.find_command("NetworkManager")
    except utils_path.CmdNotFoundError:
        logging.debug("No NetworkManager service.")
        NM = None
    NM_is_running = False
    if NM is not None:
        NM_service = service.Factory.create_service("NetworkManager")
        NM_is_running = NM_service.status()
        if NM_is_running:
            NM_service.stop()

    # run test cases
    try:
        if use_exist_iface:
            # back up the interface script
            process.run("cp %s %s" % (iface_script, iface_script_bk), shell=True)
            # step 1.1
            # dumpxml for interface
            if list_dumpxml_acl:
                virsh.iface_list(**list_dumpxml_dargs)
            xml = virsh.iface_dumpxml(iface_name, "--inactive",
                                      to_file=iface_xml,
                                      **list_dumpxml_dargs)
            # Step 1.2
            # Destroy interface
            if iface_is_up:
                result = virsh.iface_destroy(iface_name, **virsh_dargs)
                if (params.get('setup_libvirt_polkit') == 'yes' and
                        stop_status_error):
                    # acl_test negative test
                    libvirt.check_exit_status(result, stop_status_error)
                    virsh.iface_destroy(iface_name, debug=True)
                else:
                    libvirt.check_exit_status(result, status_error)

            # Step 1.3
            # Undefine interface
            result = virsh.iface_undefine(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    delete_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, delete_status_error)
                virsh.iface_undefine(iface_name, debug=True)
            else:
                libvirt.check_exit_status(result, status_error)
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    test.fail("%s is still present." % iface_name)

        # Step 2
        # Define interface
        result = virsh.iface_define(iface_xml, **virsh_dargs)
        if (params.get('setup_libvirt_polkit') == 'yes' and
                write_save_status_error):
            # acl_test negative test
            libvirt.check_exit_status(result, write_save_status_error)
            virsh.iface_define(iface_xml, debug=True)
        elif iface_type == "bond" and not ping_ip:
            libvirt.check_exit_status(result, True)
            return
        else:
            libvirt.check_exit_status(result, status_error)

        if net_restart:
            network = service.Factory.create_service("network")
            network.restart()

        # After a network restart, the (ethernet) interface will be started
        if (not net_restart and iface_type in ("bridge", "ethernet")) or\
           (not use_exist_iface and iface_type in ("vlan", "bond")):
            # Step 3
            # List inactive interfaces
            list_option = "--inactive"
            if not status_error:
                if not libvirt.check_iface(iface_name, "exists", list_option):
                    test.fail("Fail to find %s." % iface_name)

            # Step 4
            # Start interface
            result = virsh.iface_start(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    start_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, start_status_error)
                virsh.iface_start(iface_name, debug=True)
            elif (not net_restart and not use_exist_iface and
                    (iface_type == "ethernet" and iface_pro in ["", "dhcp"] or
                        iface_type == "bridge" and iface_pro == "dhcp")):
                libvirt.check_exit_status(result, True)
            else:
                libvirt.check_exit_status(result, status_error)
            if not status_error:
                iface_ip = net_iface.get_ip()
                ping_ip = ping_ip if not iface_ip else iface_ip
                if ping_ip:
                    if not libvirt.check_iface(iface_name, "ping", ping_ip):
                        test.fail("Ping %s fail." % ping_ip)

        # Step 5
        # List active interfaces
        if use_exist_iface or\
           (iface_pro != "dhcp" and iface_type == "bridge") or\
           (iface_eth_using and iface_type == "vlan"):
            list_option = ""
            if not status_error:
                if not libvirt.check_iface(iface_name, "exists", list_option):
                    test.fail("Fail to find %s in active "
                              "interface list" % iface_name)
            if vm:
                if vm.is_alive():
                    vm.destroy()
                iface_index = 0
                iface_mac_list = vm_xml.VMXML.get_iface_dev(vm_name)
                # Before test, detach all interfaces in guest
                for mac in iface_mac_list:
                    iface_info = vm_xml.VMXML.get_iface_by_mac(vm_name, mac)
                    if_type = iface_info.get('type')
                    virsh.detach_interface(vm_name,
                                           "--type %s --mac %s"
                                           " --config" % (if_type, mac))
                    # After detaching an interface, vm.virtnet also needs an
                    # update; the easy way is to free these mac addresses
                    # before starting the VM
                    vm.free_mac_address(iface_index)
                    iface_index += 1
                virsh.attach_interface(vm_name,
                                       "--type %s --source %s"
                                       " --config" % (iface_type, iface_name))
                vm.start()
                try:
                    # Test if guest can be login
                    vm.wait_for_login()
                except remote.LoginError:
                    test.fail("Cannot login guest with %s" %
                              iface_name)

        # Step 6
        # Dumpxml for interface
        if list_dumpxml_acl:
            virsh.iface_list(**list_dumpxml_dargs)
        xml = virsh.iface_dumpxml(iface_name, "", to_file="",
                                  **list_dumpxml_dargs)
        logging.debug("Interface '%s' XML:\n%s", iface_name, xml)

        # Step 7
        # Get interface MAC address by name
        result = virsh.iface_mac(iface_name, debug=True)
        libvirt.check_exit_status(result, status_error)
        if not status_error and result.stdout.strip():
            if not libvirt.check_iface(iface_name, "mac",
                                       result.stdout.strip()):
                test.fail("Mac address check fail")

        # Step 8
        # Get interface name by MAC address
        # A bridge's MAC equals the bridged interface's MAC
        if iface_type not in ("bridge", "vlan") and result.stdout.strip():
            iface_mac = net_iface.get_mac()
            result = virsh.iface_name(iface_mac, debug=True)
            libvirt.check_exit_status(result, status_error)

        # Step 9
        if not use_exist_iface:
            # Step 9.0
            # Check if the interface's state is active before destroying it
            if libvirt.check_iface(iface_name, "state", "--all"):
                # Step 9.1
                # Destroy interface
                result = virsh.iface_destroy(iface_name, **virsh_dargs)
                if (params.get('setup_libvirt_polkit') == 'yes' and
                        stop_status_error):
                    # acl_test negative test
                    libvirt.check_exit_status(result, stop_status_error)
                    virsh.iface_destroy(iface_name, debug=True)
                elif (not net_restart and iface_type == "ethernet" and
                        iface_pro in ["", "dhcp"] or iface_type == "bridge" and
                        iface_pro == "dhcp"):
                    libvirt.check_exit_status(result, True)
                else:
                    libvirt.check_exit_status(result, status_error)

            # Step 9.2
            # Undefine interface
            result = virsh.iface_undefine(iface_name, **virsh_dargs)
            if (params.get('setup_libvirt_polkit') == 'yes' and
                    delete_status_error):
                # acl_test negative test
                libvirt.check_exit_status(result, delete_status_error)
                virsh.iface_undefine(iface_name, debug=True)
            else:
                libvirt.check_exit_status(result, status_error)
            list_option = "--all"
            if not status_error:
                if libvirt.check_iface(iface_name, "exists", list_option):
                    test.fail("%s is still present." % iface_name)
    finally:
        if os.path.exists(iface_xml):
            os.remove(iface_xml)
        if os.path.exists(iface_script):
            os.remove(iface_script)

        if use_exist_iface:
            if not os.path.exists(iface_script):
                process.run("mv %s %s" % (iface_script_bk, iface_script), shell=True)
            if (iface_is_up and
                    not libvirt.check_iface(iface_name, "exists", "")):
                # Need to reload the network script
                process.run("ifup %s" % iface_name, shell=True)
            elif not iface_is_up and libvirt.check_iface(iface_name,
                                                         "exists", ""):
                net_iface.down()
            if vm:
                xml_bak.sync()
        else:
            if libvirt.check_iface(iface_name, "exists", "--all"):
                # Remove the interface
                try:
                    utils_net.bring_down_ifname(iface_name)
                except utils_net.TAPBringDownError:
                    pass
            if iface_type == "bridge":
                if iface_name in net_bridge.list_br():
                    try:
                        net_bridge.del_bridge(iface_name)
                    except IOError:
                        pass
        if NM_is_running:
            NM_service.start()
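
A minimal standalone sketch (not part of the test above) of the name/MAC
round trip exercised in Steps 7 and 8, assuming avocado-vt's virsh wrapper
as imported by these tests; the interface name passed in is hypothetical:

from virttest import virsh

def iface_roundtrip(iface_name):
    """Return True if iface_mac and iface_name agree for the interface."""
    mac = virsh.iface_mac(iface_name, debug=True).stdout.strip()
    name = virsh.iface_name(mac, debug=True).stdout.strip()
    return name == iface_name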
Example #6
def run(test, params, env):
    """
    Test command: virsh domxml-to-native.

    Convert domain XML config to a native guest configuration format.
    1.Prepare test environment.
    2.When libvirtd == "off", stop the libvirtd service.
    3.Perform the virsh domxml-to-native operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    def buildcmd(arglist):
        """
        Return a list of arguments of qemu command.

        Return a list based on the input string where each element pairs
        an option with its argument rather than keeping them separate.
        Thus rather than having "-option" and "argument" in separate list
        elements, they will be in one element "-option argument". Note
        that an option's argument may be absent. This makes it easier to
        determine what is causing a failure when printing error messages.
        """
        # First separate everything by the first space into a list
        elems = arglist.split('\x20')

        # Peruse the list to build up a formatted output retlist
        retlist = []
        i = 0
        skip = False
        for e in elems:
            # If 'skip' is True, then we've appended an option and argument
            if skip:
                skip = False
                i = i + 1
                continue

            # Peek at the next element; an option at the very end of the
            # list has no argument to pair with
            enext = elems[i + 1] if i + 1 < len(elems) else ''

            # If the current and the next element both start with "-", the
            # next one is not an argument to the current one, so we just
            # append the current element. The same goes for anything that
            # doesn't start with a "-", and for a trailing lone option.
            if (e.startswith('-') and enext.startswith('-')) \
                    or not e.startswith('-') or not enext:
                retlist.append(e)
            else:
                # -blockdev '{"driver":"file",...,"discard":"unmap"}' should be
                # turned into
                # -blockdev {"driver":"file",...,"discard":"unmap"} in order to
                # match the qemu command line format
                if e in ['-blockdev', '-object']:
                    enext = enext.strip("'")
                # Append this and the next and set our skip flag
                retlist.append(e + " " + enext)
                skip = True
            i = i + 1

        # Return the list of paired-up arguments
        return retlist
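    # Illustration (added for clarity; not in the original test): for the
    # input "-name guest -S -device foo", buildcmd pairs options with
    # their arguments and returns ['-name guest', '-S', '-device foo'];
    # '-S' stays alone because the following element also begins with '-'.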

    def filtlist(arglist):
        """
        Return a filtered list of arguments.

        Walk through the supplied list to filter out things that will be
        known to be different depending on the running environment.
        """
        retlist = []
        for arg in arglist:
            if re.search("mode=readline", arg):
                continue
            elif re.search("mac=", arg):
                continue
            elif re.search("127.0.0.1:", arg):
                continue
            elif re.search("tap", arg):
                continue
            # Upstream libvirt commit id 'e8400564':
            # XMLToNative: Don't show -S
            elif re.search("-S", arg):
                continue
            elif re.search("socket,id=", arg):
                continue
            elif re.search("secret,id=", arg):
                continue
            elif re.search("-cpu", arg):
                continue
            # libvirt commit id 'd96fb5cb'
            elif re.search("master-key.aes", arg):
                continue
            retlist.append(arg)

        return retlist

    def prepend_expected_env_vars(conv_arg, cmdline):
        """
        Prepend the various environment variables that will be in
        the conv_arg, but not in the actual command

        :param conv_arg : Converted information
        :param cmdline: Command line qemu has been called with
        :return: cmdline prepended by expected environment variable values
        """
        expected_env_vars = ['LC_ALL', 'PATH']
        if not libvirt_version.version_compare(7, 3, 0):
            expected_env_vars += ['QEMU_AUDIO_DRV']
        if libvirt_version.version_compare(5, 2, 0):
            expected_env_vars += [
                'HOME',
                'XDG_DATA_HOME',
                'XDG_CACHE_HOME',
                'XDG_CONFIG_HOME',
            ]

        valmatcher = '.[^\\s]+\\s'

        def matchf(x):
            return re.search(x + valmatcher, conv_arg).group(0)

        return "".join(map(matchf, expected_env_vars)) + cmdline

    def compare(conv_arg):
        """
        Compare converted information with vm's information.

        :param conv_arg : Converted information.
        :return: True if the converted information does not differ from
                 the vm's information.
        """
        pid = vm.get_pid()
        cmdline_tmp = process.run("cat -v /proc/%d/cmdline" % pid,
                                  shell=True).stdout_text

        # Output has a trailing '^@' which gets converted into an empty
        # element when splitting by '\x20', so strip it on the end.
        cmdline = re.sub(r'\^@', ' ', cmdline_tmp).strip(' ')

        # Fedora 19 replaces the /usr/bin/qemu-kvm with the string
        # "/usr/bin/qemu-system-x86_64 -machine accel=kvm", so let's
        # do the same if we find "/usr/bin/qemu-kvm" in the incoming
        # argument list and we find "qemu-system-x86_64 -machine accel=kvm"
        # in the running guest's cmdline
        # ubuntu uses /usr/bin/kvm as qemu binary
        # RHEL uses /usr/libexec/qemu-kvm as qemu binary
        qemu_bin = [
            "/usr/bin/qemu-kvm", "/usr/bin/kvm", "/usr/libexec/qemu-kvm"
        ]
        arch_bin = [
            "/usr/bin/qemu-system-x86_64 -machine accel=kvm",
            "/usr/bin/qemu-system-ppc64 -machine accel=kvm",
            "qemu-system-ppc64 -enable-kvm"
        ]
        qemu_kvm_bin = ""
        for each_bin in qemu_bin:
            if conv_arg.find(each_bin) != -1:
                qemu_kvm_bin = each_bin
        if qemu_kvm_bin:
            for arch in arch_bin:
                if cmdline.find(arch) != -1:
                    cmdline = re.sub(arch, qemu_kvm_bin, cmdline)
        else:
            logging.warning("qemu-kvm binary is not identified: '%s'",
                            qemu_kvm_bin)

        qemu_arg = prepend_expected_env_vars(conv_arg, cmdline)

        conv_arg_lines = buildcmd(conv_arg)
        qemu_arg_lines = buildcmd(qemu_arg)

        diff1 = filtlist(
            tuple(x for x in conv_arg_lines if x not in set(qemu_arg_lines)))
        if diff1:
            logging.debug("Found the following in conv_arg not in qemu_arg:")
            for elem in diff1:
                logging.debug("\t%s", elem)

        diff2 = filtlist(
            tuple(x for x in qemu_arg_lines if x not in set(conv_arg_lines)))
        if diff2:
            logging.debug("Found the following in qemu_arg not in conv_arg:")
            for elem in diff2:
                logging.debug("\t%s", elem)

        if diff1 or diff2:
            return False

        return True

    # prepare
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    if not vm.is_dead():
        vm.destroy()
    vm.start()
    if not vm.is_alive():
        test.fail("VM start failed")

    domid = vm.get_id()
    domuuid = vm.get_uuid()

    dtn_format = params.get("dtn_format")
    file_xml = params.get("dtn_file_xml", "")
    extra_param = params.get("dtn_extra_param")
    extra = params.get("dtn_extra", "")
    libvirtd = params.get("libvirtd")
    status_error = params.get("status_error", "no")
    vm_id = params.get("dtn_vm_id", "")
    readonly = ("yes" == params.get("readonly", "no"))

    # For positive_test
    if status_error == "no":
        if vm_id == "id":
            vm_id = domid
        elif vm_id == "uuid":
            vm_id = domuuid
        elif vm_id == "name":
            vm_id = "%s %s" % (vm_name, extra)
        if file_xml == "":
            extra_param = extra_param + vm_id

    virsh.dumpxml(vm_name, extra="", to_file=file_xml)
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # run test case
    ret = virsh.domxml_to_native(dtn_format,
                                 file_xml,
                                 extra_param,
                                 readonly=readonly,
                                 ignore_status=True,
                                 debug=True)
    status = ret.exit_status
    conv_arg = ret.stdout.strip()

    # recover libvirtd service start
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # clean up
    if os.path.exists(file_xml):
        os.remove(file_xml)

    # check status_error
    if status_error == "yes":
        if status == 0:
            if libvirtd == "off" and libvirt_version.version_compare(5, 6, 0):
                logging.info(
                    "From libvirt version 5.6.0 libvirtd is restarted "
                    "and command should succeed")
            else:
                test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
        if compare(conv_arg) is not True:
            test.fail("Test failed!")
Example #7
def run(test, params, env):
    """
    1) Test Define/undefine/start/destroy/save/restore an OVMF/Seabios domain
    with 'boot dev' element or 'boot order' element
    2) Test Create snapshot with 'boot dev' or 'boot order'

    Steps:
    1) Prepare a typical VM XML, e.g. for OVMF or Seabios Guest boot
    2) Setup boot sequence by element 'boot dev' or 'boot order'
    3) Define/undefine/start/destroy/save/restore VM and check result
    4) Create snapshot with 'boot dev' or 'boot order'
    """
    vm_name = params.get("main_vm", "")
    vm = env.get_vm(vm_name)
    boot_type = params.get("boot_type", "seabios")
    boot_ref = params.get("boot_ref", "dev")
    disk_target_dev = params.get("disk_target_dev", "")
    disk_target_bus = params.get("disk_target_bus", "")
    tmp_file = data_dir.get_data_dir()
    save_file = os.path.join(tmp_file, vm_name + ".save")
    nvram_file = params.get("nvram", "")
    expected_text = params.get("expected_text", None)
    boot_entry = params.get("boot_entry", None)
    with_snapshot = "yes" == params.get("with_snapshot", "no")
    snapshot_take = int(params.get("snapshot_take", "1"))
    postfix = params.get("postfix", "")

    # Back up the VM XML
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    if boot_type == "ovmf":
        if not libvirt_version.version_compare(2, 0, 0):
            test.error("OVMF doesn't support in current" " libvirt version.")

        if not utils_package.package_install('OVMF'):
            test.error("OVMF package install failed")

    if (boot_type == "seabios"
            and not utils_package.package_install('seabios-bin')):
        test.error("seabios package install failed")

    try:
        prepare_boot_xml(vmxml, params)

        # Update domain disk
        # Use sata for uefi, and ide for seabios
        domain_disk = vmxml.get_devices(device_type='disk')[0]
        domain_disk.target = {"dev": disk_target_dev, "bus": disk_target_bus}
        del domain_disk['address']
        vmxml.remove_all_disk()
        vmxml.add_device(domain_disk)

        # Setup boot start sequence
        vmxml.remove_all_boots()
        if boot_ref == "order":
            vmxml.set_boot_order_by_target_dev(disk_target_dev, "1")
        if boot_ref == "dev":
            vmxml.set_os_attrs(**{"boots": ["hd"]})

        logging.debug("The new VM XML is:\n%s", vmxml)
        vmxml.undefine()

        virsh_dargs = {"debug": True, "ignore_status": True}

        if boot_type == "s390_qemu":
            # Start test and check result
            ret = virsh.define(vmxml.xml, **virsh_dargs)
            ret = virsh.start(vm_name, "--paused", **virsh_dargs)
            time.sleep(1)
            vm.create_serial_console()
            time.sleep(1)
            vm.resume()
            if not boot_entry:
                check_boot = console_check(vm, expected_text)
                if not wait_for(check_boot, 60, 1):
                    test.fail("No boot menu found. Please check log.")
            else:
                vm.serial_console.send(boot_entry)
                time.sleep(0.5)
                vm.serial_console.sendcontrol('m')
                check_boot = console_check(vm, expected_text)
                if not wait_for(check_boot, 60, 1):
                    test.fail("Boot entry not selected. Please check log.")
                vm.wait_for_login()
        else:
            if with_snapshot:
                # Create snapshot for guest with boot dev or boot order
                virsh.define(vmxml.xml, **virsh_dargs)
                virsh.start(vm_name, **virsh_dargs)
                vm.wait_for_login()
                external_snapshot = libvirt_disk.make_external_disk_snapshots(
                    vm, disk_target_dev, postfix, snapshot_take)
                snapshot_list_check(vm_name, snapshot_take, postfix, test)
            else:
                # Test the lifecycle for the guest
                kwargs = {
                    "vm_name": vm_name,
                    "save_file": save_file,
                    "boot_type": boot_type,
                    "nvram_file": nvram_file
                }
                domain_lifecycle(vmxml, vm, test, virsh_dargs, **kwargs)
    finally:
        logging.debug("Start to cleanup")
        if vm.is_alive():
            vm.destroy()
        logging.debug("Restore the VM XML")
        if os.path.exists(save_file):
            os.remove(save_file)
        if with_snapshot:
            vmxml_recover_from_snap(vm_name, boot_type, vmxml_backup, test)
            # Remove the generated snapshot file
            for snap_file in external_snapshot:
                if os.path.exists(snap_file):
                    os.remove(snap_file)
        else:
            vmxml_backup.sync()
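
A short sketch of the two boot-sequence styles this test toggles between,
reusing the vm_xml helpers imported above (the vm name and target dev are
hypothetical):

vmxml = vm_xml.VMXML.new_from_dumpxml("avocado-vt-vm1")
vmxml.remove_all_boots()
# Per-device order: puts <boot order='1'/> on the disk targeting 'sda'
vmxml.set_boot_order_by_target_dev("sda", "1")
# Or, instead, the global <os><boot dev='hd'/></os> form:
# vmxml.set_os_attrs(**{"boots": ["hd"]})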
Example #8
def run(test, params, env):
    """
    Test vcpu
    """
    vm_name = params.get('main_vm')
    check = params.get('check', '')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '')
    guest_vcpu = params.get('guest_vcpu')
    boot_timeout = int(params.get('boot_timeout', 240))
    start_fail = 'yes' == params.get('start_fail', 'no')

    vm = env.get_vm(vm_name)
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    def check_onlinevcpus(vm, cpu_num):
        """
        Check whether all vcpus are online as expected.

        :param vm: the exact VM to check
        :param cpu_num: the number of online vcpus that must match
        """
        if not utils_misc.wait_for(
                lambda: cpu.check_if_vm_vcpu_match(cpu_num, vm),
                timeout=120,
                step=5,
                text="wait for vcpu online"):
            test.fail('Not all vcpus are online as expected.')

    def set_iommu(vmxml, **dargs):
        """
        Add an iommu device to the vm.

        :param vmxml: xml of the vm the iommu device is added to
        :param dargs: args of the iommu device
        """
        logging.info('Add iommu device to vm.')
        iommu_device = Iommu()
        iommu_device.model = dargs.get('model', 'intel')
        iommu_device.driver = dargs.get('driver', {
            'intremap': 'on',
            'eim': 'on'
        })
        vmxml.add_device(iommu_device)

    try:
        # Check the output of "virsh maxvcpus" for both i440fx and q35 VM
        if check == 'virsh_maxvcpus':
            report_num = params.get('report_num', '')
            logging.info('Check the output of virsh maxvcpus')
            cmd_result = virsh.maxvcpus(debug=True)
            if (cmd_result.exit_status == 0
                    and cmd_result.stdout.strip() == report_num):
                logging.debug('Test passed as the reported max vcpu num is %s',
                              report_num)
            else:
                test.fail(
                    'Test failed as the reported max vcpu num is not as expected.'
                )

        # Check the output of "virsh capabilities" for both i440fx and q35 VM
        if check == "virsh_capabilities":
            report_num_pc_7 = params.get('report_num_pc_7', '')
            report_num_q35_73 = params.get('report_num_q35_73', '')
            report_num_q35_7_8 = params.get('report_num_q35_7_8', '')
            report_num_q35_8_3 = params.get('report_num_q35_8_3', '')
            report_num_q35_8_4 = params.get('report_num_q35_8_4', '')
            logging.info('Check the output of virsh capabilities')
            xmltreefile = capability_xml.CapabilityXML().xmltreefile
            machtype_vcpunum_dict = {}
            for guest in xmltreefile.findall('guest'):
                for arch in guest.findall('arch'):
                    if arch.get('name') == "x86_64":
                        for machine in arch.findall('machine'):
                            machine_text = machine.text
                            vcpunum = machine.get('maxCpus')
                            machtype_vcpunum_dict[machine_text] = vcpunum
            for key in machtype_vcpunum_dict:
                logging.info("%s : %s", key, machtype_vcpunum_dict[key])
                if key.startswith('pc-i440fx') or key.startswith(
                        'rhel') or key == 'pc':
                    if machtype_vcpunum_dict[key] != report_num_pc_7:
                        test.fail('Test failed as i440fx_max_vcpus_num in '
                                  'virsh_capa is wrong. Expected: {} '
                                  'Actual: {}.'.format(
                                      report_num_pc_7,
                                      machtype_vcpunum_dict[key]))
                if key.startswith('pc-q35') or key == 'q35':
                    if key == "pc-q35-rhel7.3.0":
                        if machtype_vcpunum_dict[key] != report_num_q35_73:
                            test.fail(
                                'Test failed as q35_rhel73_max_vcpus_num '
                                'in virsh_capa is wrong. Expected: {} '
                                'Actual: {}.'.format(
                                    report_num_q35_73,
                                    machtype_vcpunum_dict[key]))
                    else:
                        exp_val = report_num_q35_7_8
                        if libvirt_version.version_compare(7, 0, 0):
                            exp_val = report_num_q35_8_4
                        elif libvirt_version.version_compare(6, 6, 0):
                            exp_val = report_num_q35_8_3
                        if machtype_vcpunum_dict[key] != exp_val:
                            test.fail(
                                'Test failed as the q35_max_vcpus_num in '
                                'virsh_capa is wrong. Expected: {} '
                                'Actual: {}.'.format(
                                    exp_val, machtype_vcpunum_dict[key]))

        # Test i440fx VM starts with 240(positive)/241(negative) vcpus and hot-plugs vcpus to 240
        if check.startswith('i440fx_test'):
            current_vcpu = params.get('current_vcpu')
            target_vcpu = params.get('target_vcpu')
            if 'hotplug' not in check:
                vmxml.vcpu = int(guest_vcpu)
                vmxml.sync()
                if status_error:
                    if start_fail:
                        result_need_check = virsh.start(vm_name, debug=True)
                else:
                    vm.start()
                    logging.info(libvirt_xml.VMXML.new_from_dumpxml(vm_name))
                    vm.wait_for_login(timeout=boot_timeout).close()
                    check_onlinevcpus(vm, int(guest_vcpu))
            else:
                vmxml.vcpu = int(guest_vcpu)
                vmxml.current_vcpu = int(current_vcpu)
                target_vcpu = int(target_vcpu)
                vmxml.sync()
                vm.start()
                logging.info(libvirt_xml.VMXML.new_from_dumpxml(vm_name))
                vm.wait_for_login(timeout=boot_timeout).close()
                check_onlinevcpus(vm, int(current_vcpu))
                res = virsh.setvcpus(vm_name, target_vcpu, debug=True)
                libvirt.check_exit_status(res)
                check_onlinevcpus(vm, int(target_vcpu))

        # Configure a guest vcpu > 255 without iommu device for q35 VM
        if check == 'no_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Set iommu device but not set ioapci in features for q35 VM
        if check == 'with_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            set_iommu(vmxml)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Add ioapic and iommu device in xml for q35 VM
        if check.startswith('ioapic_iommu'):
            logging.info('Modify features')
            vm_features = vmxml.features
            vm_features.add_feature('apic')
            vm_features.add_feature('ioapic', 'driver', 'qemu')
            vmxml.features = vm_features
            logging.debug(vmxml.features.get_feature_list())

            logging.info('Set vcpu to %s', guest_vcpu)
            set_iommu(vmxml)

            ori_vcpu = vmxml.vcpu
            vmxml.vcpu = int(guest_vcpu)
            vmxml.current_vcpu = ori_vcpu

            if 'hotplug' not in check:
                vmxml.current_vcpu = int(guest_vcpu)

            if status_error:
                if start_fail:
                    if libvirt_version.version_compare(5, 6, 0):
                        result_need_check = virsh.define(vmxml.xml, debug=True)
                    else:
                        vmxml.sync()
                        result_need_check = virsh.start(vm_name, debug=True)

            else:
                # Login guest and check guest cpu number
                vmxml.sync()
                logging.debug(virsh.dumpxml(vm_name))
                vm.start()
                session = vm.wait_for_login(timeout=boot_timeout)
                logging.debug(session.cmd('lscpu -e'))

                # Hotplug vcpu to $guest_vcpu
                if 'hotplug' in check:
                    res = virsh.setvcpus(vm_name, guest_vcpu, debug=True)
                    libvirt.check_exit_status(res)

                # Check if vcpu(s) are online
                check_onlinevcpus(vm, int(guest_vcpu))

        # Check result if there's result to check
        if 'result_need_check' in locals():
            libvirt.check_result(result_need_check, err_msg)

    finally:
        bkxml.sync()
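
A condensed hotplug sketch of the flow above, assuming the modules
imported by this test (virsh, libvirt, utils_misc, cpu), a running VM
object vm, and hypothetical vm name and vcpu counts:

res = virsh.setvcpus("avocado-vt-vm1", 240, debug=True)
libvirt.check_exit_status(res)
# Poll until the guest shows 240 online vcpus, as check_onlinevcpus does
utils_misc.wait_for(lambda: cpu.check_if_vm_vcpu_match(240, vm),
                    timeout=120, step=5)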
Example #9
def run(test, params, env):
    """
    Test virsh migrate command.
    """
    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_options = params.get("virsh_options", "")

    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    client_ip = params.get("client_ip")
    client_pwd = params.get("client_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")

    guest_src_url = params.get("guest_src_url")
    guest_src_path = params.get("guest_src_path",
                                "/var/lib/libvirt/images/guest.img")
    check_disk = "yes" == params.get("check_disk")
    disk_model = params.get("disk_model")
    disk_target = params.get("disk_target", "vda")
    controller_model = params.get("controller_model")

    check_interface = "yes" == params.get("check_interface")
    iface_type = params.get("iface_type", "network")
    iface_model = params.get("iface_model", "virtio")
    iface_params = {
        'type': iface_type,
        'model': iface_model,
        'del_addr': True,
        'source': '{"network": "default"}'
    }

    check_memballoon = "yes" == params.get("check_memballoon")
    membal_model = params.get("membal_model")

    check_rng = "yes" == params.get("check_rng")
    rng_model = params.get("rng_model")

    migrate_vm_back = "yes" == params.get("migrate_vm_back", "no")
    status_error = "yes" == params.get("status_error", "no")
    remote_virsh_dargs = {
        'remote_ip': server_ip,
        'remote_user': server_user,
        'remote_pwd': server_pwd,
        'unprivileged_user': None,
        'ssh_remote_auth': True
    }
    remote_dargs = {
        'server_ip': server_ip,
        'server_user': server_user,
        'server_pwd': server_pwd,
        'file_path': "/etc/libvirt/libvirt.conf"
    }

    xml_check_after_mig = params.get("guest_xml_check_after_mig")

    err_msg = params.get("err_msg")
    vm_session = None
    remote_virsh_session = None
    vm = None
    mig_result = None
    remove_dict = {}
    remote_libvirt_file = None
    src_libvirt_file = None

    if not libvirt_version.version_compare(5, 0, 0):
        test.cancel("This libvirt version doesn't support "
                    "virtio-transitional model.")

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    params["virsh_migrate_connect_uri"] = libvirt_vm.complete_uri(
        params.get("migrate_source_host"))
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")

    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up the xml file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)
        # download guest source and update interface model to keep guest up
        if guest_src_url:
            blk_source = download.get_file(guest_src_url, guest_src_path)
            if not blk_source:
                test.error("Fail to download image.")
            params["blk_source_name"] = blk_source
            if (not check_interface) and iface_model:
                iface_dict = {'model': iface_model}
                libvirt.modify_vm_iface(vm_name, "update_iface", iface_dict)
            if not check_disk:
                params["disk_model"] = "virtio-transitional"

        if check_interface:
            libvirt.modify_vm_iface(vm_name, "update_iface", iface_params)

        if check_memballoon:
            membal_dict = {'membal_model': membal_model}
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.update_memballoon_xml(dom_xml, membal_dict)

        if check_rng:
            rng_dict = {'rng_model': rng_model}
            rng_xml = libvirt.create_rng_xml(rng_dict)
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            libvirt.add_vm_device(dom_xml, rng_xml)

        # Change the disk of the vm
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        migration_test.ping_vm(vm, params)

        remove_dict = {"do_search": '{"%s": "ssh:/"}' % dest_uri}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)

        # Execute migration process
        vms = [vm]
        migration_test.do_migration(vms,
                                    None,
                                    dest_uri,
                                    'orderly',
                                    options,
                                    thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra)
        mig_result = migration_test.ret

        migration_test.check_result(mig_result, params)

        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, dest_uri)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = (remote_virsh_session.dumpxml(
                vm_name, debug=True, ignore_status=True).stdout_text.strip())
            if check_disk:
                check_str = disk_model if disk_model else controller_model
            if check_interface:
                check_str = iface_model
            if check_memballoon:
                check_str = membal_model
            if check_rng:
                check_str = rng_model

            xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
            if not re.search(xml_check_after_mig, target_guest_dumpxml):
                test.fail("Fail to search '%s' in target guest XML:\n%s" %
                          (xml_check_after_mig, target_guest_dumpxml))
            remote_virsh_session.close_session()

        # Execute migration from remote
        if migrate_vm_back:
            ssh_connection = utils_conn.SSHConnection(server_ip=client_ip,
                                                      server_pwd=client_pwd,
                                                      client_ip=server_ip,
                                                      client_pwd=server_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            # Pre migration setup for local machine
            migration_test.migrate_pre_setup(src_uri, params)
            remove_dict = {"do_search": ('{"%s": "ssh:/"}' % src_uri)}
            remote_libvirt_file = libvirt_config\
                .remove_key_for_modular_daemon(remove_dict, remote_dargs)

            cmd = "virsh migrate %s %s %s" % (vm_name, options, src_uri)
            logging.debug("Start migration: %s", cmd)
            cmd_result = remote.run_remote_cmd(cmd, params, runner_on_target)
            logging.info(cmd_result)
            if cmd_result.exit_status:
                test.fail("Failed to run '%s' on remote: %s" %
                          (cmd, cmd_result))

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination
        migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()
        logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile)

        if src_libvirt_file:
            src_libvirt_file.restore()
        if remote_libvirt_file:
            del remote_libvirt_file

        # Clean up of pre migration setup for local machine
        if migrate_vm_back:
            if 'ssh_connection' in locals():
                ssh_connection.auto_recover = True
            migration_test.migrate_pre_setup(src_uri, params, cleanup=True)
        if remote_virsh_session:
            remote_virsh_session.close_session()

        logging.info("Remove local NFS image")
        source_file = params.get("source_file")
        libvirt.delete_local_disk("file", path=source_file)
        if guest_src_url and blk_source:
            libvirt.delete_local_disk("file", path=blk_source)
Example #10
def run(test, params, env):
    """
    Run the test

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    extra = params.get("virsh_migrate_extra")
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    options = params.get("virsh_migrate_options",
                         "--live --p2p --persistent --verbose")
    virsh_options = params.get("virsh_options", "")
    stress_package = params.get("stress_package")
    action_during_mig = params.get("action_during_mig")
    migrate_speed = params.get("migrate_speed")
    migrate_again = "yes" == params.get("migrate_again", "no")
    vm_state_after_abort = params.get("vm_state_after_abort")
    return_port = params.get("return_port")

    if action_during_mig:
        action_during_mig = migration_base.parse_funcs(action_during_mig,
                                                       test, params)
    setup_tls = params.get("setup_tls", "no")
    if setup_tls == "yes":
        if not libvirt_version.version_compare(6, 9, 0):
            test.cancel("Cannot support migrate_tls_force in this libvirt version.")

    qemu_conf_src = eval(params.get("qemu_conf_src", "{}"))
    qemu_conf_dest = params.get("qemu_conf_dest", "{}")
    status_error = "yes" == params.get("status_error", "no")
    migrate_tls_force_default = "yes" == params.get("migrate_tls_force_default", "no")
    server_ip = params.get("server_ip")
    server_user = params.get("server_user")
    server_pwd = params.get("server_pwd")
    server_params = {'server_ip': server_ip,
                     'server_user': server_user,
                     'server_pwd': server_pwd}
    tls_obj = None
    qemu_conf_local = None
    qemu_conf_remote = None

    # For safety reasons, we'd better back up the xml file.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        libvirt.set_vm_disk(vm, params)

        # Setup libvirtd remote connection TLS connection env
        if setup_tls == "yes":
            tls_obj = TLSConnection(params)
            tls_obj.auto_recover = True
            tls_obj.conn_setup()

        # Setup default value for migrate_tls_force
        if migrate_tls_force_default:
            value_list = ["migrate_tls_force"]
            # Setup migrate_tls_force default value on remote
            server_params['file_path'] = "/etc/libvirt/qemu.conf"
            libvirt_config.remove_key_in_conf(value_list, "qemu", remote_params=server_params)
            # Setup migrate_tls_force default value on local
            libvirt_config.remove_key_in_conf(value_list, "qemu")

        # Update remote qemu conf
        if qemu_conf_dest:
            qemu_conf_remote = libvirt_remote.update_remote_file(
                server_params, qemu_conf_dest, "/etc/libvirt/qemu.conf")
        # Update local qemu conf
        if qemu_conf_src:
            qemu_conf_local = libvirt.customize_libvirt_config(qemu_conf_src, "qemu")

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        vm.wait_for_login().close()

        if stress_package:
            migration_test.run_stress_in_vm(vm, params)
        if migrate_speed:
            mode = 'both' if '--postcopy' in (postcopy_options or '') else 'precopy'
            migration_test.control_migrate_speed(vm_name,
                                                 int(migrate_speed),
                                                 mode)
        # Execute migration process
        migration_base.do_migration(vm, migration_test, None, dest_uri,
                                    options, virsh_options, extra,
                                    action_during_mig,
                                    extra_args)

        func_returns = dict(migration_test.func_ret)
        migration_test.func_ret.clear()
        logging.debug("Migration returns function results:%s", func_returns)
        if return_port:
            port_used = get_used_port(func_returns)

        if vm_state_after_abort:
            check_vm_state_after_abort(vm_name, vm_state_after_abort,
                                       bk_uri, dest_uri, test)

        if migrate_again:
            action_during_mig = migration_base.parse_funcs(params.get('action_during_mig_again'),
                                                           test,
                                                           params)
            extra_args['status_error'] = params.get("migrate_again_status_error", "no")
            if params.get("virsh_migrate_extra_mig_again"):
                extra = params.get("virsh_migrate_extra_mig_again")
            migration_base.do_migration(vm, migration_test, None, dest_uri,
                                        options, virsh_options,
                                        extra, action_during_mig,
                                        extra_args)
            if return_port:
                func_returns = dict(migration_test.func_ret)
                logging.debug("Migration returns function "
                              "results:%s", func_returns)
                port_second = get_used_port(func_returns)
                if port_used != port_second:
                    test.fail("Expect same port '{}' is used as previous one, "
                              "but found new one '{}'".format(port_used,
                                                              port_second))
                else:
                    logging.debug("Same port '%s' was used as "
                                  "expected", port_second)

        migration_test.post_migration_check([vm], params, uri=dest_uri)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)

        # Restore local qemu conf and restart libvirtd
        if qemu_conf_local:
            logging.debug("Recover local qemu configurations")
            libvirt.customize_libvirt_config(None, config_type="qemu", is_recover=True,
                                             config_object=qemu_conf_local)
        # Restore remote qemu conf and restart libvirtd
        if qemu_conf_remote:
            logging.debug("Recover remote qemu configurations")
            del qemu_conf_remote

        # Clean up TLS test env:
        if setup_tls == "yes" and tls_obj:
            logging.debug("Clean up TLS object")
            del tls_obj
        orig_config_xml.sync()
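
The TLS setup step in isolation, as a sketch (utils_conn.TLSConnection as
used above; params is the avocado-vt test params dict):

tls_obj = TLSConnection(params)
tls_obj.auto_recover = True   # tear the certificates down on deletion
tls_obj.conn_setup()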
Example #11
def run(test, params, env):
    """
    Test only ppc hosts
    """
    if 'ppc64le' not in platform.machine().lower():
        test.cancel('This case is for ppc only.')
    vm_name = params.get('main_vm', 'EXAMPLE')
    status_error = 'yes' == params.get('status_error', 'no')
    case = params.get('case', '')
    error_msg = params.get('error_msg', '')

    # Backup vm xml
    bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Assign address to panic device
        if case == 'panic_address':

            # Check if there is already a panic device on vm, remove it if true
            origin_panic = vmxml.get_devices('panic')
            if origin_panic:
                for dev in origin_panic:
                    vmxml.del_device(dev)
                vmxml.sync()

            # Create panic device to add to vm
            panic_dev = Panic()
            panic_dev.model = 'pseries'
            panic_dev.addr_type = 'isa'
            panic_dev.addr_iobase = '0x505'
            logging.debug(panic_dev)
            vmxml.add_device(panic_dev)
            if version_compare(7, 0, 0):
                cmd_result = virsh.define(vmxml.xml, debug=True)
            else:
                vmxml.sync()
                cmd_result = virsh.start(vm_name, debug=True, ignore_status=True)

        # Get Ethernet pci devices
        if case == 'unavail_pci_device':
            lspci = process.run('lspci|grep Ethernet', shell=True).stdout_text.splitlines()
            pci_ids = [line.split()[0] for line in lspci]
            logging.debug(pci_ids)
            max_id = max([int(pci_id.split('.')[-1]) for pci_id in pci_ids])
            prefix = pci_ids[-1].split('.')[0]

            # Create fake pci ids
            for i in range(5):
                max_id += 1
                # function must be <= 7
                if max_id > 7:
                    break
                new_pci_id = '.'.join([prefix, str(max_id)])
                new_pci_xml = libvirt.create_hostdev_xml(new_pci_id)
                vmxml.add_device(new_pci_xml)
            vmxml.sync()
            logging.debug('Vm xml after adding unavailable pci devices: \n%s', vmxml)

        # Check result if there's a result to check
        if 'cmd_result' in locals():
            libvirt.check_exit_status(cmd_result, status_error)
            if error_msg:
                libvirt.check_result(cmd_result, [error_msg])

    finally:
        # In case vm disappeared after test
        if case == 'unavail_pci_device':
            virsh.define(bk_xml.xml, debug=True)
        else:
            bk_xml.sync()
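
The panic-device construction from the 'panic_address' case, as a
standalone sketch (Panic and vmxml as set up by this test):

panic_dev = Panic()
panic_dev.model = 'pseries'
panic_dev.addr_type = 'isa'      # an explicit ISA address is what the
panic_dev.addr_iobase = '0x505'  # pseries model is expected to reject
vmxml.add_device(panic_dev)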
Example #12
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.
    """

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("vm_sec_type", "dynamic")
    vm_sec_model = params.get("vm_sec_model", "dac")
    vm_sec_label = params.get("vm_sec_label", None)
    vm_sec_relabel = params.get("vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': vm_sec_model,
        'relabel': vm_sec_relabel
    }
    if vm_sec_label:
        sec_dict['label'] = vm_sec_label
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get per-img seclabel variables
    disk_type = params.get("disk_type")
    disk_target = params.get('disk_target')
    disk_src_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    invalid_label = 'yes' == params.get("invalid_label", "no")
    relabel = params.get("per_img_sec_relabel")
    sec_label = params.get("per_img_sec_label")
    per_sec_model = params.get("per_sec_model", 'dac')
    per_img_dict = {
        'sec_model': per_sec_model,
        'relabel': relabel,
        'sec_label': sec_label
    }
    params.update(per_img_dict)
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", 'qemu')
    qemu_group = params.get("qemu_group", 'qemu')
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # When using nfs, the virt_use_nfs should be enabled
    enable_virt_use_nfs = 'yes' == params.get("virt_use_nfs", 'no')
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    if backup_sestatus == "disabled":
        test.cancel("SELinux is in Disabled "
                    "mode. it must be in Enforcing "
                    "mode to run this test")
    utils_selinux.set_status(host_sestatus)
    if enable_virt_use_nfs:
        process.run("setsebool virt_use_nfs on", shell=True)

    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if set_qemu_conf:
            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        # Set the context of the VM.
        logging.debug("sec_dict is %s" % sec_dict)
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()

        # Get per-image seclabel in id string
        if sec_label:
            per_img_usr, per_img_grp = sec_label.split(':')
            sec_label_id = format_user_group_str(per_img_usr, per_img_grp)

        # Start VM to check the qemu process and image.
        try:
            # Set per-img sec context and start vm
            utlv.set_vm_disk(vm, params)
            # Start VM successfully.
            if status_error:
                if invalid_label:
                    # invalid label should fail, more info in bug 1165485
                    logging.debug(
                        "The guest failed to start as expected; for "
                        "details see bug: bugzilla.redhat.com/show_bug.cgi"
                        "?id=1165485")
                else:
                    test.fail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)
            logging.debug("vm process label is: %s", vm_context)

            # Get vm image label when VM is running
            if disk_type != "network":
                disks = vm.get_blk_devices()
                if libvirt_version.version_compare(3, 1,
                                                   0) and disk_type == "block":
                    output = astring.to_text(
                        process.system_output(
                            "nsenter -t %d -m -- ls -l %s" %
                            (vm_pid, disks[disk_target]['source'])))
                    owner, group = output.strip().split()[2:4]
                    disk_context = format_user_group_str(owner, group)
                else:
                    stat_re = os.stat(disks[disk_target]['source'])
                    disk_context = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
                logging.debug("The disk dac label after vm start is: %s",
                              disk_context)
                if sec_label and relabel == 'yes':
                    if disk_context != sec_label_id:
                        test.fail("The disk label is not equal to "
                                  "'%s'." % sec_label_id)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case." "error: %s" % e)
    finally:
        # clean up
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if qemu_sock_mod:
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        utils_selinux.set_status(backup_sestatus)
        if disk_src_protocol == 'iscsi':
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'gluster':
            gluster.setup_or_cleanup_gluster(False,
                                             brick_path=brick_path,
                                             **params)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            utlv.setup_or_cleanup_nfs(is_setup=False,
                                      restore_selinux=backup_sestatus)
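
The seclabel injection at the heart of this test, sketched on its own
(vmxml as prepared above; the label values are hypothetical):

sec_dict = {'type': 'static', 'model': 'dac',
            'relabel': 'yes', 'label': 'qemu:qemu'}
vmxml.set_seclabel([sec_dict])
vmxml.sync()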
Example #13
def run(test, params, env):
    """
    Test the command virsh maxvcpus

    (1) Call virsh maxvcpus
    (2) Call virsh -c remote_uri maxvcpus
    (3) Call virsh maxvcpus with an unexpected option
    """

    # get the params from subtests.
    # params for general.
    option = params.get("virsh_maxvcpus_options")
    status_error = params.get("status_error")
    connect_arg = params.get("connect_arg", "")

    # params for transport connect.
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
    server_ip = params.get("remote_ip", local_ip)
    server_pwd = params.get("remote_pwd", local_pwd)
    transport_type = params.get("connect_transport_type", "local")
    transport = params.get("connect_transport", "ssh")
    connect_uri = None
    # check the config
    if (connect_arg == "transport" and
            transport_type == "remote" and
            local_ip.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_ip is not configured "
                                       "in remote test.")
    if (connect_arg == "transport" and
            transport_type == "remote" and
            local_pwd.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_pwd is not configured "
                                       "in remote test.")

    if connect_arg == "transport":
        canonical_uri_type = virsh.driver()

        if transport == "ssh":
            ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type,
                transport=transport, dest_ip=server_ip)
            virsh_dargs = {'remote_ip': server_ip, 'remote_user': '******',
                           'remote_pwd': server_pwd,
                           'ssh_remote_auth': True}
            virsh_instance = virsh.VirshPersistent(**virsh_dargs)
    else:
        connect_uri = connect_arg
        virsh_instance = virsh

    if libvirt_version.version_compare(2, 3, 0):
        try:
            maxvcpus = None
            maxvcpus_cap = None
            dom_capabilities = None
            # Make sure we take maxvcpus from the right host; this helps
            # in the remote case
            try:
                dom_capabilities = domcap.DomCapabilityXML(virsh_instance=virsh_instance)
                maxvcpus = dom_capabilities.max
                logging.debug("maxvcpus calculate from domcapabilities "
                              "is %s", maxvcpus)
            except Exception as details:
                raise exceptions.TestFail("Failed to get maxvcpus from "
                                          "domcapabilities xml:\n%s"
                                          % dom_capabilities)
            try:
                cap_xml = capability_xml.CapabilityXML()
                maxvcpus_cap = cap_xml.get_guest_capabilities()['hvm'][platform.machine()]['maxcpus']
                logging.debug('maxvcpus_cap is %s', maxvcpus_cap)
            except Exception as details:
                logging.debug("Failed to get maxvcpu from virsh "
                              "capabilities: %s", details)
                # Let's fall back in case of failure
                maxvcpus_cap = maxvcpus
            if not maxvcpus:
                raise exceptions.TestFail("Failed to get max value for vcpu"
                                          "from domcapabilities "
                                          "xml:\n%s" % dom_capabilities)
        except Exception as details:
            raise exceptions.TestFail("Failed get the virsh instance with uri: "
                                      "%s\n Details: %s" % (connect_uri, details))

    is_arm = "aarch" in platform.machine()
    gic_version = ''
    if is_arm:
        for gic_enum in domcap.DomCapabilityXML()['features']['gic_enums']:
            if gic_enum['name'] == "version":
                gic_version = gic_enum['values'][0].get_value()

    # Run test case
    result = virsh.maxvcpus(option, uri=connect_uri, ignore_status=True,
                            debug=True)

    maxvcpus_test = result.stdout.strip()
    status = result.exit_status

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise exceptions.TestFail("Run succeeded with unsupported option!")
        else:
            logging.info("Run failed with unsupported option %s " % option)
    elif status_error == "no":
        if status == 0:
            if not libvirt_version.version_compare(2, 3, 0):
                if "kqemu" in option:
                    if not maxvcpus_test == '1':
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s " % (maxvcpus_test, option))
                elif option in ['qemu', '--type qemu', '']:
                    if not maxvcpus_test == '16':
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s " % (maxvcpus_test, option))
                else:
                    # No check with other types
                    pass
            else:
                # It covers all possible combinations
                if option in ['qemu', 'kvm', '--type qemu', '--type kvm', 'kqemu', '--type kqemu', '']:
                    if (is_arm and gic_version == '2' and option in ['kvm', '']):
                        if not maxvcpus_test == '8':
                            raise exceptions.TestFail("Command output %s is not "
                                                      "expected for %s " % (maxvcpus_test, option))
                    elif not (maxvcpus_test == maxvcpus or maxvcpus_test == maxvcpus_cap):
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s " % (maxvcpus_test, option))
                else:
                    # No check with other types
                    pass
        else:
            raise exceptions.TestFail("Run command failed")
Example #14
def run(test, params, env):
    """
    1. Configure kernel cmdline to support kdump
    2. Start kdump service
    3. Inject NMI to the guest
    4. Check NMI times
    """
    for cmd in ('inject-nmi', 'qemu-monitor-command'):
        if not virsh.has_help_command(cmd):
            test.cancel("This version of libvirt does not "
                        "support the %s test" % cmd)

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    start_vm = params.get("start_vm")
    expected_nmi_times = params.get("expected_nmi_times", '0')
    kernel_params = params.get("kernel_params", "")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    try:
        if kernel_params:
            update_boot_option_and_reboot(vm, kernel_params, test)
        if start_vm == "yes":
            # start kdump service in the guest
            cmd = "which kdump"
            try:
                run_cmd_in_guest(vm, cmd, test)
            except Exception:
                try:
                    # try to install kexec-tools on fedoraX/rhelx.y guest
                    run_cmd_in_guest(vm, "yum install -y kexec-tools", test)
                except Exception:
                    test.error(
                        "Requires kexec-tools(or the equivalent for your distro)"
                    )

            # enable kdump service in the guest
            cmd = "service kdump start"
            run_cmd_in_guest(vm, cmd, test, timeout=120)

            # filter original 'NMI' information from the /proc/interrupts
            cmd = "grep NMI /proc/interrupts"
            nmi_str = run_cmd_in_guest(vm, cmd, test)

            # count the processors listed in /proc/cpuinfo
            cmd = "grep -E '^process' /proc/cpuinfo | wc -l"
            vcpu_num = run_cmd_in_guest(vm, cmd, test).strip()

            logging.info("Inject NMI to the guest via virsh inject_nmi")
            virsh.inject_nmi(vm_name, debug=True, ignore_status=False)

            logging.info(
                "Inject NMI to the guest via virsh qemu_monitor_command")
            virsh.qemu_monitor_command(vm_name, '{"execute":"inject-nmi"}')

            # injects a Non-Maskable Interrupt into the default CPU (x86/s390)
            # or all CPUs (ppc64), as usual, the default CPU index is 0
            cmd = "grep NMI /proc/interrupts | awk '{print $2}'"
            nmi_from_default_vcpu = run_cmd_in_guest(vm, cmd, test)
            real_nmi_times = nmi_from_default_vcpu.splitlines()[0]
            logging.debug("The current Non-Maskable Interrupts: %s",
                          real_nmi_times)
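            # A typical NMI row in /proc/interrupts looks like:
            #   NMI:    2    0    0    0    Non-maskable interrupts
            # so awk's $2 is the count on the default CPU (index 0).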

            # check Non-maskable interrupts times
            if real_nmi_times != expected_nmi_times:
                test.fail("NMI times aren't expected %s:%s" %
                          (real_nmi_times, expected_nmi_times))
    finally:
        if kernel_params:
            cmd = "grubby --update-kernel=`grubby --default-kernel` --remove-args='%s'" % kernel_params
            run_cmd_in_guest(vm, cmd, test)
            vm.reboot()
Example #15
def run(test, params, env):
    """
    Test the virsh pool commands

    (1) Define a given type pool
    (2) List pool with '--inactive --type' options
    (3) Dumpxml for the pool
    (4) Undefine the pool
    (5) Define pool by using the XML file in step (3)
    (6) Build the pool (except for 'disk' type pools)
        For 'fs' type pool, cover --overwrite and --no-overwrite options
    (7) Start the pool
    (8) List pool with '--persistent --type' options
    (9) Mark pool autostart
    (10) List pool with '--autostart --type' options
    (11) Restart libvirtd and list pool with '--autostart --persistent' options
    (12) Destroy the pool
    (13) Unmark pool autostart
    (14) Repeat step (11)
    (15) Start the pool
    (16) Get pool info
    (17) Get pool uuid by name
    (18) Get pool name by uuid
    (19) Refresh the pool
         For 'dir' type pool, touch a file under target path and refresh again
         to make the new file show in vol-list.
    (20) Check pool 'Capacity', 'Allocation' and 'Available'
         Create an oversized vol in the pool (expect failure), then check these values
    (21) Undefine the pool, and this should fail as pool is still active
    (22) Destroy the pool
    (23) Delete pool for 'dir' type pool. After the command, the pool object
         will still exist but its target path will be deleted
    (24) Undefine the pool
    """

    # Initialize the variables
    pool_name = params.get("pool_name", "temp_pool_1")
    pool_type = params.get("pool_type", "dir")
    pool_target = params.get("pool_target", "")
    source_format = params.get("source_format", "")
    source_name = params.get("pool_source_name", "gluster-vol1")
    source_path = params.get("pool_source_path", "/")
    new_pool_name = params.get("new_pool_name", "")
    build_option = params.get("build_option", "")
    source_initiator = params.get("source_initiator", "")
    same_source_test = "yes" == params.get("same_source_test", "no")
    customize_initiator_iqn = "yes" == params.get("customize_initiator_iqn",
                                                  "no")
    # The file for dumped pool xml
    poolxml = os.path.join(data_dir.get_tmp_dir(), "pool.xml.tmp")
    if os.path.dirname(pool_target) == "":
        pool_target = os.path.join(data_dir.get_tmp_dir(), pool_target)
    vol_name = params.get("volume_name", "temp_vol_1")
    # Use pool name as VG name
    status_error = "yes" == params.get("status_error", "no")
    vol_path = os.path.join(pool_target, vol_name)
    ip_protocal = params.get('ip_protocal', 'ipv4')
    source_protocol_ver = params.get('source_protocol_ver', "no")

    if not libvirt_version.version_compare(1, 0, 0):
        if pool_type == "gluster":
            test.cancel("Gluster pool is not supported in current"
                        " libvirt version.")
    if not libvirt_version.version_compare(4, 7, 0):
        if pool_type == "iscsi-direct":
            test.cancel("iSCSI-direct pool is not supported in current"
                        "libvirt version.")
    if source_initiator and not libvirt_version.version_compare(6, 10, 0):
        test.cancel("Source_initiator option is not supported in current"
                    " libvirt_version.")
    if source_protocol_ver == "yes" and not libvirt_version.version_compare(
            4, 5, 0):
        test.cancel("source-protocol-ver is not supported on current version.")

    def check_pool_list(pool_name, option="--all", expect_error=False):
        """
        Check pool by running pool-list command with given option.

        :param pool_name: Name of the pool
        :param option: option for pool-list command
        :param expect_error: Boolean value, expect command success or fail
        """
        found = False
        # Get the list stored in a variable
        result = virsh.pool_list(option, ignore_status=True)
        utlv.check_exit_status(result, False)
        output = re.findall(r"(\S+)\ +(\S+)\ +(\S+)",
                            str(result.stdout.strip()))
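        # The regex above matches rows of 'virsh pool-list' output, e.g.:
        #   Name          State      Autostart
        #   ----------------------------------
        #   temp_pool_1   inactive   no
        # (illustrative output; the header rows never contain the pool name)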
        for item in output:
            if pool_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found pool '%s' in pool list.", pool_name)
        else:
            logging.debug("Pool '%s' not found in pool list.", pool_name)
        if expect_error and found:
            test.fail("Unexpected pool '%s' exists." % pool_name)
        if not expect_error and not found:
            test.fail("Expected pool '%s' does not exist." % pool_name)

    def check_vol_list(vol_name, pool_name):
        """
        Check volume from the list

        :param vol_name: Name of the volume
        :param pool_name: Name of the pool
        """
        found = False
        # Get the volume list stored in a variable
        result = virsh.vol_list(pool_name, ignore_status=True)
        utlv.check_exit_status(result)

        output = re.findall(r"(\S+)\ +(\S+)", str(result.stdout.strip()))
        for item in output:
            if vol_name in item[0]:
                found = True
                break
        if found:
            logging.debug("Found volume '%s' in pool '%s'.", vol_name,
                          pool_name)
        else:
            test.fail("Volume '%s' not found in pool '%s'." %
                      (vol_name, pool_name))

    def is_in_range(actual, expected, error_percent):
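        """
        Return True if actual falls within error_percent below expected.
        The deviation is one-sided: an actual value above expected yields
        a negative deviation and always passes.
        """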
        deviation = 100 - (100 * (float(actual) / float(expected)))
        logging.debug("Deviation: %0.2f%%", float(deviation))
        return float(deviation) <= float(error_percent)

    def check_pool_info(pool_info, check_point, value):
        """
        Check the pool name, uuid, etc.

        :param pool_info: A dict including the pool's information
        :param check_point: Key of the pool info dict; available values: Name,
                    UUID, State, Persistent, Autostart, Capacity, Allocation,
                    Available
        :param value: Expected value of pool_info[check_point]
        """
        if pool_info is None:
            test.fail("Pool info dictionary is needed.")
        # If pool info does not return the value you need to check,
        # raise appropriate error message.
        if pool_info.get(check_point) is None:
            test.fail("The value {} you checked is not returned"
                      " in pool_info : {}".format(check_point, pool_info))
        else:
            val_tup = ('Capacity', 'Allocation', 'Available')
            if check_point in val_tup and float(value.split()[0]):
                # Converting from bytes to GiB can introduce deviation, which
                # should not exceed 1 percent.
                if is_in_range(float(pool_info[check_point].split()[0]),
                               float(value.split()[0]), 1):
                    logging.debug("Pool '%s' is '%s'.", check_point, value)
                else:
                    test.fail("Pool '%s' isn't '%s'." % (check_point, value))
            else:
                if pool_info[check_point] == value:
                    logging.debug("Pool '%s' is '%s'.", check_point, value)
                else:
                    test.fail("Pool '%s' isn't '%s'." % (check_point, value))

    # Stop multipathd to avoid pool start failures (for fs-like pools, the
    # newly added disk may be claimed by device-mapper, so starting the pool
    # would report a "disk already mounted" error).
    multipathd = service.Factory.create_service("multipathd")
    multipathd_status = multipathd.status()
    if multipathd_status:
        multipathd.stop()

    # Run Testcase
    pvt = utlv.PoolVolumeTest(test, params)
    kwargs = {
        'image_size': '1G',
        'pre_disk_vol': ['100M'],
        'source_name': source_name,
        'source_path': source_path,
        'source_format': source_format,
        'persistent': True,
        'ip_protocal': ip_protocal,
        'emulated_image': "emulated-image",
        'pool_target': pool_target,
        'source_initiator': source_initiator,
        'source_protocol_ver': source_protocol_ver
    }
    params.update(kwargs)

    try:
        _pool = libvirt_storage.StoragePool()
        # Step (1)
        # Pool define
        pvt.pre_pool(**params)

        # Step (2)
        # Pool list
        option = "--inactive --type %s" % pool_type
        check_pool_list(pool_name, option)

        # Step (3)
        # Pool dumpxml
        xml = virsh.pool_dumpxml(pool_name, to_file=poolxml)
        logging.debug("Pool '%s' XML:\n%s", pool_name, xml)

        # Update pool name
        if new_pool_name:
            if "/" in new_pool_name:
                new_pool_name = new_pool_name.replace("/", "\\/")
                logging.debug(new_pool_name)
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            p_xml.name = new_pool_name
            del p_xml.uuid
            poolxml = p_xml.xml
            logging.debug("XML after update pool name:\n%s" % p_xml)

        # Update host name
        if same_source_test:
            s_xml = p_xml.get_source()
            s_xml.host_name = "192.168.1.1"
            p_xml.set_source(s_xml)
            poolxml = p_xml.xml
            logging.debug("XML after update host name:\n%s" % p_xml)

        if customize_initiator_iqn:
            initiator_iqn = params.get("initiator_iqn",
                                       "iqn.2018-07.com.virttest:pool.target")
            p_xml = pool_xml.PoolXML.new_from_dumpxml(pool_name)
            s_node = p_xml.xmltreefile.find('/source')
            i_node = ET.SubElement(s_node, 'initiator')
            ET.SubElement(i_node, 'iqn', {'name': initiator_iqn})
            p_xml.xmltreefile.write()
            poolxml = p_xml.xml
            logging.debug('XML after add Multi-IQN:\n%s' % p_xml)

        # Step (4)
        # Undefine pool
        if not same_source_test:
            result = virsh.pool_undefine(pool_name)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)

        # Step (5)
        # Define pool from XML file
        result = virsh.pool_define(poolxml, debug=True)
        # Give error msg when exit status is not expected
        if "/" in new_pool_name and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=639923 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if "." in new_pool_name and result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1333248 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        if same_source_test and not result.exit_status:
            error_msg = "https://bugzilla.redhat.com/show_bug.cgi?id=1171984 "
            error_msg += "is helpful for tracing this bug."
            logging.error(error_msg)
        utlv.check_exit_status(result, status_error)
        if not result.exit_status:
            # Step (6)
            # Build pool
            # '--overwrite/--no-overwrite' only apply to fs/disk/logical pools
            # disk/fs pool: the prepare step already labels the disk and
            #               creates a filesystem, so '--overwrite' is necessary
            # logical pool: building the pool fails if the VG already exists,
            #               BZ#1373711
            if new_pool_name:
                pool_name = new_pool_name
            if pool_type != "logical":
                result = virsh.pool_build(pool_name,
                                          build_option,
                                          ignore_status=True)
                utlv.check_exit_status(result)

            # Step (7)
            # Pool start
            result = virsh.pool_start(pool_name,
                                      debug=True,
                                      ignore_status=True)
            utlv.check_exit_status(result)

            # Step (8)
            # Pool list
            option = "--persistent --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (9)
            # Pool autostart
            result = virsh.pool_autostart(pool_name, ignore_status=True)
            utlv.check_exit_status(result)

            # Step (10)
            # Pool list
            option = "--autostart --type %s" % pool_type
            check_pool_list(pool_name, option)

            # Step (11)
            # Restart libvirtd and check the autostart pool
            utils_libvirtd.unmark_storage_autostarted()
            utils_libvirtd.Libvirtd("virtstoraged").restart()
            option = "--autostart --persistent"
            check_pool_list(pool_name, option)

            # Step (12)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool % failed." % pool_name)

            # Step (13)
            # Pool autostart disable
            result = virsh.pool_autostart(pool_name,
                                          "--disable",
                                          ignore_status=True)
            utlv.check_exit_status(result)

            # Step (14)
            # Repeat step (11)
            utils_libvirtd.Libvirtd("virtstoraged").restart()
            option = "--autostart"
            check_pool_list(pool_name, option, True)

            # Step (15)
            # Pool start
            # When libvirtd starts up, it'll check to see if any of the storage
            # pools have been activated externally. If so, then it'll mark the
            # pool as active. This is independent of autostart.
            # So a directory-based storage pool is pretty much always active,
            # as is the SCSI pool.
            if pool_type not in ['dir', 'scsi']:
                if pool_type == 'disk' and libvirt_version.version_compare(
                        8, 1, 0):
                    utlv.check_exit_status(result)
                else:
                    result = virsh.pool_start(pool_name, ignore_status=True)
                    utlv.check_exit_status(result)

            # Step (16)
            # Pool info
            pool_info = _pool.pool_info(pool_name)
            logging.debug("Pool '%s' info:\n%s", pool_name, pool_info)

            # Step (17)
            # Pool UUID
            result = virsh.pool_uuid(pool_info["Name"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "UUID", result.stdout.strip())

            # Step (18)
            # Pool Name
            result = virsh.pool_name(pool_info["UUID"], ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_info(pool_info, "Name", result.stdout.strip())

            # Step (19)
            # Pool refresh for 'dir' type pool
            if pool_type == "dir":
                os.mknod(vol_path)
                result = virsh.pool_refresh(pool_name)
                utlv.check_exit_status(result)
                check_vol_list(vol_name, pool_name)

            # Step (20)
            # Create an over size vol in pool(expect fail), then check pool:
            # 'Capacity', 'Allocation' and 'Available'
            # For NFS type pools there's a bug (BZ#1077068) with volume
            # allocation, and glusterfs pools don't support volume creation,
            # so skip them
            if pool_type != "netfs":
                vol_capacity = "10000G"
                vol_allocation = "10000G"
                result = virsh.vol_create_as("oversize_vol", pool_name,
                                             vol_capacity, vol_allocation,
                                             "raw")
                utlv.check_exit_status(result, True)
                new_info = _pool.pool_info(pool_name)
                check_items = ["Capacity", "Allocation", "Available"]
                for i in check_items:
                    if new_info.get(i) is None:
                        test.fail("The value {} you checked is not returned "
                                  "in pool_info : {}".format(i, new_info))
                    else:
                        check_pool_info(pool_info, i, new_info[i])

            # Step (21)
            # Undefine pool, this should fail as the pool is active
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result, expect_error=True)
            check_pool_list(pool_name, "", False)

            # Step (22)
            # Pool destroy
            if virsh.pool_destroy(pool_name):
                logging.debug("Pool %s destroyed.", pool_name)
            else:
                test.fail("Destroy pool % failed." % pool_name)

            # Step (23)
            # Pool delete for 'dir' type pool
            if pool_type == "dir":
                for f in os.listdir(pool_target):
                    os.remove(os.path.join(pool_target, f))
                result = virsh.pool_delete(pool_name, ignore_status=True)
                utlv.check_exit_status(result)
                option = "--inactive --type %s" % pool_type
                check_pool_list(pool_name, option)
                if os.path.exists(pool_target):
                    test.fail("The target path '%s' still exists." %
                              pool_target)
                result = virsh.pool_start(pool_name, ignore_status=True)
                utlv.check_exit_status(result, True)

            # Step (24)
            # Pool undefine
            result = virsh.pool_undefine(pool_name, ignore_status=True)
            utlv.check_exit_status(result)
            check_pool_list(pool_name, "--all", True)
    finally:
        # Clean up
        try:
            pvt.cleanup_pool(**params)
            utlv.setup_or_cleanup_iscsi(False)
        except exceptions.TestFail as detail:
            logging.error(str(detail))
        if multipathd_status:
            multipathd.start()
        if os.path.exists(poolxml):
            os.remove(poolxml)
Example #16
def run(test, params, env):
    """
    Test virsh {at|de}tach-device command.

    1) Prepare test environment and its parameters
    2) Operate virsh on one or more devices
    3) Check functionality of each device
    4) Check functionality of mmconfig option
    5) Restore domain
    6) Handle results
    """
    vm_name = params.get('main_vm')
    machine_type = params.get("machine_type", "pc")
    backup_vm_xml = vmxml = VMXML.new_from_inactive_dumpxml(vm_name)

    dev_obj = params.get("vadu_dev_objs")
    vadu_vdb = int(params.get("vadu_dev_obj_count_VirtualDiskBasic", "0"))
    vadu_dom_ref = params.get("vadu_dom_ref", "dom_ref")
    status_error = "yes" == params.get("status_error", "no")
    vadu_domain_positional = "yes" == params.get("vadu_domain_positional", "no")
    vadu_file_positional = "yes" == params.get("vadu_file_positional", "no")
    vadu_preboot_error = "yes" == params.get("vadu_preboot_function_error", "no")

    # Skip chardev hotplug on rhel6 host as it is not supported
    if "Serial" in dev_obj:
        if not libvirt_version.version_compare(1, 1, 0):
            test.cancel("You libvirt version not supported"
                        " attach/detach Serial devices")
    # Prepare test environment and its parameters
    test_params = TestParams(params, env, test)

    # Reset pci controllers num to fix error "No more available PCI slots"
    if params.get("reset_pci_controllers_nums", "no") == "yes" and "VirtualDiskBasic" in dev_obj:
        # Only apply change on some cases with feature:
        # block.multi_virtio_file..hot_attach_hot_vm..name_ref.file_positional.domain_positional
        # block.multi_virtio_file..hot_attach_hot_vm_current.name_ref.file_positional.domain_positional
        # Those cases often failed on aarch64 due to the error "No more available PCI slots"
        if vadu_vdb == 16 and not status_error \
            and not vadu_preboot_error and 'name' in vadu_dom_ref \
                and vadu_file_positional and vadu_domain_positional:

            previous_state_running = test_params.main_vm.is_alive()
            if previous_state_running:
                test_params.main_vm.destroy(gracefully=True)
            libvirt_pcicontr.reset_pci_num(vm_name, 24)
            logging.debug(
                    "Guest XML with many controllers added: %s",
                    test_params.main_vm.get_xml())
            if previous_state_running:
                test_params.main_vm.start()

    remove_non_disks(vm_name, vmxml)
    update_controllers_ppc(vm_name, vmxml)

    if params.get("remove_all_chardev", "no") == "yes":
        remove_chardevs(vm_name, vmxml)

    logging.info("Preparing initial VM state")

    if test_params.start_vm:
        # Make sure VM is working
        test_params.main_vm.verify_alive()
        test_params.main_vm.wait_for_login().close()
    else:  # VM not supposed to be started
        if test_params.main_vm.is_alive():
            test_params.main_vm.destroy(gracefully=True)
    # Capture backup of original XML early in test
    test_params.vmxml = VMXML.new_from_inactive_dumpxml(
        test_params.main_vm.name)
    # All devices should share same access state
    test_params.virsh = virsh.Virsh(ignore_status=True)
    logging.info("Creating %d test device instances", len(test_params.devs))
    # Create test objects from cfg. class names via subclasses above
    test_devices = [globals()[class_name](test_params, test)  # instantiate
                    for class_name in test_params.devs]  # vadu_dev_objs
    operational_results = []
    preboot_results = []
    pstboot_results = []
    try:
        operational_action(test_params, test_devices, operational_results)
        # Fail early if attach-device return value is not expected
        analyze_results(test_params, test,
                        operational_results=operational_results)

        #  Can't do functional testing with a cold VM, only test hot-attach
        preboot_action(test_params, test_devices, preboot_results)

        logging.info("Preparing test VM state for post-boot functional testing")
        if test_params.start_vm:
            # Hard-reboot required
            test_params.main_vm.destroy(gracefully=True,
                                        free_mac_addresses=False)
        try:
            logging.debug("vmxml %s", VMXML.new_from_inactive_dumpxml(vm_name))
            test_params.main_vm.start()
        except virt_vm.VMStartError as details:
            test.fail('VM Failed to start for some reason!: %s' % details)
        # Signal devices reboot is finished
        for test_device in test_devices:
            test_device.booted = True
        logging.debug("Current VMXML %s", test_params.main_vm.get_xml())
        test_params.main_vm.wait_for_login().close()
        postboot_action(test_params, test_devices, pstboot_results)
        analyze_results(test_params, test,
                        preboot_results=preboot_results,
                        pstboot_results=pstboot_results)
    finally:
        logging.info("Restoring VM from backup, then checking results")
        test_params.main_vm.destroy(gracefully=False,
                                    free_mac_addresses=False)
        test_params.vmxml.undefine()
        test_params.vmxml.restore()  # Recover the original XML
        test_params.vmxml.define()
        if not test_params.start_vm:
            # Test began with not start_vm, shut it down.
            test_params.main_vm.destroy(gracefully=True)
        # Device cleanup can raise multiple exceptions, do it last:
        logging.info("Cleaning up test devices")
        try:
            test_params.cleanup(test_devices)
        except RuntimeError as e:
            logging.debug("Error cleaning up devices: %s", e)
        backup_vm_xml.sync()
Example #17
def run(test, params, env):
    """
    Test start domain with nwfilter rules.

    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    exist_filter = params.get("exist_filter", "no-mac-spoofing")
    status_error = "yes" == params.get("status_error", "no")
    mount_noexec_tmp = "yes" == params.get("mount_noexec_tmp", "no")
    kill_libvirtd = "yes" == params.get("kill_libvirtd", "no")
    bug_url = params.get("bug_url", "")
    ipset_command = params.get("ipset_command")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    username = params.get("username")
    password = params.get("password")
    need_vm2 = "yes" == params.get("need_vm2", "no")
    add_vm_name = params.get("add_vm_name", "vm2")
    vms = [vm]
    dst_outside = params.get("dst_outside", "www.google.com")
    ping_timeout = int(params.get("ping_timeout", "10"))

    # Prepare vm filterref parameters dict list
    filter_param_list = []
    params_key = []
    for i in params.keys():
        if 'parameter_name_' in i:
            params_key.append(i)
    params_key.sort()
    for i in range(len(params_key)):
        params_dict = {}
        params_dict['name'] = params[params_key[i]]
        params_dict['value'] = params['parameter_value_%s' % i]
        if params_dict['value'] == "MAC_of_virbr0":
            virbr0_info = process.run("ip a | grep virbr0: -A1",
                                      shell=True).stdout_text.strip()
            virbr0_mac = re.search(
                r'link/ether\s+(\w{2}:\w{2}:\w{2}:\w{2}:\w{2}:\w{2})',
                virbr0_info, re.M | re.I).group(1)
            params_dict['value'] = virbr0_mac
            logging.debug("params_dict['value'] is %s " % params_dict['value'])
        filter_param_list.append(params_dict)
    filterref_dict = {}
    filterref_dict['name'] = filter_name
    filterref_dict['parameters'] = filter_param_list
    params['filter_uuid'] = process.run("uuidgen",
                                        ignore_status=True,
                                        shell=True).stdout_text.strip()

    # Get all the check commands and corresponding expected results from the config file and build a dictionary
    cmd_list_ = params.get('check_cmd', '')
    if cmd_list_:
        cmd_list = cmd_list_.split(',')
        expect_res = params.get('expect_match', '').split(',')
        logging.debug("cmd_list is %s" % cmd_list)
        logging.debug("expect_res is %s" % expect_res)
        cmd_result_dict = dict(zip(cmd_list, expect_res))
        logging.debug("the check dict is %s" % cmd_result_dict)
    # backup vm xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    libvirtd = utils_libvirtd.Libvirtd("virtqemud")
    device_name = None

    def check_nwfilter_rules(check_cmd, expect_match):
        """"check the nwfilter corresponding rule is added by iptables commands"""
        ret = utils_misc.wait_for(lambda: not process.system(
            check_cmd, ignore_status=True, shell=True),
                                  timeout=30)
        if not ret:
            test.fail("Rum command '%s' failed" % check_cmd)
        # This covers the nwfilter_vm_start.possitive_test.new_filter.variable_notation case:
        # the matched destination could be an ip address or a hostname
        if "iptables -L" in check_cmd and expect_match and 'ACCEPT' in expect_match:
            # ip address that need to be replaced
            replace_param = params.get("parameter_value_2")
            # Get hostname by ip address.
            hostname_info = None
            try:
                hostname_info = socket.gethostbyaddr(replace_param)
            except socket.error as e:
                logging.info(
                    "Failed to get hostname from ip address with error: %s",
                    str(e))
            if hostname_info:
                # String is used to replace ip address
                replace_with = "%s|%s" % (replace_param, hostname_info[0])
                expect_match = r"%s" % expect_match.replace(
                    replace_param, replace_with)
                logging.debug("final iptables match string:%s", expect_match)
        out = astring.to_text(
            process.system_output(check_cmd, ignore_status=False, shell=True))
        if expect_match and not re.search(expect_match, out):
            test.fail("'%s' not found in output: %s" % (expect_match, out))

    def clean_up_dirty_nwfilter_binding():
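        """
        Delete any leftover nwfilter bindings from previous runs; the first
        column (port dev) of 'virsh nwfilter-binding-list' output is parsed,
        skipping the two header lines.
        """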
        cmd_result = virsh.nwfilter_binding_list(debug=True)
        binding_list = cmd_result.stdout_text.strip().splitlines()
        binding_list = binding_list[2:]
        result = []
        # If binding list is not empty.
        if binding_list:
            for line in binding_list:
                # Split on whitespace, take the first column
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        logging.info("nwfilter binding list is: %s", result)
        for binding_uuid in result:
            try:
                virsh.nwfilter_binding_delete(binding_uuid)
            except Exception as e:
                logging.error(
                    "Exception thrown while undefining nwfilter-binding: %s",
                    str(e))
                raise

    try:
        # Clean up dirty nwfilter binding if there are.
        clean_up_dirty_nwfilter_binding()
        rule = params.get("rule")
        if rule:
            # Create new filter xml
            filterxml = utlv.create_nwfilter_xml(params)
            # Define filter xml
            virsh.nwfilter_define(filterxml.xml, debug=True)

        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()

        if mount_noexec_tmp:
            device_name = utlv.setup_or_cleanup_iscsi(is_setup=True)
            utlv.mkfs(device_name, 'ext4')
            cmd = "mount %s /tmp -o noexec,nosuid" % device_name
            process.run(cmd, shell=True)

        if ipset_command:
            pkg = "ipset"
            if not utils_package.package_install(pkg):
                test.cancel("Can't install ipset on host")
            process.run(ipset_command, shell=True)

        # Run command
        try:
            vm.start()
            if not mount_noexec_tmp:
                vm.wait_for_serial_login(username=username, password=password)
            vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            iface_xml = vmxml.get_devices('interface')[0]
            iface_target = iface_xml.target['dev']
            iface_mac = iface_xml.mac_address
            logging.debug("iface target dev name is %s", iface_target)

            # Check iptables or ebtables on host
            if need_vm2:
                # Clone more vm for testing
                result = virsh.dom_list('--inactive').stdout_text
                if add_vm_name in result:
                    logging.debug("%s is already exists!" % add_vm_name)
                    vms.append(env.get_vm(add_vm_name))
                else:
                    vm.destroy()
                    ret_clone = utils_libguestfs.virt_clone_cmd(vm_name,
                                                                add_vm_name,
                                                                True,
                                                                timeout=360)
                    if ret_clone.exit_status:
                        test.fail("Error when clone a second vm!")
                    vms.append(vm.clone(add_vm_name))
                    vm.start()
                vm2 = vms[1]
                logging.debug("Now the vms is: %s", [dom.name for dom in vms])
                # update the vm2 interface with the nwfilter
                logging.debug("filter_params_list is %s" % filter_param_list)
                iface_dict = {
                    "filter": filter_name,
                    "filter_parameters": filter_param_list,
                    "del_mac": "yes"
                }
                if vm2.is_alive():
                    vm2.destroy()
                utlv.modify_vm_iface(vm2.name, "update_iface", iface_dict)
                vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
                iface_xml = vmxml.get_devices('interface')[0]
                logging.debug("iface_xml for vm2 is %s" % iface_xml)
                vm2.start()
                vm2_session = vm2.wait_for_serial_login()
                vm2_mac = vm2.get_mac_address()
                vm2_ip = utils_net.get_guest_ip_addr(vm2_session, vm2_mac)
                vm.session = vm.wait_for_serial_login()
                # Test network functions; the two vms should not be able to reach each other
                gateway_ip = utils_net.get_ip_address_by_interface("virbr0")
                status1, output1 = utils_net.ping(dest=vm2_ip,
                                                  count='3',
                                                  timeout=ping_timeout,
                                                  session=vm.session,
                                                  force_ipv4=True)
                status2, output2 = utils_net.ping(dest=gateway_ip,
                                                  count='3',
                                                  timeout=ping_timeout,
                                                  session=vm.session,
                                                  force_ipv4=True)
                status3, output3 = utils_net.ping(dest=dst_outside,
                                                  count='3',
                                                  timeout=ping_timeout,
                                                  session=vm.session,
                                                  force_ipv4=True)
                if not status1:
                    test.fail(
                        "vm with clean-traffic-gateway pinged %s (%s) successfully, but this is not expected!"
                        % (vm2.name, vm2_ip))
                if status2 or status3:
                    test.fail("vm ping failed! check %s \n %s" %
                              (output2, output3))
            if cmd_list_:
                loop = 0
                for check_cmd_, expect_match_ in cmd_result_dict.items():
                    check_cmd = check_cmd_.strip()
                    expect_match = expect_match_.strip()
                    if "DEVNAME" in check_cmd:
                        check_cmd = check_cmd.replace("DEVNAME", iface_target)
                    if "VMMAC" in expect_match:
                        expect_match = expect_match.replace("VMMAC", iface_mac)
                    logging.debug(
                        "the check_cmd is %s, and expected result is %s" %
                        (check_cmd, expect_match))
                    check_nwfilter_rules(check_cmd, expect_match)
                    loop += 1
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        if kill_libvirtd:
            daemon_name = libvirtd.service_name
            pid = process.run('pidof %s' % daemon_name,
                              shell=True).stdout_text.strip()
            cmd = "kill -s TERM %s" % pid
            process.run(cmd, shell=True)
            ret = utils_misc.wait_for(lambda: not libvirtd.is_running(),
                                      timeout=30)
            # After libvirt 5.6.0, libvirtd is using systemd socket activation by default
            if not ret and not libvirt_version.version_compare(5, 6, 0):
                test.fail("Failed to kill libvirtd. %s" % bug_url)

    finally:
        if kill_libvirtd:
            libvirtd.restart()
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Undefine created filter except clean-traffic as it is built-in nwfilter
        if filter_name != exist_filter and filter_name != 'clean-traffic':
            virsh.nwfilter_undefine(filter_name, debug=True)
        if mount_noexec_tmp:
            if device_name:
                process.run("umount -l %s" % device_name,
                            ignore_status=True,
                            shell=True)
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        if ipset_command:
            process.run("ipset destroy blacklist", shell=True)
        # Remove additional vms
        if need_vm2:
            result = virsh.dom_list("--all").stdout_text
            if add_vm_name in result:
                virsh.remove_domain(add_vm_name, "--remove-all-storage")
Example #18
    def cleanup(self):
        """
        Cleanup env
        """
        LOG.info("Start to clean up env")

        undef_opts = "--managed-save --snapshots-metadata"
        if libvirt_version.version_compare(7, 5, 0):
            undef_opts += " --checkpoints-metadata"

        # Destroy vms on the src host (if the vm was migrated back)
        # or on the dest host (if it was not)
        LOG.info("Remove vms on src or dest host")
        for vm in self.vms:
            try:
                vm.remove(undef_opts=undef_opts)
            except Exception as detail:
                LOG.warning("Failed to remove vm %s, detail: %s",
                            vm.name, detail)
                continue
            LOG.debug("Vm %s is removed", vm.name)

        # Need to undefine vms on src host(if vm is not migrated back)
        LOG.info("Undefine vms on src host")
        for backup in self.vm_xml_backup:
            try:
                backup.undefine(options=undef_opts)
            except Exception as detail:
                LOG.warning("Failed to undefine vm %s, detail: %s",
                            backup.vm_name, detail)
                continue
            LOG.debug("Vm %s is undefined", backup.vm_name)

        # Recover vm definition on src host
        LOG.info("Recover vm definition on source")
        for backup in self.vm_xml_backup:
            try:
                backup.define()
            except Exception as detail:
                LOG.warning("Failed to define vm %s, detail: %s",
                            backup.vm_name, detail)
                continue
            LOG.debug("Vm %s is restored", backup.vm_name)

        # Clean up ssh, tcp, tls test env
        if self.objs_list:
            LOG.debug("Clean up test env: ssh, tcp, tls, etc")
            self.objs_list.reverse()
            for obj in self.objs_list:
                obj.__del__()

        # Cleanup migrate_pre_setup
        LOG.debug("Clean up migration setup on dest host")
        self.obj_migration.migrate_pre_setup(self.dest_uri, self.params,
                                             cleanup=True)
        if self.migrate_vm_back == 'yes':
            LOG.debug("Clean up migration setup on src host")
            self.obj_migration.migrate_pre_setup(self.src_uri, self.params,
                                                 cleanup=True)

        # Restore conf files
        LOG.debug("Restore conf files")
        for conf_obj in self.local_conf_objs:
            conf_obj.restore()
        for conf_obj in self.remote_conf_objs:
            del conf_obj

        # Disable opened ports in firewalld
        for port in self.opened_ports_local:
            LOG.debug("Disable port %s in firewalld on local host", port)
            self.open_port_in_iptables(port, cleanup=True)
        for port in self.opened_ports_remote:
            LOG.debug("Disable port %s in firewalld on remote host", port)
            self.open_port_in_iptables(port,
                                       server_dict=self.remote_dict,
                                       session=self.remote_session,
                                       cleanup=True)
Example #19
def run(test, params, env):
    """
    Test interface with unprivileged user
    """
    def create_bridge(br_name, iface_name):
        """
        Create bridge attached to physical interface
        """
        # Make sure the bridge not exist
        if libvirt.check_iface(br_name, "exists", "--all"):
            test.cancel("The bridge %s already exist" % br_name)

        # Create bridge
        utils_package.package_install('tmux')
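        # The sequence runs inside tmux so it survives the transient loss of
        # connectivity while the host IP moves from the NIC to the new bridge.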
        cmd = 'tmux -c "ip link add name {0} type bridge; ip link set {1} up;' \
              ' ip link set {1} master {0}; ip link set {0} up; pkill dhclient; ' \
              'sleep 6; dhclient {0}; ifconfig {1} 0"'.format(br_name, iface_name)
        process.run(cmd, shell=True, verbose=True)

    def check_ping(dest_ip,
                   ping_count,
                   timeout,
                   src_ip=None,
                   session=None,
                   expect_success=True):
        """
        Check if ping result meets expectation
        """
        status, output = utils_net.ping(dest=dest_ip,
                                        count=ping_count,
                                        interface=src_ip,
                                        timeout=timeout,
                                        session=session,
                                        force_ipv4=True)
        success = status == 0

        if success != expect_success:
            test.fail('Ping result not met expectation, '
                      'actual result is {}'.format(success))

    if not libvirt_version.version_compare(5, 6, 0):
        test.cancel('Libvirt version is too low for this test.')

    vm_name = params.get('main_vm')
    rand_id = '_' + utils_misc.generate_random_string(3)

    upu_vm_name = 'upu_vm' + rand_id
    user_vm_name = params.get('user_vm_name', 'non_root_vm')
    bridge_name = params.get('bridge_name', 'test_br0') + rand_id
    device_type = params.get('device_type', '')
    iface_name = utils_net.get_net_if(state="UP")[0]
    tap_name = params.get('tap_name', 'mytap0') + rand_id
    macvtap_name = params.get('macvtap_name', 'mymacvtap0') + rand_id
    remote_ip = params.get('remote_ip')
    up_user = params.get('up_user', 'test_upu') + rand_id
    case = params.get('case', '')

    # Create unprivileged user
    logging.info('Create unprivileged user %s', up_user)
    process.run('useradd %s' % up_user, shell=True, verbose=True)
    root_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    upu_args = {
        'unprivileged_user': up_user,
        'ignore_status': False,
        'debug': True,
    }

    try:
        # Create vm as unprivileged user
        logging.info('Create vm as unprivileged user')
        upu_vmxml = root_vmxml.copy()

        # Prepare vm for unprivileged user
        xml_devices = upu_vmxml.devices
        disks = xml_devices.by_device_tag("disk")
        for disk in disks:
            ori_path = disk.source['attrs'].get('file')
            if not ori_path:
                continue

            file_name = ori_path.split('/')[-1]
            new_disk_path = '/home/{}/{}'.format(up_user, file_name)
            logging.debug('New disk path:{}'.format(new_disk_path))

            # Copy disk image file and chown to make sure that
            # unprivileged user has access
            shutil.copyfile(ori_path, new_disk_path)
            shutil.chown(new_disk_path, up_user, up_user)

            # Modify xml to set new path of disk
            disk_index = xml_devices.index(disk)
            source = xml_devices[disk_index].source
            new_attrs = source.attrs
            new_attrs['file'] = new_disk_path
            source.attrs = new_attrs
            xml_devices[disk_index].source = source
            logging.debug(xml_devices[disk_index].source)

        upu_vmxml.devices = xml_devices

        new_xml_path = '/home/{}/upu.xml'.format(up_user)
        shutil.copyfile(upu_vmxml.xml, new_xml_path)

        # Define vm for unprivileged user
        virsh.define(new_xml_path, **upu_args)
        virsh.domrename(vm_name, upu_vm_name, **upu_args)
        logging.debug(virsh.dumpxml(upu_vm_name, **upu_args))
        upu_vmxml = vm_xml.VMXML()
        upu_vmxml.xml = virsh.dumpxml(upu_vm_name, **upu_args).stdout_text

        if case == 'precreated':
            if device_type == 'tap':
                # Create bridge
                create_bridge(bridge_name, iface_name)

                # Create tap device
                tap_cmd = 'ip tuntap add mode tap user {user} group {user} ' \
                          'name {tap};ip link set {tap} up;ip link set {tap} ' \
                          'master {br}'.format(tap=tap_name, user=up_user,
                                               br=bridge_name)

                # Execute command as root
                process.run(tap_cmd, shell=True, verbose=True)

            if device_type == 'macvtap':
                # Create macvtap device
                mac_addr = utils_net.generate_mac_address_simple()
                macvtap_cmd = 'ip link add link {iface} name {macvtap} address' \
                              ' {mac} type macvtap mode bridge;' \
                              'ip link set {macvtap} up'.format(
                               iface=iface_name,
                               macvtap=macvtap_name,
                               mac=mac_addr)
                process.run(macvtap_cmd, shell=True, verbose=True)
                cmd_get_tap = 'ip link show {} | head -1 | cut -d: -f1'.format(
                    macvtap_name)
                tap_index = process.run(cmd_get_tap, shell=True,
                                        verbose=True).stdout_text.strip()
                device_path = '/dev/tap{}'.format(tap_index)
                logging.debug('device_path: {}'.format(device_path))
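                # A macvtap interface with ifindex N exposes its character
                # device as /dev/tapN; that device is what gets opened for
                # the type='ethernet' interface configured below.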
                # Change owner and group for device
                process.run('chown {user} {path};chgrp {user} {path}'.format(
                    user=up_user, path=device_path),
                            shell=True,
                            verbose=True)
                # Check if device owner is changed to unprivileged user
                process.run('ls -l %s' % device_path, shell=True, verbose=True)

            # Modify interface
            all_devices = upu_vmxml.devices
            iface_list = all_devices.by_device_tag('interface')
            if not iface_list:
                test.error('No iface to modify')
            iface = iface_list[0]

            # Remove other interfaces
            for ifc in iface_list[1:]:
                all_devices.remove(ifc)

            if device_type == 'tap':
                dev_name = tap_name
            elif device_type == 'macvtap':
                dev_name = macvtap_name
            else:
                test.error('Invalid device type: {}'.format(device_type))

            if_index = all_devices.index(iface)
            iface = all_devices[if_index]
            iface.type_name = 'ethernet'
            iface.target = {'dev': dev_name, 'managed': 'no'}

            if device_type == 'macvtap':
                iface.mac_address = mac_addr
            logging.debug(iface)

            upu_vmxml.devices = all_devices
            logging.debug(upu_vmxml)

            # Define updated xml
            shutil.copyfile(upu_vmxml.xml, new_xml_path)
            upu_vmxml.xml = new_xml_path
            virsh.define(new_xml_path, **upu_args)

            # Switch to unprivileged user and modify vm's interface
            # Start vm as unprivileged user and test network
            virsh.start(upu_vm_name,
                        debug=True,
                        ignore_status=False,
                        unprivileged_user=up_user)
            cmd = ("su - %s -c 'virsh console %s'" % (up_user, upu_vm_name))
            session = aexpect.ShellSession(cmd)
            session.sendline()
            remote.handle_prompts(session, params.get("username"),
                                  params.get("password"), r"[\#\$]\s*$", 30)
            logging.debug(session.cmd_output('ifconfig'))
            check_ping(remote_ip, 5, 10, session=session)
            session.close()

    finally:
        if 'upu_vmxml' in locals():
            virsh.destroy(upu_vm_name, unprivileged_user=up_user)
            virsh.undefine(upu_vm_name, unprivileged_user=up_user)
        if case == 'precreated':
            try:
                if device_type == 'tap':
                    process.run('ip tuntap del mode tap {}'.format(tap_name),
                                shell=True,
                                verbose=True)
                elif device_type == 'macvtap':
                    process.run('ip l del {}'.format(macvtap_name),
                                shell=True,
                                verbose=True)
            except Exception:
                pass
            finally:
                cmd = 'tmux -c "ip link set {1} nomaster;  ip link delete {0};' \
                      'pkill dhclient; sleep 6; dhclient {1}"'.format(bridge_name, iface_name)
                process.run(cmd, shell=True, verbose=True, ignore_status=True)
        process.run('pkill -u {0};userdel -f -r {0}'.format(up_user),
                    shell=True,
                    verbose=True,
                    ignore_status=True)
Example #20
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Config qemu conf if need
    (3).Label the VM and disk with proper label.
    (4).Start VM and check the context.
    (5).Destroy VM and check the context.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_start_destroy_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_start_destroy_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_start_destroy_vm_sec_model", "selinux")
    sec_label = params.get("svirt_start_destroy_vm_sec_label", None)
    sec_baselabel = params.get("svirt_start_destroy_vm_sec_baselabel", None)
    security_driver = params.get("security_driver", None)
    security_default_confined = params.get("security_default_confined", None)
    security_require_confined = params.get("security_require_confined", None)
    no_sec_model = 'yes' == params.get("no_sec_model", 'no')
    xattr_check = 'yes' == params.get("xattr_check", 'no')
    sec_relabel = params.get("svirt_start_destroy_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'relabel': sec_relabel}
    sec_dict_list = []

    def _set_sec_model(model):
        """
        Set sec_dict_list base on given sec model type
        """
        sec_dict_copy = sec_dict.copy()
        sec_dict_copy['model'] = model
        if sec_type != "none":
            if sec_type == "dynamic" and sec_baselabel:
                sec_dict_copy['baselabel'] = sec_baselabel
            else:
                sec_dict_copy['label'] = sec_label
        sec_dict_list.append(sec_dict_copy)

    if not no_sec_model:
        if "," in sec_model:
            sec_models = sec_model.split(",")
            for model in sec_models:
                _set_sec_model(model)
        else:
            _set_sec_model(sec_model)
    else:
        sec_dict_list.append(sec_dict)

    logging.debug("sec_dict_list is: %s" % sec_dict_list)
    poweroff_with_destroy = ("destroy" == params.get(
        "svirt_start_destroy_vm_poweroff", "destroy"))
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_start_destroy_disk_label')
    # Backup disk Labels.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    backup_ownership_of_disks = {}
    for disk in list(disks.values()):
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        stat_re = os.stat(disk_path)
        backup_ownership_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                          stat_re.st_gid)
    # Backup selinux of host.
    backup_sestatus = utils_selinux.get_status()

    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()

    def _resolve_label(label_string):
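        """
        Split an SELinux label string into (type, MLS/MCS range).

        For example, the (hypothetical) label
        'system_u:object_r:svirt_image_t:s0:c87,c520' resolves to
        ('svirt_image_t', 's0:c87,c520'); without a category pair the
        range is just 's0', and with no range at all it is None.
        """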
        labels = label_string.split(":")
        label_type = labels[2]
        if len(labels) == 4:
            label_range = labels[3]
        elif len(labels) > 4:
            label_range = "%s:%s" % (labels[3], labels[4])
        else:
            label_range = None
        return (label_type, label_range)

    def _check_label_equal(label1, label2):
        """
        Compare two labels field by field; fields present in only one
        label (e.g. a trailing MCS category pair) are not compared.
        """
        label1s = label1.split(":")
        label2s = label2.split(":")
        for field1, field2 in zip(label1s, label2s):
            if field1 != field2:
                return False
        return True

    try:
        # Set disk label
        (img_label_type, img_label_range) = _resolve_label(img_label)
        for disk in list(disks.values()):
            disk_path = disk['source']
            dir_path = "%s(/.*)?" % os.path.dirname(disk_path)
            try:
                utils_selinux.del_defcon(img_label_type, pathregex=dir_path)
            except Exception as err:
                logging.debug("Delete label failed: %s", err)
            # Using semanage set context persistently
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            utils_selinux.verify_defcon(pathname=disk_path,
                                        readonly=False,
                                        forcedesc=True)
            if sec_relabel == "no" and sec_type == 'none':
                os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # Set qemu conf
        if security_driver:
            qemu_conf.set_string('security_driver', security_driver)
        if security_default_confined:
            qemu_conf.security_default_confined = security_default_confined
        if security_require_confined:
            qemu_conf.security_require_confined = security_require_confined
        if (security_driver or security_default_confined
                or security_require_confined):
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        # Set the context of the VM.
        vmxml.set_seclabel(sec_dict_list)
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        # restart libvirtd
        libvirtd.restart()

        # Start VM to check the VM is able to access the image or not.
        try:
            # Need another guest to test the xattr added by libvirt
            if xattr_check:
                blklist = virsh.domblklist(vm_name, debug=True)
                target_disk = re.findall(r"[v,s]d[a-z]",
                                         blklist.stdout.strip())[0]
                guest_name = "%s_%s" % (vm_name, '1')
                cmd = "virt-clone --original %s --name %s " % (vm_name,
                                                               guest_name)
                cmd += "--auto-clone --skip-copy=%s" % target_disk
                process.run(cmd, shell=True, verbose=True)
            vm.start()
            # Start VM successfully.
            # VM with seclabel can access the image with the context.
            if status_error:
                test.fail("Test succeeded in negative case.")
            # Start another vm with the same disk image.
            # The xattr will not be changed.
            if xattr_check:
                virsh.start(guest_name, ignore_status=True, debug=True)
            # Check the label of VM and image when VM is running.
            vm_context = utils_selinux.get_context_of_process(vm.get_pid())
            if (sec_type == "static") and (not vm_context == sec_label):
                test.fail("Label of VM is not expected after "
                          "starting.\n"
                          "Detail: vm_context=%s, sec_label=%s" %
                          (vm_context, sec_label))
            disk_context = utils_selinux.get_context_of_file(
                filename=list(disks.values())[0]['source'])
            if (sec_relabel == "no") and (not disk_context == img_label):
                test.fail("Label of disk is not expected after VM "
                          "starting.\n"
                          "Detail: disk_context=%s, img_label=%s." %
                          (disk_context, img_label))
            if sec_relabel == "yes" and not no_sec_model:
                vmxml = VMXML.new_from_dumpxml(vm_name)
                imagelabel = vmxml.get_seclabel()[0]['imagelabel']
                # The disk context is 'system_u:object_r:svirt_image_t:s0';
                # when the VM starts, the MLS/MCS range is appended
                # automatically, so imagelabel becomes
                # 'system_u:object_r:svirt_image_t:s0:cxx,cxxx'.
                # The MCS range itself should not be checked.
                if not _check_label_equal(disk_context, imagelabel):
                    test.fail("Label of disk is not relabeled by "
                              "VM\nDetail: disk_context="
                              "%s, imagelabel=%s" % (disk_context, imagelabel))
                expected_results = "trusted.libvirt.security.ref_dac=\"1\"\n"
                expected_results += "trusted.libvirt.security.ref_selinux=\"1\""
                cmd = "getfattr -m trusted.libvirt.security -d %s " % list(
                    disks.values())[0]['source']
                utils_test.libvirt.check_cmd_output(cmd,
                                                    content=expected_results)

            # Check the label of disk after VM being destroyed.
            if poweroff_with_destroy:
                vm.destroy(gracefully=False)
            else:
                vm.wait_for_login()
                vm.shutdown()
            filename = list(disks.values())[0]['source']
            img_label_after = utils_selinux.get_context_of_file(filename)
            stat_re = os.stat(filename)
            ownership_of_disk = "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
            logging.debug("The ownership of disk after guest starting is:\n")
            logging.debug(ownership_of_disk)
            logging.debug("The ownership of disk before guest starting is:\n")
            logging.debug(backup_ownership_of_disks[filename])
            if not (sec_relabel == "no" and sec_type == 'none'):
                if not libvirt_version.version_compare(5, 6, 0):
                    if img_label_after != img_label:
                        # Bug 547546 - RFE: the security drivers must remember original
                        # permissions/labels and restore them after
                        # https://bugzilla.redhat.com/show_bug.cgi?id=547546
                        err_msg = "Label of disk is not restored in VM shutting down.\n"
                        err_msg += "Detail: img_label_after=%s, " % img_label_after
                        err_msg += "img_label_before=%s.\n" % img_label
                        err_msg += "More info in https://bugzilla.redhat.com/show_bug"
                        err_msg += ".cgi?id=547546"
                        test.fail(err_msg)
                elif (img_label_after != img_label or ownership_of_disk !=
                      backup_ownership_of_disks[filename]):
                    err_msg = "Label of disk is not restored in VM shutting down.\n"
                    err_msg += "Detail: img_label_after=%s, %s " % (
                        img_label_after, ownership_of_disk)
                    err_msg += "img_label_before=%s, %s\n" % (
                        img_label, backup_ownership_of_disks[filename])
                    test.fail(err_msg)
            # The xattr should be cleaned after guest shutoff.
            cmd = "getfattr -m trusted.libvirt.security -d %s " % list(
                disks.values())[0]['source']
            utils_test.libvirt.check_cmd_output(cmd, content="")
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with seclabel can not access the image with the context.
            if not status_error:
                test.fail("Test failed in positive case." "error: %s" % e)
    finally:
        # clean up
        for path, label in list(backup_labels_of_disks.items()):
            # Using semanage set context persistently
            dir_path = "%s(/.*)?" % os.path.dirname(path)
            (img_label_type, img_label_range) = _resolve_label(label)
            try:
                utils_selinux.del_defcon(img_label_type, pathregex=dir_path)
            except Exception as err:
                logging.debug("Delete label failed: %s", err)
            utils_selinux.set_defcon(context_type=img_label_type,
                                     pathregex=dir_path,
                                     context_range=img_label_range)
            utils_selinux.verify_defcon(pathname=path,
                                        readonly=False,
                                        forcedesc=True)
        for path, label in list(backup_ownership_of_disks.items()):
            label_list = label.split(":")
            os.chown(path, int(label_list[0]), int(label_list[1]))
        backup_xml.sync()
        if xattr_check:
            virsh.undefine(guest_name, ignore_status=True)
        utils_selinux.set_status(backup_sestatus)
        if (security_driver or security_default_confined
                or security_require_confined):
            qemu_conf.restore()
            libvirtd.restart()
Example #21
def run(test, params, env):
    """
    Test command: virsh blockpull <domain> <path>

    1) Prepare test environment.
    2) Populate a disk from its backing image.
    3) Recover test environment.
    4) Check result.
    """

    def make_disk_snapshot(snapshot_take):
        """
        Make external snapshots for disks only.

        :param snapshot_take: number of snapshots to take.
        """
        for count in range(1, snapshot_take + 1):
            snap_xml = snapshot_xml.SnapshotXML()
            snapshot_name = "snapshot_test%s" % count
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test %s" % count

            # Add all disks into xml file.
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile

                # Skip cdrom
                if disk_xml.device == "cdrom":
                    continue
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = "external"
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                new_attrs = disk_xml.source.attrs
                if 'file' in disk_xml.source.attrs:
                    file_name = disk_xml.source.attrs['file']
                    new_file = "%s.snap%s" % (file_name.split('.')[0],
                                              count)
                    snapshot_external_disks.append(new_file)
                    new_attrs.update({'file': new_file})
                    hosts = None
                elif ('name' in disk_xml.source.attrs and
                      disk_src_protocol == 'gluster'):
                    src_name = disk_xml.source.attrs['name']
                    new_name = "%s.snap%s" % (src_name.split('.')[0],
                                              count)
                    new_attrs.update({'name': new_name})
                    snapshot_external_disks.append(new_name)
                    hosts = disk_xml.source.hosts
                elif ('dev' in disk_xml.source.attrs or
                      'name' in disk_xml.source.attrs):
                    if (disk_xml.type_name == 'block' or
                            disk_src_protocol in ['iscsi', 'rbd']):
                        # Use a local file as the external snapshot target
                        # for block and iscsi network disk types: a block
                        # device is treated as raw format by default, which
                        # is unsuitable as an external snapshot target, and
                        # external active snapshots are not supported on
                        # 'network' disks using the 'iscsi' protocol.
                        disk_xml.type_name = 'file'
                        if 'dev' in new_attrs:
                            del new_attrs['dev']
                        elif 'name' in new_attrs:
                            del new_attrs['name']
                            del new_attrs['protocol']
                        new_file = "%s/blk_src_file.snap%s" % (tmp_dir, count)
                        snapshot_external_disks.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                new_src_dict = {"attrs": new_attrs}
                if hosts:
                    new_src_dict.update({"hosts": hosts})
                disk_xml.source = disk_xml.new_disk_source(**new_src_dict)

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options = "--disk-only --xmlfile %s " % snapshot_xml_path

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)

            if snapshot_result.exit_status != 0:
                test.fail(snapshot_result.stderr)

            # Create a file flag in VM after each snapshot
            flag_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                                    dir="/tmp")
            file_path = flag_file.name
            flag_file.close()

            status, output = session.cmd_status_output("touch %s" % file_path)
            if status:
                test.fail("Touch file in vm failed. %s" % output)
            snapshot_flag_files.append(file_path)

    def get_first_disk_source():
        """
        Get the disk source of the first device.

        :return: source of the first disk device.
        """
        first_device = vm.get_first_disk_devices()
        first_disk_src = first_device['source']
        return first_disk_src

    def make_relative_path_backing_files():
        """
        Create backing chain files of relative path.

        :return: absolute path of top active file
        """
        first_disk_source = get_first_disk_source()
        basename = os.path.basename(first_disk_source)
        root_dir = os.path.dirname(first_disk_source)
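        # Shell brace expansion creates the sibling directories b, c and d
        # (requires a brace-expanding shell such as bash)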
        cmd = "mkdir -p %s" % os.path.join(root_dir, '{b..d}')
        ret = process.run(cmd, shell=True)
        libvirt.check_exit_status(ret)

        # Make three external relative path backing files.
        backing_file_dict = collections.OrderedDict()
        backing_file_dict["b"] = "../%s" % basename
        backing_file_dict["c"] = "../b/b.img"
        backing_file_dict["d"] = "../c/c.img"
        for key, value in list(backing_file_dict.items()):
            backing_file_path = os.path.join(root_dir, key)
            cmd = ("cd %s && qemu-img create -f qcow2 -o backing_file=%s,backing_fmt=qcow2 %s.img"
                   % (backing_file_path, value, key))
            ret = process.run(cmd, shell=True)
            libvirt.check_exit_status(ret)
        return os.path.join(backing_file_path, "d.img")

    def check_chain_backing_files(disk_src_file, expect_backing_file=False):
        """
        Check backing chain files of relative path after blockcommit.

        :param disk_src_file: first disk src file.
        :param expect_backing_file: whether it expect to have backing files.
        """
        first_disk_source = get_first_disk_source()
        # The source image should refer to the original one after active blockcommit
        if not expect_backing_file and disk_src_file not in first_disk_source:
            test.fail("The disk image path:%s doesn't include the original image: %s" % (first_disk_source, disk_src_file))
        # Validate source image doesn't have backing files after active blockcommit
        cmd = "qemu-img info %s --backing-chain" % first_disk_source
        if qemu_img_locking_feature_support:
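            # -U skips the image-locking check qemu introduced in 2.10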
            cmd = "qemu-img info -U %s --backing-chain" % first_disk_source
        ret = process.run(cmd, shell=True).stdout_text.strip()
        if expect_backing_file:
            if 'backing file' not in ret:
                test.fail("The disk image doesn't have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)
        else:
            if 'backing file' in ret:
                test.fail("The disk image still have backing files")
            else:
                logging.debug("The actual qemu-img output:%s\n", ret)

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    snapshot_take = int(params.get("snapshot_take", '0'))
    needs_agent = "yes" == params.get("needs_agent", "yes")
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    snap_in_mirror = "yes" == params.get("snap_in_mirror", 'no')
    snap_in_mirror_err = "yes" == params.get("snap_in_mirror_err", 'no')
    bandwidth = params.get("bandwidth", None)
    with_timeout = ("yes" == params.get("with_timeout_option", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    base_option = params.get("base_option", None)
    keep_relative = "yes" == params.get("keep_relative", 'no')
    virsh_dargs = {'debug': True}

    # Check whether qemu-img needs the -U suboption; image locking was added in qemu 2.10
    qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()
    backing_file_relative_path = "yes" == params.get("backing_file_relative_path", "no")

    # Process domain disk device parameters
    disk_type = params.get("disk_type")
    disk_target = params.get("disk_target", 'vda')
    disk_src_protocol = params.get("disk_source_protocol")
    restart_tgtd = params.get("restart_tgtd", "no")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)

    # Brought in by the new blockdev feature
    if (libvirt_version.version_compare(6, 0, 0) and
       params.get("transport", "") == "rdma"):
        test.cancel("If blockdev is enabled, the transport protocol 'rdma' is "
                    "not yet supported.")

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Abort the test if there are snapshots already
    existing_snaps = virsh.snapshot_list(vm_name)
    if len(existing_snaps) != 0:
        test.fail("There are snapshots created for %s already" % vm_name)

    snapshot_external_disks = []
    # Start with an empty ceph config path; cleanup removes it only if one was created
    ceph_cfg = ""
    try:
        if disk_src_protocol == 'iscsi' and disk_type == 'network':
            if not libvirt_version.version_compare(1, 0, 4):
                test.cancel("'iscsi' disk doesn't support in"
                            " current libvirt version.")
        if disk_src_protocol == 'gluster':
            if not libvirt_version.version_compare(1, 2, 7):
                test.cancel("Snapshot on glusterfs not"
                            " support in current "
                            "version. Check more info "
                            " with https://bugzilla.re"
                            "dhat.com/show_bug.cgi?id="
                            "1017289")

        # Set vm xml and guest agent
        if replace_vm_disk:
            if disk_src_protocol == "rbd" and disk_type == "network":
                src_host = params.get("disk_source_host", "EXAMPLE_HOSTS")
                mon_host = params.get("mon_host", "EXAMPLE_MON_HOST")
                # Create config file if it doesn't exist
                ceph_cfg = ceph.create_config_file(mon_host)
                if src_host.count("EXAMPLE") or mon_host.count("EXAMPLE"):
                    test.cancel("Please provide ceph host first.")

                params.update(
                   {"disk_source_name": os.path.join(
                      pool_name,
                      'rbd_blockpull_' + utils_misc.generate_random_string(4) +
                      '.img')})
                if utils_package.package_install(["ceph-common"]):
                    ceph.rbd_image_rm(
                        mon_host, *params.get("disk_source_name").split('/'))
                else:
                    test.error('Failed to install ceph-common package.')
            if backing_file_relative_path:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                first_src_file = get_first_disk_source()
                blk_source_image = os.path.basename(first_src_file)
                blk_source_folder = os.path.dirname(first_src_file)
                replace_disk_image = make_relative_path_backing_files()
                params.update({'disk_source_name': replace_disk_image,
                               'disk_type': 'file',
                               'disk_src_protocol': 'file'})
                vm.start()
            libvirt.set_vm_disk(vm, params, tmp_dir)

        if needs_agent:
            vm.prepare_guest_agent()

        # The first disk is supposed to contain the OS;
        # we will perform the blockpull operation on it.
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        blk_target = first_disk['target']
        snapshot_flag_files = []

        # get a vm session before snapshot
        session = vm.wait_for_login()
        # do snapshot
        make_disk_snapshot(snapshot_take)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("The domain xml after snapshot is %s" % vmxml)

        # snapshot src file list
        snap_src_lst = [blk_source]
        snap_src_lst += snapshot_external_disks

        if snap_in_mirror:
            blockpull_options = "--bandwidth 1"
        else:
            blockpull_options = "--wait --verbose"

        if with_timeout:
            blockpull_options += " --timeout 1"

        if bandwidth:
            blockpull_options += " --bandwidth %s" % bandwidth

        if base_option == "async":
            blockpull_options += " --async"

        base_image = None
        base_index = None
        if (libvirt_version.version_compare(1, 2, 4) or
                disk_src_protocol == 'gluster'):
            # For libvirt >= 1.2.4, or when the source protocol is gluster,
            # the base image is addressed by its index in the backing chain;
            # the index depends on the base option: shallow, base or top.
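            # e.g. "vda[1]" refers to the image at index 1 in vda's backing chain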
            if base_option == "shallow":
                base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "base":
                base_index = 2
                base_image = "%s[%s]" % (disk_target, base_index)
            elif base_option == "top":
                base_index = 0
                base_image = "%s[%s]" % (disk_target, base_index)
        else:
            if base_option == "shallow":
                base_image = snap_src_lst[3]
            elif base_option == "base":
                base_image = snap_src_lst[2]
            elif base_option == "top":
                base_image = snap_src_lst[4]

        if base_option and base_image:
            blockpull_options += " --base %s" % base_image

        if keep_relative:
            blockpull_options += " --keep-relative"

        if backing_file_relative_path:
            # Use block commit to shorten previous snapshots.
            blockcommit_options = "  --active --verbose --shallow --pivot --keep-relative"
            for count in range(1, snapshot_take + 1):
                res = virsh.blockcommit(vm_name, blk_target,
                                        blockcommit_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)

            # Use block pull with the --keep-relative flag, and reset base_index to 2.
            base_index = 2
            for count in range(1, snapshot_take):
                if count >= 3:
                    if libvirt_version.version_compare(6, 0, 0):
                        break
                    # If there are three or more block pull operations,
                    # base_index needs to be reset to 1; this only affects
                    # tests on libvirt < 6.0.0.
                    base_index = 1
                base_image = "%s[%s]" % (disk_target, base_index)
                blockpull_options = "  --wait --verbose --base %s --keep-relative" % base_image
                res = virsh.blockpull(vm_name, blk_target,
                                      blockpull_options, **virsh_dargs)
                libvirt.check_exit_status(res, status_error)

                if libvirt_version.version_compare(6, 0, 0):
                    base_index += 1
            # Check final backing chain files.
            check_chain_backing_files(blk_source_image, True)
            return
        # Run test case
        result = virsh.blockpull(vm_name, blk_target,
                                 blockpull_options, **virsh_dargs)
        status = result.exit_status

        # If the pull job aborted due to timeout, the exit status differs
        # between RHEL6 (0) and RHEL7 (1)
        if with_timeout and 'Pull aborted' in result.stdout.strip():
            if libvirt_version.version_compare(1, 1, 1):
                status_error = True
            else:
                status_error = False

        # Check status_error
        libvirt.check_exit_status(result, status_error)

        if not status and not with_timeout:
            if snap_in_mirror:
                snap_mirror_path = "%s/snap_mirror" % tmp_dir
                snap_options = "--diskspec vda,snapshot=external,"
                snap_options += "file=%s --disk-only" % snap_mirror_path
                snapshot_external_disks.append(snap_mirror_path)
                ret = virsh.snapshot_create_as(vm_name, snap_options,
                                               ignore_status=True,
                                               debug=True)
                libvirt.check_exit_status(ret, snap_in_mirror_err)
                return

            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            for disk in disks:
                if disk.target['dev'] != blk_target:
                    continue
                else:
                    disk_xml = disk.xmltreefile
                    break

            logging.debug("after pull the disk xml is: %s"
                          % disk_xml)
            if libvirt_version.version_compare(1, 2, 4):
                err_msg = "Domain image backing chain check failed"
                if not base_option or "async" in base_option:
                    chain_lst = snap_src_lst[-1:]
                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)
                elif "base" or "shallow" in base_option:
                    if not base_index and base_image:
                        base_index = snap_src_lst.index(base_image)

                    chain_lst = snap_src_lst[:base_index][::-1]
                    if not libvirt_version.version_compare(6, 0, 0):
                        chain_lst = snap_src_lst[::-1][base_index:]
                    chain_lst.insert(0, snap_src_lst[-1])

                    ret = check_chain_xml(disk_xml, chain_lst)
                    if not ret:
                        test.fail(err_msg)

        # If the base image is the top layer of the snapshot chain,
        # virsh blockpull should fail, so return directly
        if base_option == "top":
            return

        # Check flag files
        for flag in snapshot_flag_files:
            status, output = session.cmd_status_output("cat %s" % flag)
            if status:
                test.fail("blockpull failed: %s" % output)

    finally:
        # Remove ceph configure file if created
        if ceph_cfg:
            os.remove(ceph_cfg)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync("--snapshots-metadata")
        # Clean ceph image if used in test
        if 'mon_host' in locals():
            if utils_package.package_install(["ceph-common"]):
                disk_source_name = params.get("disk_source_name")
                cmd = ("rbd -m {0} info {1} && rbd -m {0} rm "
                       "{1}".format(mon_host, disk_source_name))
                cmd_result = process.run(cmd, ignore_status=True, shell=True)
                logging.debug("result of rbd removal: %s", cmd_result)
            else:
                logging.debug('Failed to install ceph-common to clean ceph.')

        if disk_src_protocol != 'gluster':
            for disk in snapshot_external_disks:
                if os.path.exists(disk):
                    os.remove(disk)

        if backing_file_relative_path:
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
            process.run("cd %s && rm -rf b c d" % blk_source_folder, shell=True)

        libvirtd = utils_libvirtd.Libvirtd()

        if disk_src_protocol == 'iscsi':
            libvirt.setup_or_cleanup_iscsi(is_setup=False,
                                           restart_tgtd=restart_tgtd)
        elif disk_src_protocol == 'gluster':
            logging.info("clean gluster env")
            gluster.setup_or_cleanup_gluster(False, brick_path=brick_path, **params)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            restore_selinux = params.get('selinux_status_bak')
            libvirt.setup_or_cleanup_nfs(is_setup=False,
                                         restore_selinux=restore_selinux)
Example #22
def set_domiftune_parameter(params, test, libvirtd):
    """
    Set the domiftune parameters
    :param params: the parameter dictionary
    :param test: test instance
    :param libvirtd: libvirt daemon instance
    """
    vm_name = params.get("main_vm")
    inbound = params.get("inbound", "")
    outbound = params.get("outbound", "")
    outbound_new = params.get("outbound_new")
    options = params.get("options", None)
    interface = params.get("iface_dev")
    check_clear = params.get("check_clear", "no")
    status_error = params.get("status_error", "no")

    test_clear = False
    if check_clear == "yes":
        # Since libvirt 1.2.3 (commit id '14973382') it is possible to
        # pass a zero (0) as an inbound or outbound parameter to virsh
        # domiftune in order to clear all the settings found, so we'll
        # handle that difference here
        if libvirt_version.version_compare(1, 2, 3):
            test_clear = True
            # Although the .cfg file has "0" for these that will
            # not test whether we can clear the value. So let's
            # set it to "1", then after we are sure we can set it
            # we will clear it and check that it's clear
            if inbound:
                save_inbound = inbound
                # average,peak,burst
                inbound = "2,4,7"
                params['inbound'] = "2,4,7"
            if outbound:
                save_outbound = outbound
                # average,peak,burst
                outbound = "2,4,7"
                params['outbound'] = "2,4,7"
        else:
            # Prior to libvirt 1.2.3 this would be an error, so treat it
            # as such. Leaving the inbound/outbound as zero should result
            # in an error on the following set, but a pass for the test
            # since the error is expected.
            status_error = "yes"
    if libvirt_version.version_compare(7, 3, 0) and outbound_new:
        outbound = outbound_new
    result = virsh.domiftune(vm_name,
                             interface,
                             options,
                             inbound,
                             outbound,
                             debug=True)
    status = result.exit_status

    if status_error == "yes":
        if status:
            check_libvirtd(test, libvirtd)
            logging.info("It's an expected error: %s", result.stderr)
        else:
            test.fail("%d not a expected command return value" % status)
    elif status_error == "no":
        if status:
            test.fail("Unexpected set domiftune error: %s" % result.stderr)
        else:
            logging.debug("set domiftune successfully!!!")
            if not check_domiftune(params, False):
                test.fail("The 'inbound' or/and 'outbound' are"
                          " inconsistent with domiftune XML"
                          " and/or virsh command output")

    # If supported, then here's where we reset the inbound/outbound
    # back to what they were input as and then run the same domiftune
    # command.  That should result in a successful return and should
    # clear the parameter.
    if test_clear:
        params['set_clear'] = 'yes'
        if inbound:
            inbound = save_inbound
            params['inbound'] = save_inbound
        if outbound:
            outbound = save_outbound
            params['outbound'] = save_outbound
        result = virsh.domiftune(vm_name, interface, options, inbound,
                                 outbound)
        status = result.exit_status
        if status:
            test.fail("Unexpected failure when clearing: %s" % result.stderr)
        else:
            logging.debug("clear the inbound/outbound successfully!!!")
            params['set_clear'] = "yes"
            if not check_domiftune(params, True):
                test.fail("The 'inbound' or/and 'outbound' were "
                          "not cleared.")
Example #23
def run(test, params, env):
    """
    Test memory device attach/detach.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare the memory device xml.
    3.Edit the domain xml and start the domain.
    4.Perform attach/detach operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    # Global variable to store max/current memory,
    # it may change after attach/detach
    new_max_mem = None
    new_cur_mem = None

    def consume_vm_mem(size=1000, timeout=360):
        """
        To consume guest memory, default size is 1000M
        """
        session = vm.wait_for_login()
        # Mount tmpfs on /mnt and write a file of the given size there;
        # tmpfs pages stay in RAM, so this pins guest memory
        sh_cmd = ("swapoff -a; mount -t tmpfs -o size={0}M tmpfs "
                  "/mnt; dd if=/dev/urandom of=/mnt/test bs=1M"
                  " count={0}".format(size))
        session.cmd(sh_cmd, timeout=timeout)
        session.close()

    def mount_hugepages(page_size):
        """
        To mount hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        if page_size == 4:
            perm = ""
        else:
            perm = "pagesize=%dK" % page_size

        tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                             "hugetlbfs")
        if tlbfs_status:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs", perm)

    def setup_hugepages(page_size=2048, shp_num=2000):
        """
        To setup hugepages

        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        :param shp_num: number of hugepages
        """
        mount_hugepages(page_size)
        utils_memory.set_num_huge_pages(shp_num)
        config.hugetlbfs_mount = ["/dev/hugepages"]
        utils_libvirtd.libvirtd_restart()

    def restore_hugepages(page_size=4):
        """
        To recover hugepages
        :param page_size: unit is kB, it can be 4,2048,1048576,etc
        """
        mount_hugepages(page_size)
        config.restore()
        utils_libvirtd.libvirtd_restart()

    def check_qemu_cmd(max_mem_rt, tg_size):
        """
        Check qemu command line options.
        :param max_mem_rt: size of max memory
        :param tg_size: Target hotplug memory size
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if discard:
            cmd += " | grep 'discard-data=yes'"
        elif max_mem_rt:
            cmd += (" | grep 'slots=%s,maxmem=%sk'" %
                    (max_mem_slots, max_mem_rt))
            if tg_size:
                size = int(tg_size) * 1024
                if huge_pages or discard or cold_plug_discard:
                    cmd_str = r'memdimm.\|memory-backend-file,id=ram-node.'
                    cmd += (
                        " | grep 'memory-backend-file,id=%s' | grep 'size=%s" %
                        (cmd_str, size))
                else:
                    cmd_str = r'mem.\|memory-backend-ram,id=ram-node.'
                    cmd += (
                        " | grep 'memory-backend-ram,id=%s' | grep 'size=%s" %
                        (cmd_str, size))

                if pg_size:
                    cmd += ",host-nodes=%s" % node_mask
                    if numa_memnode:
                        for node in numa_memnode:
                            if ('nodeset' in node
                                    and node['nodeset'] in node_mask):
                                cmd += ",policy=%s" % node['mode']
                    cmd += ".*pc-dimm,node=%s" % tg_node
                if mem_addr:
                    cmd += (".*slot=%s" % (mem_addr['slot']))
                cmd += "'"
            if cold_plug_discard:
                cmd += " | grep 'discard-data=yes'"

        # Run the command
        result = process.run(cmd, shell=True, verbose=True, ignore_status=True)
        if result.exit_status:
            test.fail('Qemu command line check failed.')

    def check_guest_meminfo(old_mem, check_option):
        """
        Check meminfo on guest.
        """
        assert old_mem is not None
        session = vm.wait_for_login()
        # Hot-plugged memory should be online by udev rules
        udev_file = "/lib/udev/rules.d/80-hotplug-cpu-mem.rules"
        udev_rules = ('SUBSYSTEM=="memory", ACTION=="add", TEST=="state",'
                      ' ATTR{state}=="offline", ATTR{state}="online"')
        cmd = ("grep memory %s || echo '%s' >> %s" %
               (udev_file, udev_rules, udev_file))
        session.cmd(cmd)
        # Wait a while for new memory to be detected.
        utils_misc.wait_for(
            lambda: vm.get_totalmem_sys(online) != int(old_mem),
            30,
            first=20.0)
        new_mem = vm.get_totalmem_sys(online)
        session.close()
        logging.debug("Memtotal on guest: %s", new_mem)
        no_of_times = 1
        if at_times:
            no_of_times = at_times
        if check_option == "attach":
            if new_mem != int(old_mem) + (int(tg_size) * no_of_times):
                test.fail("Total memory on guest did not change as expected "
                          "after attaching the memory device")

        if check_option == "detach":
            if new_mem != int(old_mem) - (int(tg_size) * no_of_times):
                test.fail("Total memory on guest did not change as expected "
                          "after detaching the memory device")

    def check_dom_xml(at_mem=False, dt_mem=False):
        """
        Check domain xml options.
        """
        # Global variable to store max/current memory
        global new_max_mem
        global new_cur_mem
        if attach_option.count("config"):
            dom_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        else:
            dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        try:
            xml_max_mem_rt = int(dom_xml.max_mem_rt)
            xml_max_mem = int(dom_xml.max_mem)
            xml_cur_mem = int(dom_xml.current_mem)
            assert int(max_mem_rt) == xml_max_mem_rt

            # Check attached/detached memory
            logging.info("at_mem=%s,dt_mem=%s", at_mem, dt_mem)
            logging.info("detach_device is %s", detach_device)
            if at_mem:
                if at_times:
                    assert int(max_mem) + (int(tg_size) *
                                           at_times) == xml_max_mem
                else:
                    assert int(max_mem) + int(tg_size) == xml_max_mem
                # Bug 1220702, skip the check for current memory
                if at_times:
                    assert int(cur_mem) + (int(tg_size) *
                                           at_times) == xml_cur_mem
                else:
                    assert int(cur_mem) + int(tg_size) == xml_cur_mem
                new_max_mem = xml_max_mem
                new_cur_mem = xml_cur_mem
                mem_dev = dom_xml.get_devices("memory")
                memory_devices = 1
                if at_times:
                    memory_devices = at_times
                if len(mem_dev) != memory_devices:
                    test.fail("Found wrong number of memory device")
                assert int(tg_size) == int(mem_dev[0].target.size)
                assert int(tg_node) == int(mem_dev[0].target.node)
            elif dt_mem:
                if at_times:
                    assert int(new_max_mem) - (int(tg_size) *
                                               at_times) == xml_max_mem
                    assert int(new_cur_mem) - (int(tg_size) *
                                               at_times) == xml_cur_mem
                else:
                    assert int(new_max_mem) - int(tg_size) == xml_max_mem
                    # Bug 1220702, skip the check for current memory
                    assert int(new_cur_mem) - int(tg_size) == xml_cur_mem
        except AssertionError:
            utils_misc.log_last_traceback()
            test.fail("Found unmatched memory setting from domain xml")

    def check_mem_align():
        """
        Check whether the memory sizes are aligned to 256 MiB
        """
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        dom_mem = {}
        dom_mem['maxMemory'] = int(dom_xml.max_mem_rt)
        dom_mem['memory'] = int(dom_xml.memory)
        dom_mem['currentMemory'] = int(dom_xml.current_mem)

        cpuxml = dom_xml.cpu
        numa_cell = cpuxml.numa_cell
        dom_mem['numacellMemory'] = int(numa_cell[0]['memory'])
        sum_numa_mem = sum([int(cell['memory']) for cell in numa_cell])

        attached_mem = dom_xml.get_devices(device_type='memory')[0]
        dom_mem['attached_mem'] = attached_mem.target.size

        all_align = True
        for key in dom_mem:
            logging.info('%-20s:%15d', key, dom_mem[key])
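            # 262144 KiB == 256 MiB, the required alignment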
            if dom_mem[key] % 262144:
                logging.error('%s is not aligned to 256 MiB', key)
                if key == 'currentMemory':
                    continue
                all_align = False

        if not all_align:
            test.fail('Memory not aligned to 256 MiB')

        if dom_mem['memory'] == sum_numa_mem + dom_mem['attached_mem']:
            logging.info(
                'Check Pass: Memory is equal to (all numa memory + memory device)'
            )
        else:
            test.fail(
                'Memory is not equal to (all numa memory + memory device)')

        return dom_mem

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        def _wait_for_restore():
            try:
                virsh.restore(save_file, debug=True, ignore_status=False)
                return True
            except Exception as e:
                logging.error(e)

        utils_misc.wait_for(_wait_for_restore, 30, step=5)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def add_device(dev_xml, attach, at_error=False):
        """
        Add memory device by attachment or modify domain xml.
        """
        if attach:
            ret = virsh.attach_device(vm_name,
                                      dev_xml.xml,
                                      flagstr=attach_option,
                                      debug=True)
            libvirt.check_exit_status(ret, at_error)
        else:
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            if numa_cells:
                del vmxml.max_mem
                del vmxml.current_mem
            vmxml.add_device(dev_xml)
            vmxml.sync()

    def modify_domain_xml():
        """
        Modify domain xml and define it.
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        mem_unit = params.get("mem_unit", "KiB")
        vcpu = params.get("vcpu", "4")
        if max_mem_rt:
            vmxml.max_mem_rt = int(max_mem_rt)
            vmxml.max_mem_rt_slots = max_mem_slots
            vmxml.max_mem_rt_unit = mem_unit
        if memory_val:
            vmxml.memory = int(memory_val)
        if vcpu:
            vmxml.vcpu = int(vcpu)
            vcpu_placement = params.get("vcpu_placement", "static")
            vmxml.placement = vcpu_placement
        if numa_memnode:
            vmxml.numa_memory = {}
            vmxml.numa_memnode = numa_memnode
        else:
            try:
                del vmxml.numa_memory
                del vmxml.numa_memnode
            except Exception:
                # Not exists
                pass

        if numa_cells:
            cells = [ast.literal_eval(x) for x in numa_cells]
            # Rounding the numa memory values
            if align_mem_values:
                for cell in range(len(cells)):
                    memory_value = str(
                        utils_numeric.align_value(cells[cell]["memory"],
                                                  align_to_value))
                    cells[cell]["memory"] = memory_value
            cpu_xml = vm_xml.VMCPUXML()
            cpu_xml.xml = "<cpu><numa/></cpu>"
            cpu_mode = params.get("cpu_mode")
            model_fallback = params.get("model_fallback")
            if cpu_mode:
                cpu_xml.mode = cpu_mode
            if model_fallback:
                cpu_xml.fallback = model_fallback
            cpu_xml.numa_cell = cpu_xml.dicts_to_cells(cells)
            vmxml.cpu = cpu_xml
            # Delete memory and currentMemory tag,
            # libvirt will fill it automatically
            del vmxml.max_mem
            del vmxml.current_mem

        # hugepages setting
        if huge_pages or discard or cold_plug_discard:
            membacking = vm_xml.VMMemBackingXML()
            membacking.discard = True
            membacking.source = ''
            membacking.source_type = 'file'
            if huge_pages:
                hugepages = vm_xml.VMHugepagesXML()
                pagexml_list = []
                for i in range(len(huge_pages)):
                    pagexml = hugepages.PageXML()
                    pagexml.update(huge_pages[i])
                    pagexml_list.append(pagexml)
                hugepages.pages = pagexml_list
                membacking.hugepages = hugepages
            vmxml.mb = membacking

        logging.debug("vm xml: %s", vmxml)
        vmxml.sync()

    pre_vm_state = params.get("pre_vm_state", "running")
    attach_device = "yes" == params.get("attach_device", "no")
    detach_device = "yes" == params.get("detach_device", "no")
    detach_alias = "yes" == params.get("detach_alias", "no")
    detach_alias_options = params.get("detach_alias_options")
    attach_error = "yes" == params.get("attach_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    detach_error = "yes" == params.get("detach_error", "no")
    maxmem_error = "yes" == params.get("maxmem_error", "no")
    attach_option = params.get("attach_option", "")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    wait_before_save_secs = int(params.get("wait_before_save_secs", 0))
    test_managedsave = "yes" == params.get("test_managedsave", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_mem_binding = "yes" == params.get("test_mem_binding", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    add_mem_device = "yes" == params.get("add_mem_device", "no")
    test_dom_xml = "yes" == params.get("test_dom_xml", "no")
    max_mem = params.get("max_mem")
    max_mem_rt = params.get("max_mem_rt")
    max_mem_slots = params.get("max_mem_slots", "16")
    memory_val = params.get('memory_val', '')
    mem_align = 'yes' == params.get('mem_align', 'no')
    hot_plug = 'yes' == params.get('hot_plug', 'no')
    cur_mem = params.get("current_mem")
    numa_cells = params.get("numa_cells", "").split()
    set_max_mem = params.get("set_max_mem")
    align_mem_values = "yes" == params.get("align_mem_values", "no")
    align_to_value = int(params.get("align_to_value", "65536"))
    hot_reboot = "yes" == params.get("hot_reboot", "no")
    rand_reboot = "yes" == params.get("rand_reboot", "no")
    guest_known_unplug_errors = []
    guest_known_unplug_errors.append(params.get("guest_known_unplug_errors"))
    host_known_unplug_errors = []
    host_known_unplug_errors.append(params.get("host_known_unplug_errors"))
    discard = "yes" == params.get("discard", "no")
    cold_plug_discard = "yes" == params.get("cold_plug_discard", "no")
    if cold_plug_discard or discard:
        mem_discard = 'yes'
    else:
        mem_discard = 'no'

    # params for attached device
    mem_model = params.get("mem_model", "dimm")
    tg_size = params.get("tg_size")
    tg_sizeunit = params.get("tg_sizeunit", 'KiB')
    tg_node = params.get("tg_node", 0)
    pg_size = params.get("page_size")
    pg_unit = params.get("page_unit", "KiB")
    huge_page_num = int(params.get('huge_page_num', 2000))
    node_mask = params.get("node_mask", "0")
    mem_addr = ast.literal_eval(params.get("memory_addr", "{}"))
    huge_pages = [
        ast.literal_eval(x) for x in params.get("huge_pages", "").split()
    ]
    numa_memnode = [
        ast.literal_eval(x) for x in params.get("numa_memnode", "").split()
    ]
    at_times = int(params.get("attach_times", 1))
    online = params.get("mem_online", "no")

    config = utils_config.LibvirtQemuConfig()
    setup_hugepages_flag = params.get("setup_hugepages")
    if (setup_hugepages_flag == "yes"):
        cpu_arch = cpu_util.get_cpu_arch()
        if cpu_arch == 'power8':
            pg_size = '16384'
            huge_page_num = 200
        elif cpu_arch == 'power9':
            pg_size = '2048'
            huge_page_num = 2000
        for page in huge_pages:
            page.update({'size': pg_size})
        setup_hugepages(int(pg_size), shp_num=huge_page_num)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    if not libvirt_version.version_compare(1, 2, 14):
        test.cancel("Memory hotplug not supported in current libvirt version.")

    if 'align_256m' in params.get('name', ''):
        arch = platform.machine()
        if arch.lower() != 'ppc64le':
            test.cancel('This case is for ppc64le only.')

    if align_mem_values:
        # Rounding the following values to 'align'
        max_mem = utils_numeric.align_value(max_mem, align_to_value)
        max_mem_rt = utils_numeric.align_value(max_mem_rt, align_to_value)
        cur_mem = utils_numeric.align_value(cur_mem, align_to_value)
        tg_size = utils_numeric.align_value(tg_size, align_to_value)

    try:
        # Drop caches first for host has enough memory
        drop_caches()
        # Destroy domain first
        if vm.is_alive():
            vm.destroy(gracefully=False)
        modify_domain_xml()
        numa_info = utils_misc.NumaInfo()
        logging.debug(numa_info.get_all_node_meminfo())

        # Start the domain anyway if attaching a memory device
        old_mem_total = None
        if attach_device:
            vm.start()
            session = vm.wait_for_login()
            old_mem_total = vm.get_totalmem_sys(online)
            logging.debug("Memtotal on guest: %s", old_mem_total)
            session.close()
        elif discard:
            vm.start()
            session = vm.wait_for_login()
            check_qemu_cmd(max_mem_rt, tg_size)
        dev_xml = None

        # To attach the memory device.
        if (add_mem_device and not hot_plug) or cold_plug_discard:
            at_times = int(params.get("attach_times", 1))
            randvar = 0
            if rand_reboot:
                rand_value = random.randint(15, 25)
                logging.debug("reboots at %s", rand_value)
            for x in xrange(at_times):
                # If an error is expected, the command status is only
                # checked on the last attach
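                # libvirt requires user-defined aliases to start with "ua-"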
                device_alias = "ua-" + str(uuid.uuid4())
                dev_xml = utils_hotplug.create_mem_xml(
                    tg_size, pg_size, mem_addr, tg_sizeunit, pg_unit, tg_node,
                    node_mask, mem_model, mem_discard, device_alias)
                randvar = randvar + 1
                logging.debug("attaching device count = %s", x)
                if x == at_times - 1:
                    add_device(dev_xml, attach_device, attach_error)
                else:
                    add_device(dev_xml, attach_device)
                if hot_reboot:
                    vm.reboot()
                    vm.wait_for_login()
                if rand_reboot and randvar == rand_value:
                    vm.reboot()
                    vm.wait_for_login()
                    randvar = 0
                    rand_value = random.randint(15, 25)
                    logging.debug("reboots at %s", rand_value)

        # Check domain xml after attach device.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        # Set domain state
        if pre_vm_state == "transient":
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Cann't create the domain")
        elif vm.is_dead():
            try:
                vm.start()
                vm.wait_for_login().close()
            except virt_vm.VMStartError as detail:
                if start_error:
                    pass
                else:
                    except_msg = "memory hotplug isn't supported by this QEMU binary"
                    if except_msg in detail.reason:
                        test.cancel(detail)
                    test.fail(detail)

        # Set memory operation
        if set_max_mem:
            max_mem_option = params.get("max_mem_option", "")
            ret = virsh.setmaxmem(vm_name, set_max_mem, flagstr=max_mem_option)
            libvirt.check_exit_status(ret, maxmem_error)

        # Hotplug memory device
        if add_mem_device and hot_plug:
            process.run('ps -ef|grep qemu', shell=True, verbose=True)
            session = vm.wait_for_login()
            original_mem = vm.get_totalmem_sys()
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit,
                                                   tg_node, node_mask,
                                                   mem_model)
            add_device(dev_xml, True)
            mem_after = vm.get_totalmem_sys()
            params['delta'] = mem_after - original_mem

        # Check domain xml after start the domain.
        if test_dom_xml:
            check_dom_xml(at_mem=attach_device)

        if mem_align:
            dom_mem = check_mem_align()
            check_qemu_cmd(dom_mem['maxMemory'], dom_mem['attached_mem'])
            if hot_plug and params['delta'] != dom_mem['attached_mem']:
                test.fail(
                    'Memory after attach not equal to original mem + attached mem'
                )

        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd(max_mem_rt, tg_size)
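            # check_qemu_cmd() inspects the qemu process command line; with
            # maxMemory configured it should carry a '-m' argument roughly
            # like (illustrative): -m size=1048576k,slots=16,maxmem=4194304k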

        # Check guest meminfo after attachment
        if (attach_device and not attach_option.count("config")
                and not any([attach_error, start_error])):
            check_guest_meminfo(old_mem_total, check_option="attach")

        # Consume memory on the guest to verify
        # memory changes via numastat
        if test_mem_binding:
            pid = vm.get_pid()
            old_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", old_numastat)
            # Increase the memory consumed to 1500
            consume_vm_mem(1500)
            new_numastat = read_from_numastat(pid, "Total")
            logging.debug("Numastat: %s", new_numastat)
            # Only check total memory which is the last element
            if float(new_numastat[-1]) - float(old_numastat[-1]) < 0:
                test.fail("Numa memory can't be consumed on guest")

        # Run managedsave command to check domain xml.
        if test_managedsave:
            # Wait for the vm to be ready before managedsave
            time.sleep(wait_before_save_secs)
            ret = virsh.managedsave(vm_name, **virsh_dargs)
            libvirt.check_exit_status(ret)

            def _wait_for_vm_start():
                try:
                    vm.start()
                    return True
                except Exception as e:
                    logging.error(e)

            utils_misc.wait_for(_wait_for_vm_start, timeout=30, step=5)
            vm.wait_for_login().close()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Run save and restore command to check domain xml
        if test_save_restore:
            # Wait for the vm to be ready before save
            time.sleep(wait_before_save_secs)
            check_save_restore()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Check domain xml after restarting libvirtd
        if restart_libvirtd:
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            if test_dom_xml:
                check_dom_xml(at_mem=attach_device)

        # Detach the memory device
        unplug_failed_with_known_error = False
        if detach_device:
            dev_xml = utils_hotplug.create_mem_xml(tg_size, pg_size, mem_addr,
                                                   tg_sizeunit, pg_unit,
                                                   tg_node, node_mask,
                                                   mem_model, mem_discard)
            for x in range(at_times):
                if not detach_alias:
                    ret = virsh.detach_device(vm_name,
                                              dev_xml.xml,
                                              flagstr=attach_option,
                                              debug=True)
                else:
                    ret = virsh.detach_device_alias(vm_name,
                                                    device_alias,
                                                    detach_alias_options,
                                                    debug=True)
                if ret.stderr and host_known_unplug_errors:
                    for known_error in host_known_unplug_errors:
                        if (known_error[0] == known_error[-1]) and \
                           known_error.startswith(("'")):
                            known_error = known_error[1:-1]
                        if known_error in ret.stderr:
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occured in Host, while"
                                " hot unplug: %s", known_error)
                if unplug_failed_with_known_error:
                    break
                try:
                    libvirt.check_exit_status(ret, detach_error)
                except Exception as detail:
                    dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
                    try:
                        session = vm.wait_for_login()
                        utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                                ignore_result=True,
                                                session=session,
                                                level_check=5)
                    except Exception:
                        session.close()
                        test.fail("After memory unplug Unable to connect to VM"
                                  " or unable to collect dmesg")
                    session.close()
                    if os.path.exists(dmesg_file):
                        with open(dmesg_file, 'r') as f:
                            flag = re.findall(
                                r'memory memory\d+?: Offline failed', f.read())
                        if not flag:
                            # No "Offline failed" message in dmesg: the
                            # unplug failure is unexpected, so fail the test
                            os.remove(dmesg_file)
                            test.fail(detail)
                        # Otherwise the attached memory is in use by the vm
                        # and could not be unplugged; this result is expected
                        unplug_failed_with_known_error = True
                        os.remove(dmesg_file)
            # Check whether a known error occurred or not
            dmesg_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            try:
                session = vm.wait_for_login()
                utils_misc.verify_dmesg(dmesg_log_file=dmesg_file,
                                        ignore_result=True,
                                        session=session,
                                        level_check=4)
            except Exception:
                session.close()
                test.fail("After memory unplug Unable to connect to VM"
                          " or unable to collect dmesg")
            session.close()
            if guest_known_unplug_errors and os.path.exists(dmesg_file):
                for known_error in guest_known_unplug_errors:
                    if (known_error[0] == known_error[-1]) and \
                       known_error.startswith(("'")):
                        known_error = known_error[1:-1]
                    with open(dmesg_file, 'r') as f:
                        if known_error in f.read():
                            unplug_failed_with_known_error = True
                            logging.debug(
                                "Known error occured, while hot"
                                " unplug: %s", known_error)
            if test_dom_xml and not unplug_failed_with_known_error:
                check_dom_xml(dt_mem=detach_device)
                # Remove dmesg temp file
                if os.path.exists(dmesg_file):
                    os.remove(dmesg_file)

    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if snapshot_lists:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        if setup_hugepages_flag == "yes":
            restore_hugepages()
        vmxml_backup.sync()
Example #24
def run(test, params, env):
    '''
    Test the command virsh pool-create-as

    (1) Prepare backend storage device
    (2) Define secret xml and set secret value
    (3) Test pool-create-as or virsh pool-define with authentication
    '''

    pool_options = params.get('pool_options', '')
    pool_name = params.get('pool_name')
    pool_type = params.get('pool_type')
    pool_target = params.get('pool_target', '')
    status_error = params.get('status_error') == "yes"

    # iscsi options
    emulated_size = params.get("iscsi_image_size", "1")
    iscsi_host = params.get("iscsi_host", "127.0.0.1")
    chap_user = params.get("iscsi_user")
    chap_passwd = params.get("iscsi_password")

    # ceph options
    ceph_auth_user = params.get("ceph_auth_user")
    ceph_auth_key = params.get("ceph_auth_key")
    ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
    ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
    ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
    ceph_client_name = params.get("ceph_client_name")
    ceph_client_key = params.get("ceph_client_key")
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    key_opt = "--keyring %s" % key_file

    # auth options
    auth_usage = (params.get('auth_usage') == 'yes')
    auth_uuid = (params.get('auth_uuid') == 'yes')
    sec_ephemeral = params.get("secret_ephemeral", "no")
    sec_private = params.get("secret_private", "yes")
    sec_desc = params.get("secret_description")
    auth_type = params.get("auth_type")
    sec_usage = params.get("secret_usage_type")
    sec_target = params.get("secret_usage_target")
    sec_name = params.get("secret_name")
    auth_sec_dict = {
        "sec_ephemeral": sec_ephemeral,
        "sec_private": sec_private,
        "sec_desc": sec_desc,
        "sec_usage": sec_usage,
        "sec_target": sec_target,
        "sec_name": sec_name
    }

    if sec_usage == "iscsi":
        auth_username = chap_user
        sec_password = chap_passwd
        secret_usage = sec_target

    if sec_usage == "ceph":
        auth_username = ceph_auth_user
        sec_password = ceph_auth_key
        secret_usage = sec_name

    if pool_target and not os.path.isdir(pool_target):
        if os.path.isfile(pool_target):
            logging.error('<target> must be a directory')
        else:
            os.makedirs(pool_target)

    def setup_ceph_auth():
        disk_path = ("rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip))
        disk_path += (":id=%s:key=%s" % (ceph_auth_user, ceph_auth_key))

        if not utils_package.package_install(["ceph-common"]):
            test.error("Failed to install ceph-common")

        with open(key_file, 'w') as f:
            f.write("[%s]\n\tkey = %s\n" % (ceph_client_name, ceph_client_key))

        # Delete the disk if it exists
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
               "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
        process.run(cmd, ignore_status=True, shell=True)

        # Create a local image and make a filesystem on it.
        img_file = os.path.join(data_dir.get_tmp_dir(), "test.img")
        disk_cmd = ("qemu-img create -f raw {0} 10M && mkfs.ext4 -F {0}".
                    format(img_file))
        process.run(disk_cmd, ignore_status=False, shell=True)

        # Convert the image to remote storage
        # Ceph can only support raw format
        disk_cmd = ("qemu-img convert -O %s %s %s" %
                    ("raw", img_file, disk_path))
        process.run(disk_cmd, ignore_status=False, shell=True)

    def setup_iscsi_auth():
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd)
        return iscsi_target

    def check_auth_in_xml(dparams):
        sourcexml = pool_xml.PoolXML.new_from_dumpxml(pool_name).get_source()
        with open(sourcexml.xml) as xml_f:
            logging.debug("Source XML is: \n%s", xml_f.read())

        # Check result
        try:
            for name, v_expect in dparams.items():
                if v_expect != sourcexml[name]:
                    test.fail("Expect to find %s=%s, but got %s=%s" %
                              (name, v_expect, name, sourcexml[name]))
        except xcepts.LibvirtXMLNotFoundError as details:
            if "usage not found" in str(details) and auth_uuid:
                pass  # Not an auth_usage test
            elif "uuid not found" in str(details) and auth_usage:
                pass  # Not an auth_uuid test
            else:
                test.fail(details)

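    # For an iscsi pool with CHAP auth, the <source> element checked above
    # typically looks like (illustrative values):
    #   <source>
    #     <host name='127.0.0.1'/>
    #     <device path='iqn.2019-01.com.example:server'/>
    #     <auth type='chap' username='chapuser'>
    #       <secret usage='libvirtiscsi'/>
    #     </auth>
    #   </source>
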
    def check_result(result, expect_error=False):
        # pool-define-as returns a CmdResult
        if isinstance(result, process.CmdResult):
            result = (result.exit_status == 0)  # True means run success

        if expect_error:
            if result:
                test.fail("Expect to fail but run success")
            logging.info("It's an expected error")
        elif not result:
            test.fail("Expect to succeed but run failure")

    if not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Pool create/define with authentication"
                    " not support in this libvirt version")

    sec_uuid = ""
    img_file = ""
    # Start with an empty path so that cleanup only removes
    # the ceph config file if it was actually created
    ceph_cfg = ""
    libvirt_pool = libvirt_storage.StoragePool()
    try:
        # Create secret xml and set value
        encode = True
        if sec_usage == "ceph":
            encode = False  # Ceph key already encoded
        sec_uuid = libvirt.create_secret(auth_sec_dict)
        virsh.secret_set_value(sec_uuid,
                               sec_password,
                               encode=encode,
                               debug=True)

        if sec_usage == "iscsi":
            iscsi_dev = setup_iscsi_auth()
            pool_options += (" --source-host %s --source-dev %s"
                             " --auth-type %s --auth-username %s" %
                             (iscsi_host, iscsi_dev, auth_type, auth_username))

        if sec_usage == "ceph":
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            setup_ceph_auth()
            rbd_pool = ceph_disk_name.split('/')[0]
            pool_options += (
                " --source-host %s --source-name %s"
                " --auth-type %s --auth-username %s" %
                (ceph_host_ip, rbd_pool, auth_type, auth_username))

        if auth_usage:
            pool_options += " --secret-usage %s" % secret_usage

        if auth_uuid:
            pool_options += " --secret-uuid %s" % sec_uuid

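        # With an iscsi backend and usage-based auth, the assembled options
        # make the call below roughly equivalent to (illustrative):
        #   virsh pool-create-as <pool_name> iscsi <pool_target> \
        #       --source-host 127.0.0.1 --source-dev <iqn> \
        #       --auth-type chap --auth-username <user> \
        #       --secret-usage <secret_target>
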
        # Run test cases
        func_name = params.get("test_func", "pool_create_as")
        logging.info('Perform test runner: %s', func_name)
        if func_name == "pool_create_as":
            func = virsh.pool_create_as
        if func_name == "pool_define_as":
            func = virsh.pool_define_as
        result = func(pool_name,
                      pool_type,
                      pool_target,
                      extra=pool_options,
                      debug=True)

        # Check status_error
        check_result(result, expect_error=status_error)
        if not status_error:
            # Check pool status
            pool_status = libvirt_pool.pool_state(pool_name)
            if ((pool_status == 'inactive' and func_name == "pool_define_as")
                    or
                (pool_status == "active" and func_name == "pool_create_as")):
                logging.info("Expected pool status:%s" % pool_status)
            else:
                test.fail("Not an expected pool status: %s" % pool_status)
            # Check pool dumpxml
            dict_expect = {
                "auth_type": auth_type,
                "auth_username": auth_username,
                "secret_usage": secret_usage,
                "secret_uuid": sec_uuid
            }
            check_auth_in_xml(dict_expect)
    finally:
        # Clean up
        logging.info("Start to cleanup")
        # Remove ceph configure file if created.
        if ceph_cfg:
            os.remove(ceph_cfg)
        if os.path.exists(img_file):
            os.remove(img_file)
        virsh.secret_undefine(sec_uuid, ignore_status=True)
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
        if libvirt_pool.pool_exists(pool_name):
            libvirt_pool.delete_pool(pool_name)
Example #25
def run(test, params, env):
    """
    Test command: nodedev-list [--tree] [--cap <string>]

    1) Run nodedev-list command and check return code.
    2) If `cap_option == one`, results are also compared
       with devices retrieved from sysfs.
    """
    def _check_result(cap, ref_list, result, mode):
        """
        Check test result against a device list retrieved from sysfs.

        :param cap:        Capability being checked, current available caps are
                           defined in variable `caps`.
        :param ref_list:   Reference device list retrieved from sysfs.
        :param result:     Stdout returned from virsh nodedev-list command.
        :param mode:       How to compare sysfs info with command output:
                           "exact" or "similar".
        """
        check_list = result.strip().splitlines()
        are_not_equivalent = True
        if mode == "similar":
            listed = [x for x in ref_list if x in result]
            all_sysfs_info_listed = len(ref_list) == len(listed)
            same_number_of_devices = len(ref_list) == len(check_list)
            are_not_equivalent = (not all_sysfs_info_listed
                                  or not same_number_of_devices)
        elif mode == "exact":
            are_not_equivalent = set(ref_list) != set(check_list)
        else:
            logging.error("Unknown comparison mode in result check: %s", mode)
            return False

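        # Capabilities whose listings can't be reliably verified against
        # sysfs; mismatches for them are tolerated below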
        uavail_caps = ['system', 'vports', 'fc_host']

        if are_not_equivalent and cap not in uavail_caps:
            logging.error('Difference in capability %s:', cap)
            logging.error('Expected devices: %s', ref_list)
            logging.error('Result devices  : %s', check_list)
            return False
        return True

    mode = params.get("comparison_mode", "exact")
    all_caps = [
        'system', 'pci', 'usb_device', 'usb', 'net', 'scsi_host',
        'scsi_target', 'scsi', 'storage', 'fc_host', 'vports', 'scsi_generic',
        'ccw', 'css'
    ]
    expect_succeed = params.get('expect_succeed', 'yes')
    tree_option = params.get('tree_option', 'off')
    cap_option = params.get('cap_option', 'off')
    caps = get_avail_caps(all_caps)
    check_failed = False

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_dargs = {}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri

    tree = (tree_option == 'on')
    if cap_option == 'one':
        devices = {}
        for cap in caps:
            devices[cap] = get_devices_by_cap(cap)

        for cap in devices:
            logging.debug(cap + ':')
            for device in devices[cap]:
                logging.debug('    ' + device)

        for cap in caps:
            result = virsh.nodedev_list(tree=tree, cap=cap, **virsh_dargs)
            if result.exit_status != 0 and expect_succeed == 'yes':
                break
            elif result.exit_status == 0 and expect_succeed == 'no':
                break
            if not _check_result(cap, devices[cap], result.stdout.strip(),
                                 mode):
                check_failed = True
                break
    else:
        cap = ''
        if cap_option != 'off':
            if cap_option == 'multi':
                cap = ','.join(caps)
            elif cap_option == 'long':
                cap = ','.join(['pci', 'usb', 'net', 'storage', 'scsi'] * 5000)
            else:
                cap = cap_option
        result = virsh.nodedev_list(tree=tree, cap=cap, **virsh_dargs)

    logging.debug(result)
    if expect_succeed == 'yes':
        if result.exit_status != 0:
            test.fail('Expected success, but failed with result:\n%s' % result)
    elif expect_succeed == 'no':
        if result.exit_status == 0:
            test.fail('Expected failure, but succeeded with result:\n%s' % result)
    if check_failed:
        test.fail('Check failed. result:\n%s' % result)
Example #26
    def check_cpu(xml, cpu_match, arch, model, policies):
        """
        Check the dumpxml result for --update-cpu option

        Note: custom_cpu() hard-codes these features and policies,
        so after running virsh dumpxml --update-cpu:
        1. For match='minimum', all features supported by the host will be
           added, and match changes to 'exact'. Since libvirt-3.0, cpu update
           is reworked, and a custom CPU with minimum match is converted
           similarly to host-model.
        2. policy='optional' features (supported by the host) will be updated
           to policy='require'
        3. policy='optional' features (unsupported by the host) will be
           updated to policy='disable'
        4. Other policies ('disable|force|forbid|require') keep their
           original values
        """
        vmxml = vm_xml.VMXML()
        vmxml['xml'] = xml
        vmcpu_xml = vmxml['cpu']
        check_pass = True
        require_count = 0
        expect_require_features = 0
        cpu_feature_list = vmcpu_xml.get_feature_list()
        host_capa = capability_xml.CapabilityXML()
        is_supported_on_host = is_supported_on_host_func(host_capa)
        for i in range(len(cpu_feature_list)):
            f_name = vmcpu_xml.get_feature_name(i)
            f_policy = vmcpu_xml.get_feature_policy(i)
            err_msg = "Policy of '%s' is not expected: %s" % (f_name, f_policy)
            expect_policy = "disable"
            if f_name in policies:
                if policies[f_name] == "optional" and arch != "s390x":
                    if is_supported_on_host(f_name):
                        expect_policy = "require"
                else:
                    expect_policy = policies[f_name]
                if f_policy != expect_policy:
                    logging.error(err_msg)
                    check_pass = False
            # Count expect require features
            if expect_policy == "require":
                expect_require_features += 1
            # Count actual require features
            if f_policy == "require":
                require_count += 1

        # Check optional feature is changed to require/disable
        expect_model = model

        if cpu_match == "minimum":
            # libvirt commit 3b6be3c0 change the behavior of update-cpu
            # Check model is changed to host cpu-model given in domcapabilities
            if libvirt_version.version_compare(3, 0, 0):
                expect_model = host_capa.model
            expect_match = "exact"
            # Supported 'require' features differ from host to host, so just
            # check that the actual number of 'require' features is at least
            # the expected number
            if require_count < expect_require_features:
                logging.error("Found %d require features, but expect >=%s",
                              require_count, expect_require_features)
                check_pass = False
        else:
            expect_match = cpu_match
            if require_count != expect_require_features:
                logging.error("Found %d require features, but expect %s",
                              require_count, expect_require_features)
                check_pass = False

        logging.debug("Expect 'match' value is: %s", expect_match)
        match = vmcpu_xml['match']
        if match != expect_match:
            logging.error("CPU match '%s' is not expected", match)
            check_pass = False
        logging.debug("Expect 'model' value is: %s", expect_model)
        if vmcpu_xml['model'] != expect_model:
            logging.error("CPU model %s is not expected", vmcpu_xml['model'])
            check_pass = False
        return check_pass
Example #27
def run(test, params, env):
    """
    Test command: virsh domblkinfo.
    1.Prepare test environment.
    2.Get vm's driver.
    3.According to driver perform virsh domblkinfo operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    def attach_disk_test(test_disk_source, front_dev):
        """
        Attach-disk testcase.
        1.Attach a disk to guest.
        2.Perform domblkinfo operation.
        3.Detach the disk.

        :param: test_disk_source disk source file path.
        :param: front_dev front end device name.
        :return: Command status and output.
        """
        try:
            disk_source = test_disk_source
            front_device = front_dev
            with open(disk_source, 'wb') as source_file:
                source_file.seek((512 * 1024 * 1024) - 1)
                source_file.write(str(0).encode())
            virsh.attach_disk(vm_name, disk_source, front_device, debug=True)
            vm_ref = vm_name
            if "--all" in extra:
                disk_source = ""
                vm_ref = "%s %s" % (vm_name, extra)
            result_source = virsh.domblkinfo(vm_ref,
                                             disk_source,
                                             ignore_status=True,
                                             debug=True)
            status_source = result_source.exit_status
            output_source = result_source.stdout.strip()
            if driver == "qemu":
                if "--all" in extra:
                    front_device = ""
                result_target = virsh.domblkinfo(vm_ref,
                                                 front_device,
                                                 ignore_status=True,
                                                 debug=True)
                status_target = result_target.exit_status
                output_target = result_target.stdout.strip()
            else:
                status_target = 0
                output_target = "Xen doesn't support domblkinfo target!"
            front_device = front_dev
            virsh.detach_disk(vm_name, front_device, debug=True)
            return status_target, output_target, status_source, output_source
        except (process.CmdError, IOError):
            return 1, "", 1, ""

    def check_disk_info():
        """
        Check virsh domblkinfo output.
        """
        if driver == "qemu" and output_source.strip() != output_target.strip():
            test.fail("Command domblkinfo target/source"
                      " got different information!")
        if output_source != "":
            lines = output_source.splitlines()
            if "--human" in extra and not any(
                    re.findall(r'GiB|MiB', lines[0], re.IGNORECASE)):
                test.fail("Command domblkinfo human output is wrong")
            if "--all" in extra:
                blocklist = vm_xml.VMXML.get_disk_blk(vm_name)
                if not all(
                        re.findall(r''.join(block), output_source,
                                   re.IGNORECASE) for block in blocklist):
                    test.fail("Command domblkinfo --all output is wrong")
                return
            if disk_size_check:
                capacity_cols = lines[0].split(":")
                if "--human" in extra:
                    size = float(capacity_cols[1].strip().split(" ")[0])
                else:
                    size = int(capacity_cols[1].strip())
                if disk_size != size:
                    test.fail("Command domblkinfo output is wrong! "
                              "'%d' != '%d'" % (disk_size, size))
        else:
            test.fail("Command domblkinfo has no output!")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Get all parameters from configuration.
    vm_ref = params.get("domblkinfo_vm_ref")
    device = params.get("domblkinfo_device", "yes")
    front_dev = params.get("domblkinfo_front_dev", "vdd")
    extra = params.get("domblkinfo_extra", "")
    status_error = params.get("status_error", "no")
    test_attach_disk = os.path.join(test.virtdir, "tmp.img")

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    driver = virsh.driver()

    blklist = vm_xml.VMXML.get_disk_blk(vm_name)
    sourcelist = vm_xml.VMXML.get_disk_source(vm_name)
    test_disk_target = blklist[0]
    test_disk_source = sourcelist[0].find('source').get('file')
    test_disk_format = sourcelist[0].find('driver').get('type')

    disk_size_check = False
    if test_disk_format == "raw":
        disk_size_check = True
    if device == "no":
        test_disk_target = ""
        test_disk_source = ""

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = "%s %s" % (vm_name, extra)
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if any(re.findall(
            r'--all|--human', extra,
            re.IGNORECASE)) and not libvirt_version.version_compare(4, 5, 0):
        test.cancel(
            "--all and --human options are not supported before libvirt 4.5.0"
        )

    if vm_ref == "test_attach_disk":
        test_disk_source = test_attach_disk
        disk_size_check = True
        (status_target, output_target, status_source,
         output_source) = attach_disk_test(test_disk_source, front_dev)
    else:
        result_source = virsh.domblkinfo(vm_ref,
                                         test_disk_source,
                                         ignore_status=True,
                                         debug=True)
        status_source = result_source.exit_status
        output_source = result_source.stdout.strip()
        if driver == "qemu":
            result_target = virsh.domblkinfo(vm_ref,
                                             test_disk_target,
                                             ignore_status=True,
                                             debug=True)
            status_target = result_target.exit_status
            output_target = result_target.stdout.strip()
        else:
            status_target = 0
            output_target = "xen doesn't support domblkinfo target!"
    disk_size = 0
    if os.path.exists(test_disk_source):
        disk_size = os.path.getsize(test_disk_source)

    # Recover environment
    if os.path.exists(test_attach_disk):
        os.remove(test_attach_disk)

    # Check status_error
    if status_error == "yes":
        if status_target == 0 or status_source == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status_target != 0 or status_source != 0:
            test.fail("Run failed with right command")
        # Check source information.
        check_disk_info()
    else:
        test.fail("The status_error must be 'yes' or 'no'!")
Example #28
def run(test, params, env):
    """
    Test command: virsh dumpxml.

    1) Prepare parameters.
    2) Set options of virsh dumpxml.
    3) Prepare environment: vm_state, etc.
    4) Run dumpxml command.
    5) Recover environment.
    6) Check result.
    """
    def is_dumpxml_of_running_vm(dumpxml, domid):
        """
        To check whether the dumpxml is got during vm is running.
        (Verify the domid in dumpxml)

        :param dumpxml: the output of virsh dumpxml.
        :param domid: the id of vm
        """
        match_string = "<domain.*id='%s'" % domid
        if re.search(match_string, dumpxml):
            return True
        return False

    def custom_cpu(vm_name, cpu_match, model, policies):
        """
        Custom guest cpu match/model/features for --update-cpu option.

        :param vm_name: name of the domain
        :param cpu_match: match mode
        :param model: cpu model
        :param policies: features and their policies dict
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmcpu_xml = vm_xml.VMCPUXML()
        vmcpu_xml['match'] = cpu_match
        vmcpu_xml['model'] = model
        for feature in policies:
            vmcpu_xml.add_feature(feature, policies[feature])
        vmxml['cpu'] = vmcpu_xml
        logging.debug('Custom VM CPU: %s', vmcpu_xml.xmltreefile)
        vmxml.sync()

    def get_cpu_features():
        """
        Get all supported CPU features

        :return: list of feature string
        """
        features = []
        dom_capa = domcapability_xml.DomCapabilityXML()
        modelname = dom_capa.get_hostmodel_name()
        for item in dom_capa.get_additional_feature_list('host-model'):
            for key, value in item.items():
                if value == 'require':
                    features.append(key)
        return list(set(features) | set(cpu.get_model_features(modelname)))

    def get_cpu_model_policies(arch):
        """
        Get model and policies to be set
        :param arch: architecture, e.g. x86_64
        :return model, policies: cpu model and features with their policies
        """
        if arch == "s390x":
            return "z13.2-base", {
                "msa1": "require",
                "msa2": "force",
                "edat": "disable",
                "vmx": "forbid"
            }
        else:
            return "Penryn", {
                "xtpr": "optional",
                "tm2": "disable",
                "est": "force",
                "vmx": "forbid",
                # Unsupported feature 'ia64'
                "ia64": "optional",
                "vme": "optional"
            }

    def is_supported_on_host_func(host_capa):
        """
        Create function to determine if feature is supported on host

        :param host_capa: previously loaded host capability xml
        :return: func to determine if supported on host
        """
        # Check if feature is supported on the host
        # Since libvirt 3.9, libvirt queries qemu/kvm
        # to determine whether a feature is supported
        if libvirt_version.version_compare(3, 9, 0):
            cpu_features = get_cpu_features()

            def is_supported_on_host(f_name):
                return f_name in cpu_features
        else:

            def is_supported_on_host(f_name):
                return host_capa.check_feature_name(f_name)

        return is_supported_on_host

    def check_cpu(xml, cpu_match, arch, model, policies):
        """
        Check the dumpxml result for --update-cpu option

        Note: custom_cpu() hard-codes these features and policies,
        so after running virsh dumpxml --update-cpu:
        1. For match='minimum', all features supported by the host will be
           added, and match changes to 'exact'. Since libvirt-3.0, cpu update
           is reworked, and a custom CPU with minimum match is converted
           similarly to host-model.
        2. policy='optional' features (supported by the host) will be updated
           to policy='require'
        3. policy='optional' features (unsupported by the host) will be
           updated to policy='disable'
        4. Other policies ('disable|force|forbid|require') keep their
           original values
        """
        vmxml = vm_xml.VMXML()
        vmxml['xml'] = xml
        vmcpu_xml = vmxml['cpu']
        check_pass = True
        require_count = 0
        expect_require_features = 0
        cpu_feature_list = vmcpu_xml.get_feature_list()
        host_capa = capability_xml.CapabilityXML()
        is_supported_on_host = is_supported_on_host_func(host_capa)
        for i in range(len(cpu_feature_list)):
            f_name = vmcpu_xml.get_feature_name(i)
            f_policy = vmcpu_xml.get_feature_policy(i)
            err_msg = "Policy of '%s' is not expected: %s" % (f_name, f_policy)
            expect_policy = "disable"
            if f_name in policies:
                if policies[f_name] == "optional" and arch != "s390x":
                    if is_supported_on_host(f_name):
                        expect_policy = "require"
                else:
                    expect_policy = policies[f_name]
                if f_policy != expect_policy:
                    logging.error(err_msg)
                    check_pass = False
            # Count expect require features
            if expect_policy == "require":
                expect_require_features += 1
            # Count actual require features
            if f_policy == "require":
                require_count += 1

        # Check optional feature is changed to require/disable
        expect_model = model

        if cpu_match == "minimum":
            # libvirt commit 3b6be3c0 change the behavior of update-cpu
            # Check model is changed to host cpu-model given in domcapabilities
            if libvirt_version.version_compare(3, 0, 0):
                expect_model = host_capa.model
            expect_match = "exact"
            # Supported 'require' features differ from host to host, so just
            # check that the actual number of 'require' features is at least
            # the expected number
            if require_count < expect_require_features:
                logging.error("Found %d require features, but expect >=%s",
                              require_count, expect_require_features)
                check_pass = False
        else:
            expect_match = cpu_match
            if require_count != expect_require_features:
                logging.error("Found %d require features, but expect %s",
                              require_count, expect_require_features)
                check_pass = False

        logging.debug("Expect 'match' value is: %s", expect_match)
        match = vmcpu_xml['match']
        if match != expect_match:
            logging.error("CPU match '%s' is not expected", match)
            check_pass = False
        logging.debug("Expect 'model' value is: %s", expect_model)
        if vmcpu_xml['model'] != expect_model:
            logging.error("CPU model %s is not expected", vmcpu_xml['model'])
            check_pass = False
        return check_pass

    # Prepare parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    vm_ref = params.get("dumpxml_vm_ref", "domname")
    options_ref = params.get("dumpxml_options_ref", "")
    options_suffix = params.get("dumpxml_options_suffix", "")
    vm_state = params.get("dumpxml_vm_state", "running")
    security_pwd = params.get("dumpxml_security_pwd", "123456")
    status_error = "yes" == params.get("status_error", "no")
    cpu_match = params.get("cpu_match", "minimum")

    arch = platform.machine()
    if arch == 's390x' and cpu_match == 'minimum':
        test.cancel("Minimum mode not supported on s390x")

    # acl polkit params
    setup_libvirt_polkit = "yes" == params.get('setup_libvirt_polkit')
    if not libvirt_version.version_compare(1, 1, 1):
        if setup_libvirt_polkit:
            test.cancel(
                "API acl test not supported in current libvirt version")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user and setup_libvirt_polkit:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if options_ref.count("update-cpu"):
        model, policies = get_cpu_model_policies(arch)
        custom_cpu(vm_name, cpu_match, model, policies)
    elif options_ref.count("security-info"):
        new_xml = backup_xml.copy()
        try:
            vm_xml.VMXML.add_security_info(new_xml, security_pwd)
        except Exception as info:
            test.cancel(info)
    domuuid = vm.get_uuid()
    domid = vm.get_id()

    # Prepare vm state for test
    if vm_state == "shutoff" and vm.is_alive():
        vm.destroy()  # Confirm vm is shutoff

    if vm_ref == "domname":
        vm_ref = vm_name
    elif vm_ref == "domid":
        vm_ref = domid
    elif vm_ref == "domuuid":
        vm_ref = domuuid
    elif vm_ref == "hex_id":
        if domid == "-":
            vm_ref = domid
        else:
            vm_ref = hex(int(domid))

    if options_suffix:
        options_ref = "%s %s" % (options_ref, options_suffix)

    # Run command
    try:
        cmd_result = virsh.dumpxml(vm_ref,
                                   extra=options_ref,
                                   uri=uri,
                                   unprivileged_user=unprivileged_user,
                                   debug=True)
        utlv.check_exit_status(cmd_result, status_error)
        output = cmd_result.stdout.strip()

        # Check result
        if not status_error:
            if (options_ref.count("inactive")
                    and is_dumpxml_of_running_vm(output, domid)):
                test.fail("Found domain id in XML when run virsh dumpxml"
                          " with --inactive option")
            elif options_ref.count("update-cpu"):
                if not check_cpu(output, cpu_match, arch, model, policies):
                    test.fail("update-cpu option check failed")
            elif options_ref.count("security-info"):
                if not output.count("passwd='%s'" % security_pwd):
                    test.fail("No security info found")
            else:
                if (vm_state == "shutoff"
                        and is_dumpxml_of_running_vm(output, domid)):
                    test.fail("Found domain id in XML when run virsh dumpxml"
                              " for a shutoff VM")
    finally:
        backup_xml.sync()
Example #29
def run(test, params, env):
    """
    Test iothreads related tests

    1) configuration tests for iothreadids/iothreads/iothreadpin/iothreadsched
    2) check for iothreadadd/del/pin operation
    3) check for iothread with disk attached
    4) set and check iothread parameters when vm is running
    5) configure iothread_quota/iothread_period for vm
       without defining iothreads

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def update_iothread_xml(define_error=False):
        """
        Update xml for test
        """
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        del vmxml.cputune
        del vmxml.iothreadids
        del vmxml.iothreads

        vm_is_active = vm.is_alive()

        # Set iothreads first
        if iothread_ids:
            ids_xml = vm_xml.VMIothreadidsXML()
            ids_xml.iothread = iothread_ids.split()
            vmxml.iothreadids = ids_xml
        # Set cputune
        if any([iothreadpins, iothreadscheds, iothread_quota, iothread_period]):
            cputune_xml = vm_xml.VMCPUTuneXML()
            if iothreadpins:
                io_pins = []
                for pins in iothreadpins.split():
                    thread, cpuset = pins.split(':')
                    io_pins.append({"iothread": thread,
                                    "cpuset": cpuset})
                cputune_xml.iothreadpins = io_pins
            if iothreadscheds:
                io_scheds = []
                for sched in iothreadscheds.split():
                    thread, scheduler = sched.split(":")
                    io_scheds.append({"iothreads": thread,
                                      "scheduler": scheduler})
                cputune_xml.iothreadscheds = io_scheds
            if iothread_period:
                cputune_xml.iothread_period = int(iothread_period)
            if iothread_quota:
                cputune_xml.iothread_quota = int(iothread_quota)

            vmxml.cputune = cputune_xml

        # Set iothread
        if iothread_num:
            vmxml.iothreads = int(iothread_num)

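        # The resulting domain XML carries fragments like (illustrative):
        #   <iothreads>4</iothreads>
        #   <iothreadids><iothread id='1'/></iothreadids>
        #   <cputune>
        #     <iothreadpin iothread='1' cpuset='0-1'/>
        #     <iothreadsched iothreads='1' scheduler='batch'/>
        #   </cputune>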
        logging.debug("Pre-test xml is %s", vmxml)
        if not define_error:
            vmxml.sync()
            if vm_is_active:
                vm.start()
                vm.wait_for_login().close()
        else:
            result = virsh.define(vmxml.xml, debug=True)
            libvirt.check_exit_status(result, True)
            if err_msg:
                libvirt.check_result(result, err_msg)

    def get_default_cpuset():
        """
        Get default cpuset

        :return: default cpuset value
        """
        default_cpuset = ""
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        try:
            default_cpuset = vmxml.cpuset
        except LibvirtXMLNotFoundError:
            cmd = "lscpu | awk '/On-line CPU/ {print $NF}'"
            default_cpuset = process.run(cmd, shell=True).stdout_text.strip()
        logging.debug("default cpuset is %s", default_cpuset)
        return default_cpuset

    def default_iothreadinfo():
        """
        Generate default iothreadinfo output from xml settings

        :return: default iothreadinfo dict
        """
        exp_info = {}
        cpu_affinity = get_default_cpuset()

        if iothread_ids:
            for iothread_id in iothread_ids.split():
                exp_info[iothread_id] = cpu_affinity
        if iothread_num:
            if iothread_ids:
                iothreadid_list = iothread_ids.split()
                if int(iothread_num) > len(iothreadid_list):
                    needs = int(iothread_num) - len(iothreadid_list)
                    for id in range(1, max(int(iothread_num),
                                           max([int(x)
                                                for x in iothreadid_list])) + 1):
                        if needs > 0:
                            if str(id) not in iothreadid_list:
                                exp_info[str(id)] = cpu_affinity
                                needs = needs - 1
                        else:
                            break
            else:
                for id in range(1, int(iothread_num)+1):
                    exp_info[str(id)] = cpu_affinity

        logging.debug("exp_iothread_info is %s", exp_info)
        return exp_info

    def update_expected_iothreadinfo(org_info, id, act="add", cpuset=None):
        """
        Update expected iothreadinfo dict

        :param org_info: original iothreadinfo dict
        :param id: thread id
        :param act: action to do, it may be "add", "del" or "updated"
        :param cpuset: cpuset to be updated
        """
        if act == "add":
            org_info[id] = get_default_cpuset()
        elif act == "del":
            if id in org_info:
                del org_info[id]
            else:
                logging.debug("No such key {} in {}".format(id, org_info))
        elif act == "update":
            if not cpuset:
                cpuset = get_default_cpuset()
            org_info[id] = cpuset
        else:
            logging.error("Incorrect action!")

    def get_iothread_pool(vm_name, thread_id):
        """
        Get iothread pool values for the specified iothread id

        :param vm_name: name of vm
        :param thread_id: thread id
        :return: iothread pool time values
        """
        iothread_pool = {}
        domstats_output = virsh.domstats(vm_name, "--iothread", debug=True)

        for item in re.findall("iothread."+thread_id+".poll.*",
                               domstats_output.stdout):
            iothread_pool[item.split("=")[0]] = item.split("=")[1]

        logging.debug("iothread pool values for thread id {} are {}."
                      .format(thread_id, iothread_pool))
        return iothread_pool

    def exec_iothreaddel():
        """
        Run "virsh iothreaddel" and check if xml is updated correctly

        :raise: test.fail if virsh command failed
        """
        logging.debug("doing iothread del")
        result = virsh.iothreaddel(vm_name, iothreaddel,
                                   debug=True, ignore_status=True)
        libvirt.check_exit_status(result, status_error)
        if not status_error:
            update_expected_iothreadinfo(exp_iothread_info,
                                         iothreaddel, "del")
            xml_info = vm_xml.VMXML.new_from_dumpxml(vm_name)
            try:
                iothreads = xml_info.iothreadids.iothread
            except LibvirtXMLNotFoundError:
                logging.debug("No iothreadids in xml")
            else:
                if iothreaddel in iothreads:
                    test.fail("The iothread id {} is not removed from xml."
                              .format(iothreaddel))
        else:
            if err_msg:
                libvirt.check_result(result, err_msg)

    def exec_iothreadadd():
        """
        Run "virsh iothreadadd" and check xml

        :raise: test.fail if virsh command failed
        """

        virsh.iothreadadd(vm_name, iothreadadd, debug=True)
        update_expected_iothreadinfo(exp_iothread_info,
                                     iothreadadd, "add")
        # Check xml
        xml_info = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if iothreadadd not in xml_info.iothreadids.iothread:
            test.fail("The iothread id {} is not added into xml"
                      .format(iothreadadd))

    def exec_iothreadpin():
        """
        Run "virsh iothreadpin" and check xml

        :raise: test.fail if virsh command failed
        """

        thread_id, cpuset = iothreadpin.split()
        virsh.iothreadpin(vm_name, thread_id, cpuset, debug=True)
        update_expected_iothreadinfo(exp_iothread_info,
                                     thread_id, "update", cpuset)
        # Check xml
        xml_info = vm_xml.VMXML.new_from_dumpxml(vm_name)
        item = {'cpuset': cpuset, 'iothread': thread_id}
        if item not in xml_info.cputune.iothreadpins:
            test.fail("Unable to get {} from xml".format(item))

    def exec_iothreadset():
        """
        Run "virsh iothreadset" and check if iothread pool values are updated
        or not

        :raise: test.fail if the result of virsh command is not as expected
        """
        # The command "virsh iothreadset" needs vm in running stats
        if not vm.is_alive():
            vm.start()
            vm.wait_for_login().close()

        # Check domstats before run virsh iothreadset
        global ORG_IOTHREAD_POOL
        ORG_IOTHREAD_POOL = get_iothread_pool(vm_name, iothreadset_id)
        result = virsh.iothreadset(vm_name, iothreadset_id, iothreadset_val,
                                   debug=True, ignore_status=True)

        libvirt.check_exit_status(result, status_error)
        if err_msg:
            libvirt.check_result(result, expected_fails=err_msg)

        # Check domstats again
        global UPDATE_IOTHREAD_POOL
        UPDATE_IOTHREAD_POOL = get_iothread_pool(vm_name, iothreadset_id)
        check_iothread_pool(ORG_IOTHREAD_POOL, UPDATE_IOTHREAD_POOL,
                            status_error)

        # Check if the values are updated as expected
        if not status_error:
            lst = iothreadset_val.split()
            exp_pool = {re.sub('--', "iothread."+iothreadset_id+".",
                        lst[i]): lst[i + 1] for i in range(0, len(lst), 2)}
            check_iothread_pool(UPDATE_IOTHREAD_POOL, exp_pool, True)

    def exec_attach_disk(vm_name, source, target, thread_id,
                         ignore_status=False):
        """
        Attach disk with iothread and check the result

        :param vm_name: name of guest
        :param source: source of disk device
        :param target: target of disk device
        :param thread_id: thread id
        :param ignore_status: True - not raise exception when failed
                              False - raise exception when failed
        :raise: test.fail
        """

        result = virsh.attach_disk(vm_name, source, target,
                                   "--iothread "+thread_id,
                                   ignore_status=ignore_status, debug=True)
        libvirt.check_exit_status(result, ignore_status)
        if not ignore_status:
            act_id = vmxml.get_disk_attr(vm_name, target, "driver", "iothread")
            if thread_id != act_id:
                test.fail("The iothread id in xml is incorrect. Expected: {} "
                          "Actual: {}".format(thread_id, act_id))
        else:
            if err_msg:
                libvirt.check_result(result, err_msg)

    def exec_detach_disk(vm_name, target, disk_path):
        """
        Detach disk with iothread and check the result

        :param vm_name: name of guest
        :param target: target of disk device
        :param disk_path: disk image path
        :param dargs: standardized virsh function API keywords
        :raise: test.fail if disk is not detached
        """
        virsh.detach_disk(vm_name, disk_path, debug=True)

        def _check_disk(target):
            return target not in vm.get_blk_devices()

        if not utils_misc.wait_for(lambda: _check_disk(target), 10):
            test.fail("Disk {} is not detached.".format(target))

    def exec_iothreaddel_without_detach_disk(vm_name, disk_path, disk_target,
                                             disk_thread_id):
        """
        Test iothreaddel without detach disk which is attached with iothread

        :param vm_name: name of guest
        :param disk_path: disk image path
        :param disk_target: target of disk source
        :param disk_thread_id: thread id to be attached
        """
        exec_iothreadadd()
        exec_attach_disk(vm_name, disk_path, disk_target, disk_thread_id)
        exec_iothreaddel()

    def check_iothread_pool(org_pool, act_pool, is_equal=False):
        """
        Compare the iothread pool values between original and actual ones

        :param org_pool: original pool
        :param act_pool: actual pool
        :param is_equal: True to check that the two pools hold the same
                         values, False to check that they differ
        :raise: test.fail if result does not show as expected
        """
        if (org_pool == act_pool) != is_equal:
            err_info = ("The iothread pool values haven't been updated!"
                        "Expected: {}, Actual: {}".format(org_pool, act_pool))
            if is_equal:
                err_info = ("The iothread pool values have been updated "
                            "unexpectedly! Expected: {}, Actual: {}"
                            .format(org_pool, act_pool))
            test.fail(err_info)

    def check_schedinfo():
        """
        Check schedinfo operation
        """
        def _exec_schedinfo(items, update_error=False):
            """
            Run "virsh schedinfo" command and check result

            :param items: items to be matched
            :param update_error: True - raise exception when items are updated
                                 False - raise exception when items are
                                         not updated
            :raise: test.fail when "virsh schedinfo" command failed
            """
            result = virsh.schedinfo(vm_name, debug=True)
            libvirt.check_exit_status(result)
            if update_error:
                items.update({"iothread_period": 100000,
                              "iothread_quota": '(17592186044415|-1)'})
            for key, val in items.items():
                if not re.findall(key + r'\s*:\s+' + str(val), result.stdout):
                    test.fail("Unable to find expected value {}:{} from {}"
                              .format(key, val, result))

        items = {}
        if iothread_quota:
            items["iothread_quota"] = int(iothread_quota)
        if iothread_period:
            items["iothread_period"] = int(iothread_period)

        if not items:
            test.error("schedinfo: Nothing to check!")

        _exec_schedinfo(items)
        if not vm.is_alive():
            vm.start()
            vm.wait_for_login().close()
        _exec_schedinfo(items, True)

    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)

    iothread_num = params.get("iothread_num")
    iothread_ids = params.get("iothread_ids")
    iothreadpins = params.get("iothreadpins")
    iothreaddel = params.get("iothreaddel")
    iothreadadd = params.get("iothreadadd")
    iothreadpin = params.get("iothreadpin")
    iothreadset_id = params.get("iothreadset_id")
    iothreadset_val = params.get("iothreadset_val")
    iothreadscheds = params.get("iothreadscheds")
    iothread_quota = params.get("iothread_quota")
    iothread_period = params.get("iothread_period")

    # For attach/detach disk test
    create_disk = "yes" == params.get("create_disk", "no")
    disk_size = params.get("disk_size", "30M")
    disk_format = params.get("disk_format", "qcow2")
    disk_target = params.get("disk_target", "vdb")
    disk_img = params.get("disk_img", "test_disk.qcow2")
    disk_thread_id = params.get("disk_thread_id", "1")

    pre_vm_stats = params.get("pre_vm_stats")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    restart_vm = "yes" == params.get("restart_vm", "no")
    start_vm = "yes" == params.get("start_vm", "no")
    test_operations = params.get("test_operations")

    status_error = "yes" == params.get("status_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    err_msg = params.get("err_msg")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    try:
        if iothreadset_id and not libvirt_version.version_compare(5, 0, 0):
            test.cancel("This version of libvirt doesn't support "
                        "virsh command: iothreadset")

        if pre_vm_stats == "running":
            if not vm.is_alive():
                vm.start()
                vm.wait_for_login().close()
        else:
            if vm.is_alive():
                vm.destroy()

        # Update xml for test
        if define_error:
            update_iothread_xml(True)
        else:
            update_iothread_xml()
            exp_iothread_info = default_iothreadinfo()

            # For disk attach/detach test
            if create_disk:
                disk_path = os.path.join(data_dir.get_tmp_dir(), disk_img)
                image_cmd = "qemu-img create -f %s %s %s" % (disk_format,
                                                             disk_path,
                                                             disk_size)
                logging.info("Create image for disk: %s", image_cmd)
                process.run(image_cmd, shell=True)

            if test_operations:
                for action in test_operations.split(","):
                    if action == "iothreaddel":
                        exec_iothreaddel()
                    elif action == "iothreadadd":
                        exec_iothreadadd()
                    elif action == "iothreadpin":
                        exec_iothreadpin()
                    elif action == "iothreadset":
                        exec_iothreadset()
                    elif action == "checkschedinfo":
                        check_schedinfo()
                    elif action == "attachdisk":
                        exec_attach_disk(vm_name, disk_path, disk_target,
                                         disk_thread_id,
                                         ignore_status=status_error)
                    elif action == "detachdisk":
                        exec_detach_disk(vm_name, disk_target, disk_path)
                    elif action == "deletewithoutdetach":
                        exec_iothreaddel_without_detach_disk(vm_name, disk_path,
                                                             disk_target,
                                                             disk_thread_id)
                    else:
                        test.error("Unknown operation: %s" % action)

            if restart_libvirtd:
                utils_libvirtd.libvirtd_restart()
                if iothreadset_id and iothreadset_val:
                    after_restart_domstas = get_iothread_pool(vm_name,
                                                              iothreadset_id)
                    check_iothread_pool(UPDATE_IOTHREAD_POOL,
                                        after_restart_domstas, True)

            # Check if vm could start successfully
            if start_vm:
                if vm.is_alive():
                    vm.destroy()
                result = virsh.start(vm_name, debug=True)
                libvirt.check_exit_status(result, status_error)
                if err_msg:
                    libvirt.check_result(result, expected_fails=err_msg)

            if not status_error:
                iothread_info = libvirt.get_iothreadsinfo(vm_name)
                if exp_iothread_info != iothread_info:
                    test.fail("Unexpected value! Expect {} but get {}."
                              .format(exp_iothread_info, iothread_info))
                if restart_vm:
                    logging.debug("restarting vm")
                    if vm.is_alive():
                        vm.destroy()
                    vm.start()
                    vm.wait_for_login()
                    if iothreadset_id and iothreadset_val:
                        restart_vm_domstas = get_iothread_pool(vm_name,
                                                               iothreadset_id)
                        check_iothread_pool(ORG_IOTHREAD_POOL,
                                            restart_vm_domstas, True)

    finally:
        logging.debug("Recover test environment")
        if vm.is_alive():
            vm.destroy()

        bkxml.sync()
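
The iothread info verified above ultimately comes from "virsh iothreadinfo". Below is a minimal standalone sketch of parsing that output with plain subprocess, assuming virsh is on PATH and the domain is running; the two skipped header rows follow the usual virsh table layout:

import subprocess


def virsh_iothreadinfo(domain):
    """Return {iothread_id: cpu_affinity} parsed from `virsh iothreadinfo`."""
    out = subprocess.run(["virsh", "iothreadinfo", domain],
                         capture_output=True, text=True,
                         check=True).stdout
    info = {}
    # Skip the "IOThread ID / CPU Affinity" header and the separator row.
    for line in out.splitlines()[2:]:
        fields = line.split()
        if len(fields) == 2:
            info[fields[0]] = fields[1]
    return info


# Example: virsh_iothreadinfo("avocado-vt-vm1") -> {'1': '0-3', '2': '0-3'}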
Example #30
def run(test, params, env):
    """
    Test auto_dump_* parameter in qemu.conf.

    1) Change auto_dump_* in qemu.conf;
    2) Restart libvirt daemon;
    3) Crash the guest to trigger an auto core dump;
    4) Check if file open state changed accordingly.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    bypass_cache = params.get("auto_dump_bypass_cache", "not_set")
    panic_model = params.get("panic_model")
    addr_type = params.get("addr_type")
    addr_iobase = params.get("addr_iobase")
    vm = env.get_vm(vm_name)
    target_flags = int(params.get('target_flags', '0o40000'), 8)
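    # 0o40000 is O_DIRECT on x86_64/ppc64le/s390x: the flag that
    # auto_dump_bypass_cache=1 is expected to set on the dump file.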

    if panic_model and not libvirt_version.version_compare(1, 3, 1):
        test.cancel("panic device model attribute not supported"
                    "on current libvirt version")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    config = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    dump_path = os.path.join(data_dir.get_data_dir(), "dump")
    try:
        if not vmxml.xmltreefile.find('devices').findall('panic'):
            # Set panic device
            panic_dev = Panic()
            if panic_model:
                panic_dev.model = panic_model
            if addr_type:
                panic_dev.addr_type = addr_type
            if addr_iobase:
                panic_dev.addr_iobase = addr_iobase
            vmxml.add_device(panic_dev)
        vmxml.on_crash = "coredump-restart"
        vmxml.sync()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if not vmxml.xmltreefile.find('devices').findall('panic'):
            test.cancel("No 'panic' device in the guest, maybe "
                        "your libvirt version doesn't support it")

        # Setup qemu.conf
        if bypass_cache == 'not_set':
            del config.auto_dump_bypass_cache
        else:
            config.auto_dump_bypass_cache = bypass_cache

        config.auto_dump_path = dump_path
        if os.path.exists(dump_path):
            os.rmdir(dump_path)
        os.mkdir(dump_path)

        # Restart libvirtd to make change valid.
        libvirtd.restart()

        # Restart VM to create a new qemu process.
        if vm.is_alive():
            vm.destroy()
        vm.start()

        def get_flags(dump_path, result_dict):
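            # libvirt writes the dump through a libvirt_iohelper process;
            # grab that helper's PID from lsof output on the dump directory.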
            cmd = "lsof -w %s/* |awk '/libvirt_i/{print $2}'" % dump_path
            iohelper_pid = None
            start_time = time.time()
            while (time.time() - start_time) < 30:
                ret = process.run(cmd, shell=True, ignore_status=True)
                status, iohelper_pid = ret.exit_status, ret.stdout_text.strip()
                if status:
                    time.sleep(0.1)
                    continue
                if not iohelper_pid:
                    continue
                logging.info('pid: %s', iohelper_pid)
                result_dict['pid'] = iohelper_pid
                break

            # Get file open flags containing bypass cache information:
            # fd 1 (stdout) of the io helper points at the dump file, and
            # fdinfo shows its open(2) flags in octal.
            if not iohelper_pid:
                # lsof never found the helper within the timeout.
                return
            with open('/proc/%s/fdinfo/1' % iohelper_pid, 'r') as fdinfo:
                flags = 0
                for line in fdinfo.readlines():
                    if line.startswith('flags:'):
                        flags = int(line.split()[1], 8)
                        logging.debug('file open flag is: %o', flags)
            result_dict['flags'] = flags
            with open('/proc/%s/cmdline' % iohelper_pid) as cmdinfo:
                cmdline = cmdinfo.readline()
                logging.debug(cmdline.split())

        session = vm.wait_for_login()
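        # A Manager dict is shared across processes, letting the child pass
        # the collected pid/flags back to the parent.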
        result_dict = multiprocessing.Manager().dict()
        child_process = multiprocessing.Process(target=get_flags,
                                                args=(dump_path, result_dict))
        child_process.start()

        # Stop kdump in the guest
        session.cmd("service kdump stop", ignore_all_errors=True)
        # Enable sysRq
        session.cmd("echo 1 > /proc/sys/kernel/sysrq")
        try:
            # Crash the guest
            session.cmd("echo c > /proc/sysrq-trigger", timeout=1)
        except ShellTimeoutError:
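            # Expected: the sysrq crash takes the guest down before the
            # command can return.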
            pass
        session.close()

        child_process.join(10)
        if child_process.is_alive():
            child_process.terminate()
        flags = result_dict['flags']
        iohelper_pid = result_dict['pid']

        # Kill core dump process to speed up test
        try:
            process.run('kill %s' % iohelper_pid)
        except process.CmdError as detail:
            logging.debug("Dump already done:\n%s", detail)

        arch = platform.machine()

        if arch in ['x86_64', 'ppc64le', 's390x']:
            # Check if bypass cache flag set or unset accordingly.
            cond1 = (flags & target_flags) and bypass_cache != '1'
            cond2 = not (flags & target_flags) and bypass_cache == '1'
            if cond1 or cond2:
                test.fail('auto_dump_bypass_cache is %s but flags '
                          'is %o' % (bypass_cache, flags))
        else:
            test.cancel("Unknown Arch. Do the necessary changes to" " support")

    finally:
        backup_xml.sync()
        config.restore()
        libvirtd.restart()
        if os.path.exists(dump_path):
            shutil.rmtree(dump_path)
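
The flag check above can be exercised on its own. A minimal sketch, run against the current process's stdout instead of libvirt_iohelper; the O_DIRECT value is the x86_64/ppc64le/s390x one used by the test:

import os

O_DIRECT = 0o40000  # x86_64/ppc64le/s390x value, matching target_flags above


def fd_flags(pid, fd):
    """Return the open(2) flags of a process's fd; fdinfo prints them in octal."""
    with open('/proc/%s/fdinfo/%s' % (pid, fd)) as fdinfo:
        for line in fdinfo:
            if line.startswith('flags:'):
                return int(line.split()[1], 8)
    return 0


# Demo against this process's stdout (fd 1); a dump helper would be
# inspected the same way as in get_flags() above.
flags = fd_flags(os.getpid(), 1)
print('O_DIRECT set: %s' % bool(flags & O_DIRECT))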