Example #1
def run(test, params, env):
    """
    Test huge page allocation
    Available huge page size is arch dependent
    """
    try:
        expected_size = params.get("expected_size", "")
        number_of_huge_pages = params.get("number_of_hugepages", "")
        previous_allocation = ""
        previous_allocation = get_num_huge_pages()

        logging.debug(
            "Try to set '%s' huge pages."
            " Expected size: '%s'. Actual size: '%s'." %
            (number_of_huge_pages, expected_size, get_huge_page_size()))
        virsh.allocpages(expected_size,
                         number_of_huge_pages,
                         ignore_status=False)

        actual_allocation = get_num_huge_pages()
        if str(actual_allocation) != str(number_of_huge_pages):
            test.fail("Huge page allocation failed."
                      " Expected '%s', got '%s'." %
                      (number_of_huge_pages, actual_allocation))
    finally:
        if previous_allocation:
            logging.debug("Restore huge page allocation")
            set_num_huge_pages(previous_allocation)
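
The helpers get_num_huge_pages, set_num_huge_pages and get_huge_page_size are not shown in this example; in avocado-vt they are provided by virttest.utils_memory (the later examples call them through that module). A minimal sketch of what they do, assuming the standard Linux procfs interface:

def get_num_huge_pages():
    # Number of huge pages currently reserved by the kernel
    with open("/proc/sys/vm/nr_hugepages") as proc_file:
        return int(proc_file.read().strip())


def set_num_huge_pages(num):
    # Ask the kernel to reserve 'num' huge pages
    with open("/proc/sys/vm/nr_hugepages", "w") as proc_file:
        proc_file.write(str(num))


def get_huge_page_size():
    # Default huge page size in kB, parsed from /proc/meminfo
    with open("/proc/meminfo") as proc_file:
        for line in proc_file:
            if line.startswith("Hugepagesize:"):
                return int(line.split()[1])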
Example #2
def run(test, params, env):
    """
    Test interface xml options.

    1. Prepare the test environment, destroy or suspend a VM.
    2. Edit the xml and start the domain.
    3. Perform the test operation.
    4. Recover the test environment.
    5. Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    host_arch = platform.machine()
    virsh_dargs = {'debug': True, 'ignore_status': False}

    if not utils_package.package_install(["lsof"]):
        test.cancel("Failed to install dependency package lsof" " on host")

    def create_iface_xml(iface_mac):
        """
        Create interface xml file
        """
        iface = Interface(type_name=iface_type)
        source = ast.literal_eval(iface_source)
        if source:
            iface.source = source
        iface.model = iface_model if iface_model else "virtio"
        iface.mac_address = iface_mac
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if test_target:
            iface.target = {"dev": target_dev}
        logging.debug("Create new interface xml: %s", iface)
        return iface
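
    # A hypothetical <interface> element as produced by create_iface_xml
    # above (assuming iface_type="network" and iface_model="virtio"; the
    # actual values come from the test params):
    #   <interface type="network">
    #     <mac address="52:54:00:xx:xx:xx"/>
    #     <source network="default"/>
    #     <model type="virtio"/>
    #     <driver/>
    #   </interface>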

    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check whether the source device is valid; if it is not in the
            # host interface list, fall back to the first active interface
            # of the host
            if (iface.type_name == "direct" and 'dev' in source
                    and source['dev'] not in net_ifs):
                logging.warning(
                    "Source device %s is not an interface"
                    " on the host, reset to %s", source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if test_target:
            logging.debug("iface.target is %s" % target_dev)
            iface.target = {"dev": target_dev}
        if iface.address:
            del iface.address
        if set_ip:
            iface.ips = [ast.literal_eval(x) for x in set_ips]
        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            process.run(cmd, shell=True)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid permission problems
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)
            logging.info("Unprivileged users can't use 'dac' security driver,"
                         " removing from domain xml if present...")
            vmxml.del_seclabel([('model', 'dac')])

            # Set vm memory to 2G if it's larger than 2G
            if vmxml.memory > 2097152:
                vmxml.memory = vmxml.current_mem = 2097152

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            process.run("chmod a+rw %s" % vmxml.xml, shell=True)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml, ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            try:
                vmxml.sync()
                if define_error:
                    test.fail("Defining the VM succeeded, but it should fail")
            except xcepts.LibvirtXMLError as e:
                if not define_error:
                    test.fail("Defining the VM failed: %s" % e)

    def check_offloads_option(if_name, driver_options, session=None):
        """
        Check interface offloads by ethtool output
        """
        offloads = {
            "csum": "tx-checksumming",
            "tso4": "tcp-segmentation-offload",
            "tso6": "tx-tcp6-segmentation",
            "ecn": "tx-tcp-ecn-segmentation",
            "ufo": "udp-fragmentation-offload"
        }
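        # The dict maps <driver> offload attribute names to the feature
        # names that "ethtool -k" prints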
        if session:
            ret, output = session.cmd_status_output("ethtool -k %s | head"
                                                    " -18" % if_name)
        else:
            out = process.run("ethtool -k %s | head -18" % if_name, shell=True)
            ret, output = out.exit_status, out.stdout_text
        if ret:
            test.fail("ethtool return error code")
        logging.debug("ethtool output: %s", output)
        for offload in list(driver_options.keys()):
            if offload in offloads:
                if (output.count(offloads[offload]) and not output.count(
                        "%s: %s" %
                    (offloads[offload], driver_options[offload]))):
                    test.fail("offloads option %s: %s isn't"
                              " correct in ethtool output" %
                              (offloads[offload], driver_options[offload]))

    def run_xml_test(iface_mac):
        """
        Test for interface options in vm xml
        """
        # Get the interface object according to the mac address
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_devices = vmxml.get_devices(device_type="interface")
        iface = None
        for iface_dev in iface_devices:
            if iface_dev.mac_address == iface_mac:
                iface = iface_dev
        if not iface:
            test.fail("Can't find interface with mac"
                      " '%s' in vm xml" % iface_mac)
        driver_dict = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        for driver_opt in list(driver_dict.keys()):
            if driver_dict[driver_opt] != iface.driver.driver_attr[
                    driver_opt]:
                test.fail("Can't see driver option %s=%s in vm xml" %
                          (driver_opt, driver_dict[driver_opt]))
            else:
                logging.info("Found %s=%s in vm xml",
                             driver_opt, driver_dict[driver_opt])
        if iface_target:
            if ("dev" not in iface.target
                    or not iface.target["dev"].startswith(iface_target)):
                test.fail("Can't see device target dev in vm xml")
            # Check macvtap mode by ip link command
            if iface_target == "macvtap" and "mode" in iface.source:
                cmd = "ip -d link show %s" % iface.target["dev"]
                output = process.run(cmd, shell=True).stdout_text
                logging.debug("ip link output: %s", output)
                mode = iface.source["mode"]
                if mode == "passthrough":
                    mode = "passthru"
                if not re.search(r"macvtap\s+mode %s" % mode, output):
                    test.fail("Failed to verify macvtap mode")
        # Check if the "target dev" is set successfully
        # 1. Target dev name with prefix as "vnet" will always be override;
        # 2. Target dev name with prefix as "macvtap" or "macvlan" with direct
        # type interface will be override;
        # 3. Other scenarios, the target dev should be set successfully.
        if test_target:
            if target_dev != iface.target["dev"]:
                if target_dev.startswith("vnet") or \
                        (iface_type == "direct" and
                         (target_dev.startswith("macvtap") or
                          target_dev.startswith("macvlan"))):
                    logging.debug("target dev %s is override" % target_dev)
                else:
                    test.fail("Failed to set target dev to %s", target_dev)
            else:
                logging.debug("target dev set successfully to %s",
                              iface.target["dev"])

    def run_cmdline_test(iface_mac, host_arch):
        """
        Test qemu command line
        :param iface_mac: expected MAC
        :param host_arch: host architecture, e.g. x86_64
        :raise avocado.core.exceptions.TestError: if preconditions are not met
        :raise avocado.core.exceptions.TestFail: if commandline doesn't match
        :return: None
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        ret = process.run(cmd, shell=True)
        logging.debug("Command line %s", ret.stdout_text)
        if test_vhost_net:
            if not ret.stdout_text.count("vhost=on") and not rm_vhost_driver:
                test.fail("Can't see vhost options in"
                          " qemu-kvm command line")

        if iface_model == "virtio":
            if host_arch == 's390x':
                model_option = "device virtio-net-ccw"
            else:
                model_option = "device virtio-net-pci"
        elif iface_model == 'rtl8139':
            model_option = "device rtl8139"
        else:
            test.error(
                "Don't know which device driver to expect on qemu cmdline"
                " for iface_model %s" % iface_model)
        iface_cmdline = re.findall(
            r"%s,(.+),mac=%s" % (model_option, iface_mac), ret.stdout_text)
        if not iface_cmdline:
            test.fail("Can't see %s with mac %s in command"
                      " line" % (model_option, iface_mac))

        cmd_opt = {}
        for opt in iface_cmdline[0].split(','):
            tmp = opt.rsplit("=")
            cmd_opt[tmp[0]] = tmp[1]
        logging.debug("Command line options %s", cmd_opt)

        driver_dict = {}
        # Test <driver> xml options.
        if iface_driver:
            iface_driver_dict = ast.literal_eval(iface_driver)
            for driver_opt in list(iface_driver_dict.keys()):
                if driver_opt == "name":
                    continue
                elif driver_opt == "txmode":
                    if iface_driver_dict["txmode"] == "iothread":
                        driver_dict["tx"] = "bh"
                    else:
                        driver_dict["tx"] = iface_driver_dict["txmode"]
                elif driver_opt == "queues":
                    driver_dict["mq"] = "on"
                    if "pci" in model_option:
                        driver_dict["vectors"] = str(
                            int(iface_driver_dict["queues"]) * 2 + 2)
                else:
                    driver_dict[driver_opt] = iface_driver_dict[driver_opt]
        # Test <driver><host/></driver> xml options.
        if iface_driver_host:
            driver_dict.update(ast.literal_eval(iface_driver_host))
        # Test <driver><guest/></driver> xml options.
        if iface_driver_guest:
            driver_dict.update(ast.literal_eval(iface_driver_guest))

        for driver_opt in list(driver_dict.keys()):
            if (driver_opt not in cmd_opt
                    or not cmd_opt[driver_opt] == driver_dict[driver_opt]):
                test.fail("Can't see option '%s=%s' in qemu-kvm "
                          " command line" %
                          (driver_opt, driver_dict[driver_opt]))
            logging.info("Find %s=%s in qemu-kvm command line" %
                         (driver_opt, driver_dict[driver_opt]))
        if test_backend:
            # The PID is the second field of the matching "ps -ef" line
            guest_pid = ret.stdout_text.rsplit()[1]
            cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["tap"])
            cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["vhost"])

    def get_guest_ip(session, mac):
        """
        Wrapper function to get guest ip address
        """
        utils_net.restart_guest_network(session, mac)
        # Wait for the IP address to be ready
        utils_misc.wait_for(lambda: utils_net.get_guest_ip_addr(session, mac),
                            10)
        return utils_net.get_guest_ip_addr(session, mac)

    def check_user_network(session):
        """
        Check user network ip address on guest
        """
        vm_ips = []
        vm_ips.append(get_guest_ip(session, iface_mac_old))
        if attach_device:
            vm_ips.append(get_guest_ip(session, iface_mac))
        logging.debug("IP address on guest: %s", vm_ips)
        if len(vm_ips) != len(set(vm_ips)):
            logging.debug(
                "Duplicated IP address on guest. Check bug: "
                "https://bugzilla.redhat.com/show_bug.cgi?id=1147238")
        for vm_ip in vm_ips:
            if not vm_ip or vm_ip != expect_ip:
                logging.debug("vm_ip is %s, expect_ip is %s", vm_ip, expect_ip)
                test.fail("Found wrong IP address" " on guest")
        # Check gateway address
        gateway = str(utils_net.get_default_gateway(False, session))
        if expect_gw not in gateway:
            test.fail("The gateway on guest is %s, while expect is %s" %
                      (gateway, expect_gw))
        # Check dns server address
        ns_list = utils_net.get_guest_nameserver(session)
        if expect_ns not in ns_list:
            test.fail("The dns found is %s, which expect is %s" %
                      (ns_list, expect_ns))

    def check_mcast_network(session, add_session):
        """
        Check multicast ip address on guests

        :param session: vm session
        :param add_session: additional vm session
        """
        src_addr = ast.literal_eval(iface_source)['address']
        vms_sess_dict = {vm_name: session, additional_vm.name: add_session}

        # Check mcast address on host
        cmd = "netstat -g | grep %s" % src_addr
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Can't find multicast ip address" " on host")
        vms_ip_dict = {}
        # Get ip address on each guest
        for vms in list(vms_sess_dict.keys()):
            vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
            vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
            if not vm_ip:
                test.fail("Can't get multicast ip" " address on guest")
            vms_ip_dict.update({vms: vm_ip})
        if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
            test.fail("Got duplicated multicast ip address")
        logging.debug("Found ips on guest: %s", vms_ip_dict)

        # Run omping server on host
        if not utils_package.package_install(["omping"]):
            test.error("Failed to install omping" " on host")
        cmd = ("iptables -F;omping -m %s %s" %
               (src_addr,
                "192.168.122.1 %s" % ' '.join(list(vms_ip_dict.values()))))
        # Run a background job waiting for client connections
        bgjob = utils_misc.AsyncJob(cmd)

        # Run omping client on guests
        for vms in list(vms_sess_dict.keys()):
            # omping should be installed first
            if not utils_package.package_install(["omping"],
                                                 vms_sess_dict[vms]):
                test.error("Failed to install omping" " on guest")
            cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
                   (src_addr, "192.168.122.1 %s" % vms_ip_dict[vms]))
            ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
            logging.debug("omping ret: %s, output: %s", ret, output)
            if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%')
                    or not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
                test.fail("omping failed on guest")
        # Kill the background job
        bgjob.kill_func()

    def get_iface_model(iface_model, host_arch):
        """
        Get iface_model. On s390x, fall back to the default model 'virtio'
        if a non-virtio model is given.
        :param iface_model: value as by test configuration or default
        :param host_arch: host architecture, e.g. x86_64
        :return: iface_model
        """
        if 's390x' == host_arch and 'virtio' not in iface_model:
            return "virtio"
        else:
            return iface_model

    def check_vhostuser_guests(session1, session2):
        """
        Check the vhostuser interface in guests

        :param session1: Session of the original guest
        :param session2: Session of the additional guest
        """
        logging.debug("iface details is %s" %
                      libvirt.get_interface_details(vm_name))
        vm1_mac = str(libvirt.get_interface_details(vm_name)[0]['mac'])
        vm2_mac = str(libvirt.get_interface_details(add_vm_name)[0]['mac'])

        utils_net.set_guest_ip_addr(session1, vm1_mac, guest1_ip)
        utils_net.set_guest_ip_addr(session2, vm2_mac, guest2_ip)
        ping_status, ping_output = utils_net.ping(dest=guest2_ip,
                                                  count='3',
                                                  timeout=5,
                                                  session=session1)
        logging.info("output:%s" % ping_output)
        if ping_status != 0:
            if ping_expect_fail:
                logging.info("Can not ping guest2 as expected")
            else:
                test.fail("Can not ping guest2 from guest1")
        else:
            if ping_expect_fail:
                test.fail("Ping guest2 successfully not expected")
            else:
                logging.info("Can ping guest2 from guest1")

    def get_ovs_statis(ovs):
        """
        Get ovs-vsctl interface statistics and format them as a dict

        :param ovs: openvswitch instance
        """
        ovs_statis_dict = {}
        ovs_iface_info = ovs.ovs_vsctl(["list",
                                        "interface"]).stdout_text.strip()
        ovs_iface_list = re.findall(
            r'name\s+: (\S+)\n.*?statistics\s+: {(.*?)}\n', ovs_iface_info,
            re.S)
        logging.info("ovs iface list is %s", ovs_iface_list)
        # Dict of iface name and statistics
        for iface_name in vhostuser_names.split():
            for ovs_iface in ovs_iface_list:
                if iface_name == ast.literal_eval(ovs_iface[0]):
                    format_statis = dict(
                        re.findall(r'(\S*?)=(\d*?),', ovs_iface[1]))
                    ovs_statis_dict[iface_name] = format_statis
                    break
        return ovs_statis_dict

    status_error = "yes" == params.get("status_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    unprivileged_user = params.get("unprivileged_user")

    # Interface specific attributes.
    iface_type = params.get("iface_type", "network")
    iface_source = params.get("iface_source", "{}")
    iface_driver = params.get("iface_driver")
    iface_model = get_iface_model(params.get("iface_model", "virtio"),
                                  host_arch)
    iface_target = params.get("iface_target")
    iface_backend = params.get("iface_backend", "{}")
    iface_driver_host = params.get("iface_driver_host")
    iface_driver_guest = params.get("iface_driver_guest")
    ovs_br_name = params.get("ovs_br_name")
    vhostuser_names = params.get("vhostuser_names")
    attach_device = params.get("attach_iface_device")
    expect_tx_size = params.get("expect_tx_size")
    guest1_ip = params.get("vhostuser_guest1_ip", "192.168.100.1")
    guest2_ip = params.get("vhostuser_guest2_ip", "192.168.100.2")
    change_option = "yes" == params.get("change_iface_options", "no")
    update_device = "yes" == params.get("update_iface_device", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    serial_login = "******" == params.get("serial_login", "no")
    rm_vhost_driver = "yes" == params.get("rm_vhost_driver", "no")
    test_option_cmd = "yes" == params.get("test_iface_option_cmd", "no")
    test_option_xml = "yes" == params.get("test_iface_option_xml", "no")
    test_vhost_net = "yes" == params.get("test_vhost_net", "no")
    test_option_offloads = "yes" == params.get("test_option_offloads", "no")
    test_iface_user = "******" == params.get("test_iface_user", "no")
    test_iface_mcast = "yes" == params.get("test_iface_mcast", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    restart_vm = "yes" == params.get("restart_vm", "no")
    test_guest_ip = "yes" == params.get("test_guest_ip", "no")
    test_backend = "yes" == params.get("test_backend", "no")
    check_guest_trans = "yes" == params.get("check_guest_trans", "no")
    set_ip = "yes" == params.get("set_user_ip", "no")
    set_ips = params.get("set_ips", "").split()
    expect_ip = params.get("expect_ip")
    expect_gw = params.get("expect_gw")
    expect_ns = params.get("expect_ns")
    test_target = "yes" == params.get("test_target", "no")
    target_dev = params.get("target_dev", None)

    # test params for vhostuser test
    huge_page = ast.literal_eval(params.get("huge_page", "{}"))
    numa_cell = ast.literal_eval(params.get("numa_cell", "{}"))
    additional_iface_source = ast.literal_eval(
        params.get("additional_iface_source", "{}"))
    vcpu_num = params.get("vcpu_num")
    cpu_mode = params.get("cpu_mode")
    hugepage_num = params.get("hugepage_num")
    log_pattern = params.get("log_pattern")

    # judgement params for vhostuser test
    need_vhostuser_env = "yes" == params.get("need_vhostuser_env", "no")
    ping_expect_fail = "yes" == params.get("ping_expect_fail", "no")
    check_libvirtd_log = "yes" == params.get("check_libvirtd_log", "no")
    check_statistics = "yes" == params.get("check_statistics", "no")
    enable_multiqueue = "yes" == params.get("enable_multiqueue", "no")

    queue_size = None
    if iface_driver:
        driver_dict = ast.literal_eval(iface_driver)
        if "queues" in driver_dict:
            queue_size = int(driver_dict.get("queues"))

    if iface_driver_host or iface_driver_guest or test_backend:
        if not libvirt_version.version_compare(1, 2, 8):
            test.cancel("Offloading/backend options not "
                        "supported in this libvirt version")
    if iface_driver and "queues" in ast.literal_eval(iface_driver):
        if not libvirt_version.version_compare(1, 0, 6):
            test.cancel("Queues options not supported"
                        " in this libvirt version")

    if unprivileged_user:
        if not libvirt_version.version_compare(1, 1, 1):
            test.cancel("qemu-bridge-helper not supported" " on this host")
        virsh_dargs["unprivileged_user"] = unprivileged_user
        # Create unprivileged user if needed
        cmd = ("grep {0} /etc/passwd || "
               "useradd {0}".format(unprivileged_user))
        process.run(cmd, shell=True)
        # Need another disk image for unprivileged user to access
        dst_disk = "/tmp/%s.img" % unprivileged_user

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    # iface_mac will be updated if a new interface is attached
    iface_mac = iface_mac_old
    # Additional vm for test
    additional_vm = None
    libvirtd = utils_libvirtd.Libvirtd()

    libvirtd_log_path = None
    libvirtd_conf = None
    if check_libvirtd_log:
        libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
        libvirtd.restart()

    # Prepare vhostuser
    ovs = None
    if need_vhostuser_env:
        # Save the current selinux status
        selinux_mode = utils_selinux.get_status()
        # Save the original huge page allocation
        orig_size = utils_memory.get_num_huge_pages()
        ovs_dir = data_dir.get_tmp_dir()
        ovs = utils_net.setup_ovs_vhostuser(hugepage_num, ovs_dir, ovs_br_name,
                                            vhostuser_names, queue_size)

    try:
        # Build the xml and run test.
        try:
            # Prepare interface backend files
            if test_backend:
                if not os.path.exists("/dev/vhost-net"):
                    process.run("modprobe vhost-net", shell=True)
                backend = ast.literal_eval(iface_backend)
                backend_tap = "/dev/net/tun"
                backend_vhost = "/dev/vhost-net"
                if not backend:
                    backend["tap"] = backend_tap
                    backend["vhost"] = backend_vhost
                if not start_error:
                    # Create backend files for normal test
                    if not os.path.exists(backend["tap"]):
                        os.rename(backend_tap, backend["tap"])
                    if not os.path.exists(backend["vhost"]):
                        os.rename(backend_vhost, backend["vhost"])
            # Edit the interface xml.
            if change_option:
                modify_iface_xml(update=False)
                if define_error:
                    return

            if test_target:
                logging.debug("Setting target device name to %s", target_dev)
                modify_iface_xml(update=False)

            if rm_vhost_driver:
                # remove vhost driver on host and
                # the character file /dev/vhost-net
                cmd = ("modprobe -r {0}; "
                       "rm -f /dev/vhost-net".format("vhost_net"))
                if process.system(cmd, ignore_status=True, shell=True):
                    test.error("Failed to remove vhost_net driver")
            else:
                # Load vhost_net driver by default
                cmd = "modprobe vhost_net"
                process.system(cmd, shell=True)

            # Attach an interface when the vm is shut off
            if attach_device == 'config':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="--config",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)

            # Add hugepage and update cpu for vhostuser testing
            if huge_page:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                membacking = vm_xml.VMMemBackingXML()
                hugepages = vm_xml.VMHugepagesXML()
                pagexml = hugepages.PageXML()
                pagexml.update(huge_page)
                hugepages.pages = [pagexml]
                membacking.hugepages = hugepages
                vmxml.mb = membacking

                vmxml.vcpu = int(vcpu_num)
                cpu_xml = vm_xml.VMCPUXML()
                cpu_xml.xml = "<cpu><numa/></cpu>"
                cpu_xml.numa_cell = cpu_xml.dicts_to_cells([numa_cell])
                cpu_xml.mode = cpu_mode
                if cpu_mode == "custom":
                    vm_capability = capability_xml.CapabilityXML()
                    cpu_xml.model = vm_capability.model
                vmxml.cpu = cpu_xml

                vmxml.sync()
                logging.debug("xmltreefile:%s", vmxml.xmltreefile)

            # Clone additional vm
            if additional_guest:
                add_vm_name = "%s_%s" % (vm_name, '1')
                # Clone additional guest
                timeout = params.get("clone_timeout", 360)
                utils_libguestfs.virt_clone_cmd(vm_name,
                                                add_vm_name,
                                                True,
                                                timeout=timeout)
                additional_vm = vm.clone(add_vm_name)
                # Update iface source if needed
                if additional_iface_source:
                    add_vmxml = vm_xml.VMXML.new_from_dumpxml(add_vm_name)
                    add_xml_devices = add_vmxml.devices
                    add_iface_index = add_xml_devices.index(
                        add_xml_devices.by_device_tag("interface")[0])
                    add_iface = add_xml_devices[add_iface_index]
                    add_iface.source = additional_iface_source
                    add_vmxml.devices = add_xml_devices
                    add_vmxml.xmltreefile.write()
                    add_vmxml.sync()

                    logging.debug("add vm xmltreefile:%s",
                                  add_vmxml.xmltreefile)
                additional_vm.start()
                # additional_vm.wait_for_login()
                username = params.get("username")
                password = params.get("password")
                add_session = additional_vm.wait_for_serial_login(
                    username=username, password=password)

            # Start the VM.
            if unprivileged_user:
                virsh.start(vm_name, **virsh_dargs)
                cmd = ("su - %s -c 'virsh console %s'" %
                       (unprivileged_user, vm_name))
                session = aexpect.ShellSession(cmd)
                session.sendline()
                remote.handle_prompts(session, params.get("username"),
                                      params.get("password"), r"[\#\$]\s*$",
                                      60)
                # Get ip address on guest
                if not get_guest_ip(session, iface_mac):
                    test.error("Can't get ip address on guest")
            else:
                # Will raise VMStartError exception if start fails
                vm.start()
                if serial_login:
                    session = vm.wait_for_serial_login()
                else:
                    session = vm.wait_for_login()
            if start_error:
                test.fail("VM started unexpectedly")

            # Attach an interface when the vm is running
            if attach_device == 'live':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="--live",
                                          ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret, status_error)
                # Need to sleep here for the attachment to take effect
                time.sleep(5)

            # Update interface options
            if update_device:
                modify_iface_xml(update=True, status_error=status_error)

            # Run tests for qemu-kvm command line options
            if test_option_cmd:
                run_cmdline_test(iface_mac, host_arch)
            # Run tests for vm xml
            if test_option_xml:
                run_xml_test(iface_mac)
            # Run tests for offloads options
            if test_option_offloads:
                if iface_driver_host:
                    ifname_guest = utils_net.get_linux_ifname(
                        session, iface_mac)
                    check_offloads_option(ifname_guest,
                                          ast.literal_eval(iface_driver_host),
                                          session)
                if iface_driver_guest:
                    ifname_host = libvirt.get_ifname_host(vm_name, iface_mac)
                    check_offloads_option(ifname_host,
                                          ast.literal_eval(iface_driver_guest))

            if test_iface_user:
                # Test user type network
                check_user_network(session)
            if test_iface_mcast:
                # Test mcast type network
                check_mcast_network(session, add_session)
            # Check guest ip address
            if test_guest_ip:
                if not get_guest_ip(session, iface_mac):
                    test.fail("Guest can't get a" " valid ip address")
            # Check guest RX/TX ring
            if check_guest_trans:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                ret, outp = session.cmd_status_output("ethtool -g %s" %
                                                      ifname_guest)
                if ret:
                    test.fail("ethtool return error code")
                logging.info("ethtool output is %s", outp)
                driver_dict = ast.literal_eval(iface_driver)
                if expect_tx_size:
                    driver_dict['tx_queue_size'] = expect_tx_size
                for outp_p in outp.split("Current hardware"):
                    if 'rx_queue_size' in driver_dict:
                        if re.search(
                                r"RX:\s*%s" % driver_dict['rx_queue_size'],
                                outp_p):
                            logging.info("Find RX setting RX:%s by ethtool",
                                         driver_dict['rx_queue_size'])
                        else:
                            test.fail("Cannot find matching rx setting")
                    if 'tx_queue_size' in driver_dict:
                        if re.search(
                                r"TX:\s*%s" % driver_dict['tx_queue_size'],
                                outp_p):
                            logging.info("Find TX settint TX:%s by ethtool",
                                         driver_dict['tx_queue_size'])
                        else:
                            test.fail("Cannot find matching tx setting")
            if test_target:
                logging.debug("Check if the target dev is set")
                run_xml_test(iface_mac)

            # Check vhostuser guest
            if additional_iface_source:
                check_vhostuser_guests(session, add_session)

            # Check libvirtd log
            if check_libvirtd_log:
                with open(libvirtd_log_path) as f:
                    lines = "".join(f.readlines())
                    if log_pattern in lines:
                        logging.info("Finding msg<%s> in libvirtd log",
                                     log_pattern)
                    else:
                        test.fail("Can not find msg:<%s> in libvirtd.log" %
                                  log_pattern)

            # Check statistics
            if check_statistics:
                session.sendline("ping %s" % guest2_ip)
                add_session.sendline("ping %s" % guest1_ip)
                time.sleep(5)
                vhost_name = vhostuser_names.split()[0]
                ovs_statis_dict = get_ovs_statis(ovs)[vhost_name]
                domif_info = {}
                domif_info = libvirt.get_interface_details(vm_name)
                virsh.domiflist(vm_name, debug=True)
                domif_stat_result = virsh.domifstat(vm_name, vhost_name)
                if domif_stat_result.exit_status != 0:
                    test.fail("domifstat cmd fail with msg:%s" %
                              domif_stat_result.stderr)
                else:
                    domif_stat = domif_stat_result.stdout.strip()
                logging.debug("vhost_name is %s, domif_stat is %s", vhost_name,
                              domif_stat)
                domif_stat_dict = dict(
                    re.findall("%s (\S*) (\d*)" % vhost_name, domif_stat))
                logging.debug("ovs_statis is %s, domif_stat is %s",
                              ovs_statis_dict, domif_stat_dict)
                ovs_cmp_dict = {
                    'tx_bytes': ovs_statis_dict['rx_bytes'],
                    'tx_drop': ovs_statis_dict['rx_dropped'],
                    'tx_errs': ovs_statis_dict['rx_errors'],
                    'tx_packets': ovs_statis_dict['rx_packets'],
                    'rx_bytes': ovs_statis_dict['tx_bytes'],
                    'rx_drop': ovs_statis_dict['tx_dropped']
                }
                logging.debug("ovs_cmp_dict is %s", ovs_cmp_dict)
                for dict_key in ovs_cmp_dict.keys():
                    if domif_stat_dict[dict_key] != ovs_cmp_dict[dict_key]:
                        test.fail(
                            "ovs %s result (%s) differs from domifstat"
                            " result (%s)"
                            % (dict_key, ovs_cmp_dict[dict_key],
                               domif_stat_dict[dict_key]))
                    else:
                        logging.info("ovs %s value %s is the same as"
                                     " domifstat", dict_key,
                                     domif_stat_dict[dict_key])

            # Check multi_queue
            if enable_multiqueue:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                for comb_size in (queue_size, queue_size - 1):
                    logging.info("Setting multiqueue size to %s" % comb_size)
                    session.cmd_status("ethtool -L %s combined %s" %
                                       (ifname_guest, comb_size))
                    ret, outp = session.cmd_status_output("ethtool -l %s" %
                                                          ifname_guest)
                    logging.debug("ethtool cmd output:%s" % outp)
                    if not ret:
                        pre_comb = re.search(
                            r"Pre-set maximums:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        cur_comb = re.search(
                            r"Current hardware settings:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        if int(pre_comb) != queue_size or int(cur_comb) != int(
                                comb_size):
                            test.fail(
                                "Fail to check the combined size: setting: %s,"
                                "Pre-set: %s, Current-set: %s, queue_size: %s"
                                % (comb_size, pre_comb, cur_comb, queue_size))
                        else:
                            logging.info(
                                "Got the expected Pre-set and Current"
                                " hardware settings values")
                    else:
                        test.error("ethtool list fail: %s" % outp)

            session.close()
            if additional_guest:
                add_session.close()

            # Restart libvirtd and guest, then test again
            if restart_libvirtd:
                libvirtd.restart()

            if restart_vm:
                vm.destroy(gracefully=True)
                vm.start()
                if test_option_xml:
                    run_xml_test(iface_mac)

            # Detach the hot/cold-plugged interface at the end
            if attach_device and not status_error:
                ret = virsh.detach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="",
                                          ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret)

        except virt_vm.VMStartError as e:
            logging.info(str(e))
            if not start_error:
                test.fail('VM failed to start\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        # Restore interface backend files
        if test_backend:
            if not os.path.exists(backend_tap):
                os.rename(backend["tap"], backend_tap)
            if not os.path.exists(backend_vhost):
                os.rename(backend["vhost"], backend_vhost)
        if rm_vhost_driver:
            # Restore vhost_net driver
            process.system("modprobe vhost_net", shell=True)
        if unprivileged_user:
            virsh.remove_domain(vm_name, **virsh_dargs)
            process.run('rm -f %s' % dst_disk, shell=True)
        if additional_vm:
            virsh.remove_domain(additional_vm.name, "--remove-all-storage")
            # Kill all omping server processes on host
            process.system("pidof omping && killall omping",
                           ignore_status=True,
                           shell=True)
        if vm.is_alive():
            vm.destroy(gracefully=True)
        vmxml_backup.sync()

        if need_vhostuser_env:
            utils_net.clean_ovs_env(selinux_mode=selinux_mode,
                                    page_size=orig_size,
                                    clean_ovs=True)

        if libvirtd_conf:
            libvirtd_conf.restore()
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
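
Example #2 is driven entirely by the test's params object: dict-valued options arrive as Python-literal strings and are parsed with ast.literal_eval, while boolean options are "yes"/"no" strings compared against "yes". A minimal sketch of that pattern, using hypothetical parameter values:

import ast

# Hypothetical params as the framework would supply them from its config
params = {
    "iface_driver": "{'name': 'vhost', 'queues': '4'}",
    "change_iface_options": "yes",
}
driver_dict = ast.literal_eval(params.get("iface_driver", "{}"))
change_option = "yes" == params.get("change_iface_options", "no")
print(driver_dict["queues"], change_option)  # prints: 4 True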
Example #3
def run(test, params, env):
    """
    Test virtiofs filesystem device:

    1. Start a guest with 1/2 virtiofs filesystem devices.
    2. Start 2 guests with the same virtiofs filesystem device.
    3. Coldplug/coldunplug a virtiofs filesystem device.
    4. Share data between guests and host.
    5. Lifecycle tests for a guest with a virtiofs filesystem device.
    """
    def generate_expected_process_option(expected_results):
        """
        Generate expected virtiofsd process option
        """
        if cache_mode != "auto":
            expected_results = "cache=%s" % cache_mode
        if xattr == "on":
            expected_results += ",xattr"
        elif xattr == "off":
            expected_results += ",no_xattr"
        if flock == "on":
            expected_results += ",flock"
        else:
            expected_results += ",no_flock"
        if lock_posix == "on":
            expected_results += ",posix_lock"
        else:
            expected_results += ",no_posix_lock"
        logging.debug(expected_results)
        return expected_results
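
    # For example (hypothetical values): cache_mode="always", xattr="on",
    # flock="off" and lock_posix="on" would yield
    # "cache=always,xattr,no_flock,posix_lock"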

    def shared_data(vm_names, fs_devs):
        """
        Share data between guests and host:
        1. Mount the dir in the guest;
        2. Write a file in the guest;
        3. Check that the md5sum values are the same in the guests and
           on the host.
        """
        md5s = []
        for vm in vms:
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                logging.debug(fs_dev)
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=False)
                session.cmd('mkdir -p %s' % mount_dir)
                logging.debug("mount virtiofs dir in guest")
                cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'],
                                                   mount_dir)
                status, output = session.cmd_status_output(cmd, timeout=300)
                if status != 0:
                    session.close()
                    test.fail("mount virtiofs dir failed: %s" % output)
                if vm == vms[0]:
                    filename_guest = mount_dir + '/' + vm.name
                    cmd = "dd if=/dev/urandom of=%s bs=1M count=512 oflag=direct" % filename_guest
                    status, output = session.cmd_status_output(cmd,
                                                               timeout=300)
                    if status != 0:
                        session.close()
                        test.fail("Write data failed: %s" % output)
                md5_value = session.cmd_status_output(
                    "md5sum %s" % filename_guest)[1].strip().split()[0]
                md5s.append(md5_value)
                logging.debug(md5_value)
                md5_value = process.run(
                    "md5sum %s" %
                    filename_guest).stdout_text.strip().split()[0]
                logging.debug(md5_value)
                md5s.append(md5_value)
            session.close()
        if len(set(md5s)) != len(fs_devs):
            test.fail("The md5sum value are not the same in guests and host")

    def launch_externally_virtiofs(source_dir, source_socket):
        """
        Launch virtiofsd externally

        :param source_dir:  the dir shared on host
        :param source_socket: the socket file listened on
        """
        process.run('chcon -t virtd_exec_t %s' % path,
                    ignore_status=False,
                    shell=True)
        cmd = "systemd-run %s --socket-path=%s -o source=%s" % (
            path, source_socket, source_dir)
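        # With the defaults from the params above this expands to e.g.
        # "systemd-run /usr/libexec/virtiofsd
        #  --socket-path=/var/tmp/vm001.socket -o source=/var/tmp/mount_tag0"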
        try:
            process.run(cmd, ignore_status=False, shell=True)
            # Make sure the socket is created
            utils_misc.wait_for(lambda: os.path.exists(source_socket),
                                timeout=3)
            process.run("chown qemu:qemu %s" % source_socket,
                        ignore_status=False)
            process.run('chcon -t svirt_image_t %s' % source_socket,
                        ignore_status=False,
                        shell=True)
        except Exception as err:
            cmd = "pkill virtiofsd"
            process.run(cmd, shell=True)
            test.fail("{}".format(err))

    def prepare_stress_script(script_path, script_content):
        """
        Refer to xfstests generic/531. Create a stress test script that
        creates a lot of unlinked files.

        :param script_path: The path of the script
        :param script_content: The content of the stress script
        """
        logging.debug("stress script path: %s content: %s" %
                      (script_path, script_content))
        script_lines = script_content.split(';')
        try:
            with open(script_path, 'w') as fd:
                fd.write('\n'.join(script_lines))
            os.chmod(script_path, 0o777)
        except Exception as e:
            test.error("Prepare the guest stress script failed %s" % e)

    def run_stress_script(session, script_path):
        """
        Run stress script in the guest

        :param session: guest session
        :param script_path: The path of script in the guest
        """
        # Raise the open-file limit so the script can keep many unlinked
        # files open
        session.cmd("ulimit -n 500000 && /usr/bin/python3 %s" % script_path,
                    timeout=120)

    def umount_fs(vm):
        """
        Unmount the filesystem in guest

        :param vm: filesystem in this vm that should be unmounted
        """
        if vm.is_alive():
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                mount_dir = '/var/tmp/' + fs_dev.target['dir']
                session.cmd('umount -f %s' % mount_dir, ignore_all_errors=True)
                session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
            session.close()

    def check_detached_xml(vm):
        """
        Check whether there is xml about the filesystem device
        in the vm xml

        :param vm: the vm to be checked
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        filesystems = vmxml.devices.by_device_tag('filesystem')
        if filesystems:
            test.fail("There should be no filesystem devices in guest "
                      "xml after hotunplug")

    def check_filesystem_in_guest(vm, fs_dev):
        """
        Check whether there is virtiofs in vm

        :param vm: the vm to be checked
        :param fs_dev: the virtiofs device to be checked
        """
        session = vm.wait_for_login()
        mount_dir = '/var/tmp/' + fs_dev.target['dir']
        cmd = "mkdir %s; mount -t virtiofs %s %s" % (
            mount_dir, fs_dev.target['dir'], mount_dir)
        status, output = session.cmd_status_output(cmd, timeout=300)
        session.cmd('rm -rf %s' % mount_dir, ignore_all_errors=True)
        if not status:
            test.fail(
                "Mounting virtiofs should fail after hotunplugging the"
                " device. %s" % output)
        session.close()

    start_vm = params.get("start_vm", "no")
    vm_names = params.get("vms", "avocado-vt-vm1").split()
    cache_mode = params.get("cache_mode", "none")
    xattr = params.get("xattr", "on")
    lock_posix = params.get("lock_posix", "on")
    flock = params.get("flock", "on")
    xattr = params.get("xattr", "on")
    path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd")
    queue_size = int(params.get("queue_size", "512"))
    driver_type = params.get("driver_type", "virtiofs")
    guest_num = int(params.get("guest_num", "1"))
    fs_num = int(params.get("fs_num", "1"))
    vcpus_per_cell = int(params.get("vcpus_per_cell", 2))
    dir_prefix = params.get("dir_prefix", "mount_tag")
    error_msg_start = params.get("error_msg_start", "")
    error_msg_save = params.get("error_msg_save", "")
    status_error = params.get("status_error", "no") == "yes"
    socket_file_checking = params.get("socket_file_checking", "no") == "yes"
    suspend_resume = params.get("suspend_resume", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    coldplug = params.get("coldplug", "no") == "yes"
    hotplug_unplug = params.get("hotplug_unplug", "no") == "yes"
    detach_device_alias = params.get("detach_device_alias", "no") == "yes"
    extra_hugepages = params.get_numeric("extra_hugepages")
    edit_start = params.get("edit_start", "no") == "yes"
    with_hugepages = params.get("with_hugepages", "yes") == "yes"
    with_numa = params.get("with_numa", "yes") == "yes"
    with_memfd = params.get("with_memfd", "no") == "yes"
    source_socket = params.get("source_socket", "/var/tmp/vm001.socket")
    launched_mode = params.get("launched_mode", "auto")
    destroy_start = params.get("destroy_start", "no") == "yes"
    bug_url = params.get("bug_url", "")
    script_content = params.get("stress_script", "")
    stdio_handler_file = "file" == params.get("stdio_handler")

    fs_devs = []
    vms = []
    vmxml_backups = []
    expected_fails_msg = []
    expected_results = ""
    host_hp_size = utils_memory.get_huge_page_size()
    backup_huge_pages_num = utils_memory.get_num_huge_pages()
    huge_pages_num = 0

    if len(vm_names) != guest_num:
        test.cancel("This test needs exactly %d vms." % guest_num)

    if not libvirt_version.version_compare(7, 0, 0) and not with_numa:
        test.cancel("Not supported without NUMA before 7.0.0")

    if not libvirt_version.version_compare(7, 6, 0) and destroy_start:
        test.cancel("Bug %s is not fixed on current build" % bug_url)

    try:
        # Define filesystem device xml
        for index in range(fs_num):
            driver = {'type': driver_type, 'queue': queue_size}
            source_dir = os.path.join('/var/tmp/',
                                      str(dir_prefix) + str(index))
            logging.debug(source_dir)
            if not os.path.isdir(source_dir):
                os.mkdir(source_dir)
            target_dir = dir_prefix + str(index)
            source = {'socket': source_socket}
            target = {'dir': target_dir}
            if launched_mode == "auto":
                binary_keys = [
                    'path', 'cache_mode', 'xattr', 'lock_posix', 'flock'
                ]
                binary_values = [path, cache_mode, xattr, lock_posix, flock]
                binary_dict = dict(zip(binary_keys, binary_values))
                source = {'dir': source_dir}
                accessmode = "passthrough"
                fsdev_keys = [
                    'accessmode', 'driver', 'source', 'target', 'binary'
                ]
                fsdev_values = [
                    accessmode, driver, source, target, binary_dict
                ]
            else:
                fsdev_keys = ['driver', 'source', 'target']
                fsdev_values = [driver, source, target]
            fsdev_dict = dict(zip(fsdev_keys, fsdev_values))
            logging.debug(fsdev_dict)
            fs_dev = libvirt_device_utils.create_fs_xml(
                fsdev_dict, launched_mode)
            logging.debug(fs_dev)
            fs_devs.append(fs_dev)

        # Start guests with the virtiofs filesystem device
        for index in range(guest_num):
            logging.debug("prepare vm %s", vm_names[index])
            vm = env.get_vm(vm_names[index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            if vmxml.max_mem < 1024000:
                vmxml.max_mem = 1024000
            if with_hugepages:
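                # max_mem and host_hp_size are both in KiB, so the quotient
                # is the number of huge pages needed to back the whole guest
                # memory; extra_hugepages adds additional pages on top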
                huge_pages_num += vmxml.max_mem // host_hp_size + extra_hugepages
                utils_memory.set_num_huge_pages(huge_pages_num)
            vmxml.remove_all_device_by_type('filesystem')
            vmxml.sync()
            numa_no = None
            if with_numa:
                numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1
            vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name,
                                      vmxml.vcpu,
                                      numa_number=numa_no)
            vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,
                                               access_mode="shared",
                                               hpgs=with_hugepages,
                                               memfd=with_memfd)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            logging.debug(vmxml)
            if launched_mode == "externally":
                launch_externally_virtiofs(source_dir, source_socket)
            if coldplug:
                ret = virsh.attach_device(vm_names[index],
                                          fs_devs[0].xml,
                                          flagstr='--config',
                                          debug=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
            else:
                if not hotplug_unplug:
                    for fs in fs_devs:
                        vmxml.add_device(fs)
                        vmxml.sync()
            logging.debug(vmxml)
            libvirt_pcicontr.reset_pci_num(vm_names[index])
            result = virsh.start(vm_names[index], debug=True)
            if hotplug_unplug:
                if stdio_handler_file:
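                    # switching stdio_handler in qemu.conf only takes
                    # effect after a libvirtd restart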
                    qemu_config = LibvirtQemuConfig()
                    qemu_config.stdio_handler = "file"
                    utils_libvirtd.Libvirtd().restart()
                for fs_dev in fs_devs:
                    ret = virsh.attach_device(vm_names[index],
                                              fs_dev.xml,
                                              ignore_status=True,
                                              debug=True)
                    libvirt.check_exit_status(ret, status_error)

                if status_error:
                    return
            if status_error and not managedsave:
                expected_error = error_msg_start
                utils_test.libvirt.check_exit_status(result, expected_error)
                return
            else:
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            expected_results = generate_expected_process_option(
                expected_results)
            if launched_mode == "auto":
                cmd = 'ps aux | grep virtiofsd | head -n 1'
                utils_test.libvirt.check_cmd_output(cmd,
                                                    content=expected_results)

        if managedsave:
            expected_error = error_msg_save
            result = virsh.managedsave(vm_names[0],
                                       ignore_status=True,
                                       debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        else:
            shared_data(vm_names, fs_devs)
            if suspend_resume:
                virsh.suspend(vm_names[0], debug=True, ignore_status=False)
                time.sleep(30)
                virsh.resume(vm_names[0], debug=True, ignore_status=False)
            elif destroy_start:
                session = vm.wait_for_login(timeout=120)
                # Prepare the guest test script
                script_path = os.path.join(fs_devs[0].source["dir"], "test.py")
                script_content %= (fs_devs[0].source["dir"],
                                   fs_devs[0].source["dir"])
                prepare_stress_script(script_path, script_content)
                # Run guest stress script
                stress_script_thread = threading.Thread(
                    target=run_stress_script, args=(session, script_path))
                stress_script_thread.daemon = True
                stress_script_thread.start()
                # let the stress script create a batch of unlinked files
                time.sleep(60)
                virsh.destroy(vm_names[0], debug=True, ignore_status=False)
                ret = virsh.start(vm_names[0], debug=True)
                libvirt.check_exit_status(ret)
            elif edit_start:
                vmxml_virtio_backup = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_names[0])
                if vm.is_alive():
                    virsh.destroy(vm_names[0])
                    cmd = "virt-xml %s --edit --qemu-commandline '\-foo'" % vm_names[
                        0]
                    cmd_result = process.run(cmd,
                                             ignore_status=True,
                                             shell=True)
                    logging.debug(virsh.dumpxml(vm_names[0]))
                    if cmd_result.exit_status:
                        test.error("virt-xml edit guest failed: %s" %
                                   cmd_result)
                    result = virsh.start(vm_names[0],
                                         ignore_status=True,
                                         debug=True)
                    if error_msg_start:
                        expected_fails_msg.append(error_msg_start)
                    utils_test.libvirt.check_result(
                        result, expected_fails=expected_fails_msg)
                    if not libvirt_version.version_compare(6, 10, 0):
                        # Bug #1897105 was fixed in libvirt-6.10.0; on
                        # earlier versions the environment must be
                        # recovered manually.
                        cmd = "pkill virtiofsd"
                        process.run(cmd, shell=True)
                    if not vm.is_alive():
                        # Restoring vm and check if vm can start successfully
                        vmxml_virtio_backup.sync()
                        virsh.start(vm_names[0], ignore_status=False)
            elif socket_file_checking:
                result = virsh.domid(vm_names[0])
                if result.exit_status:
                    test.fail("Get domid failed.")
                domid = result.stdout.strip()
                domain_dir = ("/var/lib/libvirt/qemu/domain-" + domid +
                              '-' + vm_names[0])
                for fs_dev in fs_devs:
                    alias = fs_dev.alias['name']
                    expected_pid = os.path.join(domain_dir,
                                                alias + '-fs.pid')
                    expected_sock = os.path.join(domain_dir,
                                                 alias + '-fs.sock')
                    status1 = process.run('ls -l %s' % expected_pid,
                                          ignore_status=True,
                                          shell=True).exit_status
                    status2 = process.run('ls -l %s' % expected_sock,
                                          ignore_status=True,
                                          shell=True).exit_status
                    if status1 or status2:
                        test.fail("The socket and pid files are not"
                                  " as expected")
            elif hotplug_unplug:
                for vm in vms:
                    umount_fs(vm)
                    for fs_dev in fs_devs:
                        if detach_device_alias:
                            alias = fs_dev.alias['name']
                            cmd = 'lsof /var/log/libvirt/qemu/%s-%s-virtiofsd.log' % (
                                vm.name, alias)
                            output = process.run(cmd).stdout_text.splitlines()
                            for item in output[1:]:
                                if stdio_handler_file:
                                    if item.split()[0] != "virtiofsd":
                                        test.fail(
                                            "When setting stdio_handler as file, the command"
                                            "to write log should be virtiofsd!"
                                        )
                                else:
                                    if item.split()[0] != "virtlogd":
                                        test.fail(
                                            "When setting stdio_handler as logd, the command"
                                            "to write log should be virtlogd!")
                            ret = virsh.detach_device_alias(
                                vm.name,
                                alias,
                                ignore_status=True,
                                debug=True,
                                wait_for_event=True)
                        else:
                            ret = virsh.detach_device(vm.name,
                                                      fs_dev.xml,
                                                      ignore_status=True,
                                                      debug=True,
                                                      wait_for_event=True)
                        libvirt.check_exit_status(ret, status_error)
                        check_filesystem_in_guest(vm, fs_dev)
                    check_detached_xml(vm)
    finally:
        for vm in vms:
            if vm.is_alive():
                umount_fs(vm)
                vm.destroy(gracefully=False)
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
        for index in range(fs_num):
            process.run('rm -rf /var/tmp/%s%s' % (dir_prefix, index),
                        ignore_status=False)
        process.run('rm -rf %s' % source_socket,
                    ignore_status=False,
                    shell=True)
        if launched_mode == "externally":
            process.run('restorecon %s' % path,
                        ignore_status=False,
                        shell=True)
        utils_memory.set_num_huge_pages(backup_huge_pages_num)
        if stdio_handler_file:
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
Example #4
0
    def enable_hugepage(vmname,
                        no_of_HPs,
                        hp_unit='',
                        hp_node='',
                        pin=False,
                        node_list=[],
                        host_hp_size=0,
                        numa_pin=False):
        """
        Create a list of page-tag dictionaries for hugepage (HP) pinning

        :param vmname: name of the guest
        :param no_of_HPs: Number of hugepages
        :param hp_unit: unit of HP size
        :param hp_node: number of numa nodes to be HP pinned
        :param pin: flag to pin HP with guest numa or not
        :param node_list: Numa node list
        :param host_hp_size: size of the HP to pin with guest numa
        :param numa_pin: flag to numa pin
        :return: list of page tag dictionary for HP pin
        """
        dest_machine = params.get("migrate_dest_host")
        server_user = params.get("server_user", "root")
        server_pwd = params.get("server_pwd")
        command = "cat /proc/meminfo | grep HugePages_Free"
        server_session = remote.wait_for_login('ssh', dest_machine, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        cmd_output = server_session.cmd_status_output(command)
        server_session.close()
        if cmd_output[0] == 0:
            dest_HP_free = cmd_output[1].split(':')[-1].strip()
        else:
            raise error.TestNAError("HP not supported/configured")
        hp_list = []

        # set hugepages on the destination machine now that the remote
        # ssh session has been verified
        hugepage_assign(str(no_of_HPs),
                        target_ip=dest_machine,
                        user=server_user,
                        password=server_pwd)
        logging.debug("Remote host hugepage config done")
        if numa_pin:
            for each_node in node_list:
                if (each_node['mode'] == 'strict'):
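                    # for strict mode, drop any pre-existing default pools
                    # on both hosts, then allocate per-node pools of the
                    # requested size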
                    # reset source host hugepages
                    if int(utils_memory.get_num_huge_pages()) > 0:
                        logging.debug("reset source host hugepages")
                        hugepage_assign("0")
                    # reset dest host hugepages
                    if (int(dest_HP_free) > 0):
                        logging.debug("reset dest host hugepages")
                        hugepage_assign("0",
                                        target_ip=dest_machine,
                                        user=server_user,
                                        password=server_pwd)
                    # set source host hugepages for the specific node
                    logging.debug("set src host hugepages for specific node")
                    hugepage_assign(str(no_of_HPs),
                                    node=each_node['nodeset'],
                                    hp_size=str(host_hp_size))
                    # set dest host hugepages for specific node
                    logging.debug("set dest host hugepages for specific node")
                    hugepage_assign(str(no_of_HPs),
                                    target_ip=dest_machine,
                                    node=each_node['nodeset'],
                                    hp_size=str(host_hp_size),
                                    user=server_user,
                                    password=server_pwd)
        if not pin:
            vm_xml.VMXML.set_memoryBacking_tag(vmname)
            logging.debug("Hugepage without pin")
        else:
            hp_dict = {}
            hp_dict['size'] = str(host_hp_size)
            hp_dict['unit'] = str(hp_unit)
            if int(hp_node) == 1:
                hp_dict['nodeset'] = "0"
                logging.debug("Hugepage with pin to 1 node")
            else:
                hp_dict['nodeset'] = "0-1"
                logging.debug("Hugepage with pin to both nodes")
            hp_list.append(hp_dict)
            logging.debug(hp_list)
        return hp_list
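
For reference, a minimal hypothetical call of the helper above (the names and values are illustrative, not from the original test; note the helper also configures hugepages on the migration destination before returning): with pin=True and hp_node=1 it returns a single page-tag dictionary that the caller can turn into a <page> element under <memoryBacking><hugepages> in the guest XML.

    # hedged sketch, not part of the original test
    hp_list = enable_hugepage(vm_name, 1024, hp_unit='KiB', hp_node=1,
                              pin=True, host_hp_size=2048)
    # -> [{'size': '2048', 'unit': 'KiB', 'nodeset': '0'}]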
Example #6
0
def run(test, params, env):
    """
    Test steps:

    1) Get the test parameters.
    2) Check the environment.
    3) Start the VM and check whether it started successfully.
    4) Compare the hugepage memory size to the guest memory setting.
    5) Check the hugepage memory usage.
    6) Clean up.
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # Skip cases early
    vm_names = []
    if test_type == "contrast":
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            raise error.TestNAError("This test requires two VMs")
        # confirm no VM running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            raise error.TestNAError("one or more VMs are alive")
        err_range = float(params.get("mem_error_range", 1.25))
    else:
        vm_names.append(params.get("main_vm"))
        if test_type == "stress":
            target_path = params.get("target_path", "/tmp/test.out")
        elif test_type == "unixbench":
            unixbench_control_file = params.get("unixbench_controle_file",
                                                "unixbench5.control")

    # back up original settings
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                         "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)

    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)

    utils_libvirtd.libvirtd_restart()
    non_started_free = utils_memory.get_num_huge_pages_free()

    vms = []
    sessions = []
    try:
        for vm_name in vm_names:
            # try to start vm and login
            try:
                vm = env.get_vm(vm_name)
                vm.start()
            except VMError as e:
                if mb_enable and not tlbfs_enable:
                    # if hugetlbfs is not mounted, starting the VM with the
                    # memoryBacking tag will fail
                    logging.debug(e)
                else:
                    error_msg = "Test failed in positive case. error: %s\n" % e
                    raise error.TestFail(error_msg)
            if vm.is_alive() is not True:
                break
            vms.append(vm)

            # try to login and run some program
            try:
                session = vm.wait_for_login()
            except (LoginError, ShellError) as e:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                raise error.TestFail(error_msg)
            sessions.append(session)

            if test_type == "stress":
                # prepare file for increasing stress
                stress_path = prepare_c_file()
                remote.scp_to_remote(vm.get_address(), 22,
                                     'root', params.get('password'),
                                     stress_path, "/tmp/")
                # Try to install gcc on guest first
                utils_package.package_install(["gcc"], session, 360)
                # increasing workload
                session.cmd("gcc %s -o %s" % (stress_path, target_path))
                session.cmd("%s &" % target_path)

            if test_type == "unixbench":
                params["main_vm"] = vm_name
                params["test_control_file"] = unixbench_control_file

                control_path = os.path.join(test.virtdir, "control",
                                            unixbench_control_file)
                # the unixbench test needs the 'patch' and 'perl' commands installed
                utils_package.package_install(["patch", "perl"], session, 360)
                command = utils_test.run_autotest(vm, session, control_path,
                                                  None, None, params,
                                                  copy_only=True)
                session.cmd("%s &" % command, ignore_all_errors=True)
                # wait for autotest running on vm
                time.sleep(delay)

                def _is_unixbench_running():
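                    # poll for the unixbench 'Run' perl wrapper in the
                    # guest process list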
                    cmd = "ps -ef | grep perl | grep Run"
                    return not session.cmd_status(cmd)
                if not utils_misc.wait_for(_is_unixbench_running, timeout=240):
                    raise error.TestNAError("Failed to run unixbench in guest,"
                                            " please make sure some necessary"
                                            " packages are installed in guest,"
                                            " such as gcc, tar, bzip2")
                logging.debug("Unixbench test is running in VM")
Example #7
0
    def cleanup_test_virtio_mem():
        """
        Clean up environment
        """
        if utils_memory.get_num_huge_pages() != ORG_HP:
            utils_memory.set_num_huge_pages(ORG_HP)
Example #8
0
import re

from virttest import libvirt_version
from virttest import utils_misc
from virttest import virsh

from virttest.libvirt_xml import vm_xml
from virttest.libvirt_xml.devices.memory import Memory
from virttest.staging import utils_memory
from virttest.utils_test import libvirt
from virttest.utils_libvirt import libvirt_vmxml
from virttest.utils_version import VersionInterval

VIRSH_ARGS = {'debug': True, 'ignore_status': False}
ORG_HP = utils_memory.get_num_huge_pages()


def run(test, params, env):
    """
    Update memory device
    """
    def check_environment(vm, params):
        """
        Check the test environment

        :param vm: VM object
        :param params: Dictionary with the test parameters
        """
        libvirt_version.is_libvirt_feature_supported(params)
        utils_misc.is_qemu_function_supported(params)
Example #9
0
def run(test, params, env):
    """
    Test steps:

    1) Get the test parameters.
    2) Check the environment.
    3) Start the VM and check whether it started successfully.
    4) Compare the hugepage memory size to the guest memory setting.
    5) Check the hugepage memory usage.
    6) Clean up.
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # Skip cases early
    vm_names = []
    if test_type == "contrast":
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            test.cancel("This test requires two VMs")
        # confirm no VM running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            test.cancel("one or more VMs are alive")
        err_range = float(params.get("mem_error_range", 1.25))
    else:
        vm_names.append(params.get("main_vm"))
        if test_type == "stress":
            target_path = params.get("target_path", "/tmp/test.out")
        elif test_type == "unixbench":
            unixbench_control_file = params.get("unixbench_controle_file",
                                                "unixbench5.control")

    # back up original settings
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages",
                                         "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)

    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)

    utils_libvirtd.libvirtd_restart()
    non_started_free = utils_memory.get_num_huge_pages_free()

    vms = []
    sessions = []
    try:
        for vm_name in vm_names:
            # try to start vm and login
            try:
                vm = env.get_vm(vm_name)
                vm.start()
            except VMError as e:
                if mb_enable and not tlbfs_enable:
                    # if hugetlbfs is not mounted, starting the VM with the
                    # memoryBacking tag will fail
                    logging.debug(e)
                else:
                    error_msg = "Test failed in positive case. error: %s\n" % e
                    test.fail(error_msg)
            if vm.is_alive() is not True:
                break
            vms.append(vm)

            # try to login and run some program
            try:
                session = vm.wait_for_login()
            except (LoginError, ShellError) as e:
                error_msg = "Test failed in positive case.\n error: %s\n" % e
                test.fail(error_msg)
            sessions.append(session)

            if test_type == "stress":
                # prepare file for increasing stress
                stress_path = prepare_c_file()
                remote.scp_to_remote(vm.get_address(), 22,
                                     'root', params.get('password'),
                                     stress_path, "/tmp/")
                # Try to install gcc on guest first
                utils_package.package_install(["gcc"], session, 360)
                # increasing workload
                session.cmd("gcc %s -o %s" % (stress_path, target_path))
                session.cmd("%s &" % target_path)

            if test_type == "unixbench":
                params["main_vm"] = vm_name
                params["test_control_file"] = unixbench_control_file

                control_path = os.path.join(test.virtdir, "control",
                                            unixbench_control_file)
                # the unixbench test needs the 'patch' and 'perl' commands installed
                utils_package.package_install(["patch", "perl"], session, 360)
                command = utils_test.run_autotest(vm, session, control_path,
                                                  None, None, params,
                                                  copy_only=True)
                session.cmd("%s &" % command, ignore_all_errors=True)
                # wait for autotest running on vm
                time.sleep(delay)

                def _is_unixbench_running():
                    cmd = "ps -ef | grep perl | grep Run"
                    return not session.cmd_status(cmd)
                if not utils_misc.wait_for(_is_unixbench_running, timeout=240):
                    test.cancel("Failed to run unixbench in guest,"
                                " please make sure some necessary"
                                " packages are installed in guest,"
                                " such as gcc, tar, bzip2")
                logging.debug("Unixbench test is running in VM")

        if test_type == "contrast":
            # wait for vm finish starting completely
            time.sleep(delay)

        if not (mb_enable and not tlbfs_enable):
            logging.debug("starting analyzing the hugepage usage...")
            pid = vms[-1].get_pid()
            started_free = utils_memory.get_num_huge_pages_free()
            # Get the thp usage from /proc/pid/smaps
            started_anon = utils_memory.get_num_anon_huge_pages(pid)
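            # pages consumed since the VM started; multiplying by the
            # hugepage size (KiB) gives the hugepage-backed memory in KiB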
            static_used = non_started_free - started_free
            hugepage_used = static_used * page_size

            if test_type == "contrast":
                # get qemu-kvm memory consumption by top
                cmd = "top -b -n 1|awk '$1 == %s {print $10}'" % pid
                rate = process.run(cmd, ignore_status=False,
                                   verbose=True, shell=True).stdout_text.strip()
                qemu_kvm_used = (utils_memory.memtotal() * float(rate)) / 100
                logging.debug("rate: %s, used-by-qemu-kvm: %f, used-by-vm: %d",
                              rate, qemu_kvm_used, hugepage_used)
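                # err_range defaults to 1.25, i.e. the two measurements may
                # differ by at most 25% of the hugepage-backed size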
                if abs(qemu_kvm_used - hugepage_used) > hugepage_used * (err_range - 1):
                    test.fail("Error for hugepage usage")
            if test_type == "stress":
                if non_started_free <= started_free:
                    logging.debug("hugepage usage:%d -> %d", non_started_free,
                                  started_free)
                    test.fail("Error for hugepage usage with stress")
            if mb_enable is not True:
                if static_used > 0:
                    test.fail("VM use static hugepage without"
                              " memoryBacking element")
                if thp_enable is not True and started_anon > 0:
                    test.fail("VM use transparent hugepage, while"
                              " it's disabled")
            else:
                if tlbfs_enable is not True:
                    if static_used > 0:
                        test.fail("VM use static hugepage without tlbfs"
                                  " mounted")
                    if thp_enable and started_anon <= 0:
                        test.fail("VM doesn't use transparent"
                                  " hugepage")
                else:
                    if shp_num > 0:
                        if static_used <= 0:
                            test.fail("VM doesn't use static"
                                      " hugepage")
                    else:
                        if static_used > 0:
                            test.fail("VM use static hugepage,"
                                      " while it's set to zero")
                    if thp_enable is not True:
                        if started_anon > 0:
                            test.fail("VM use transparent hugepage,"
                                      " while it's disabled")
                    else:
                        if shp_num == 0 and started_anon <= 0:
                            test.fail("VM doesn't use transparent"
                                      " hugepage, while static"
                                      " hugepage is disabled")
    finally:
        # end up session
        for session in sessions:
            session.close()

        for vm in vms:
            if vm.is_alive():
                vm.destroy()

        for vm_name in vm_names:
            if mb_enable:
                vm_xml.VMXML.del_memoryBacking_tag(vm_name)
            else:
                vm_xml.VMXML.set_memoryBacking_tag(vm_name)

        utils_libvirtd.libvirtd_restart()

        if tlbfs_enable is True:
            if tlbfs_status is not True:
                utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        else:
            if tlbfs_status is True:
                utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
        utils_memory.set_num_huge_pages(shp_orig_num)
        utils_memory.set_transparent_hugepage(thp_orig_status)
Example #10
0
def run(test, params, env):
    """
    Test virtiofs filesystem device:

    1. Start a guest with 1/2 virtiofs filesystem devices.
    2. Start 2 guests with the same virtiofs filesystem device.
    3. Coldplug/coldunplug a virtiofs filesystem device.
    4. Share data between guests and host.
    5. Run lifecycle operations for a guest with a virtiofs filesystem device.
    """
    def generate_expected_process_option(expected_results):
        """
        Generate expected virtiofsd process option
        """
        if cache_mode != "auto":
            expected_results = "cache=%s" % cache_mode
        if xattr == "on":
            expected_results += ",xattr"
        elif xattr == "off":
            expected_results += ",no_xattr"
        if flock == "on":
            expected_results += ",flock"
        else:
            expected_results += ",no_flock"
        if lock_posix == "on":
            expected_results += ",posix_lock"
        else:
            expected_results += ",no_posix_lock"
        logging.debug(expected_results)
        return expected_results

    def shared_data(vm_names, fs_devs):
        """
        Share data between guests and the host:
        1. Mount the dir in the guest;
        2. Write a file in the guest;
        3. Check that the md5sum values are the same in guests and host.
        """
        md5s = []
        for vm in vms:
            session = vm.wait_for_login()
            for fs_dev in fs_devs:
                logging.debug(fs_dev)
                session.cmd('rm -rf %s' % fs_dev.source['dir'],
                            ignore_all_errors=False)
                session.cmd('mkdir -p %s' % fs_dev.source['dir'])
                logging.debug("mount virtiofs dir in guest")
                cmd = "mount -t virtiofs %s %s" % (fs_dev.target['dir'],
                                                   fs_dev.source['dir'])
                status, output = session.cmd_status_output(cmd, timeout=300)
                if status != 0:
                    session.close()
                    test.fail("mount virtiofs dir failed: %s" % output)
                # the file is written once by the first guest and then
                # checked from every guest and from the host
                filename = fs_dev.source['dir'] + '/' + vms[0].name
                if vm == vms[0]:
                    cmd = "dd if=/dev/urandom of=%s bs=1M count=512 oflag=direct" % filename
                    status, output = session.cmd_status_output(cmd,
                                                               timeout=300)
                    if status != 0:
                        session.close()
                        test.fail("Write data failed: %s" % output)
                md5_value = session.cmd_status_output("md5sum %s" %
                                                      filename)[1].strip()
                md5s.append(md5_value)
                logging.debug(md5_value)
                md5_value = process.run("md5sum %s" %
                                        filename).stdout_text.strip()
                logging.debug(md5_value)
                md5s.append(md5_value)
            session.close()
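        # every guest and host read of a given file must produce the same
        # digest, so the set should collapse to one value per fs device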
        if len(set(md5s)) != len(fs_devs):
            test.fail("The md5sum value are not the same in guests and host")

    start_vm = params.get("start_vm", "no")
    vm_names = params.get("vms", "avocado-vt-vm1").split()
    cache_mode = params.get("cache_mode", "none")
    xattr = params.get("xattr", "on")
    lock_posix = params.get("lock_posix", "on")
    flock = params.get("flock", "on")
    path = params.get("virtiofsd_path", "/usr/libexec/virtiofsd")
    queue_size = int(params.get("queue_size", "512"))
    driver_type = params.get("driver_type", "virtiofs")
    guest_num = int(params.get("guest_num", "1"))
    fs_num = int(params.get("fs_num", "1"))
    vcpus_per_cell = int(params.get("vcpus_per_cell", 2))
    source_dir_prefix = params.get("source_dir_prefix", "/dir")
    target_prefix = params.get("target_prefix", "mount_tag")
    error_msg_start = params.get("error_msg_start", "")
    error_msg_save = params.get("error_msg_save", "")
    status_error = params.get("status_error", "no") == "yes"
    socket_file_checking = params.get("socket_file_checking", "no") == "yes"
    suspend_resume = params.get("suspend_resume", "no") == "yes"
    managedsave = params.get("managedsave", "no") == "yes"
    coldplug = params.get("coldplug", "no") == "yes"
    extra_hugepages = params.get_numeric("extra_hugepages")
    edit_start = params.get("edit_start", "no") == "yes"
    with_hugepages = params.get("with_hugepages", "yes") == "yes"
    with_numa = params.get("with_numa", "yes") == "yes"

    fs_devs = []
    vms = []
    vmxml_backups = []
    expected_fails_msg = []
    expected_results = ""
    host_hp_size = utils_memory.get_huge_page_size()
    backup_huge_pages_num = utils_memory.get_num_huge_pages()
    huge_pages_num = 0

    if len(vm_names) != guest_num:
        test.cancel("This test needs exactly %d vms." % guest_num)
    try:
        # Define filesystem device xml
        for index in range(fs_num):
            fsdev_keys = ['accessmode', 'driver', 'source', 'target', 'binary']
            accessmode = "passthrough"
            driver = {'type': driver_type, 'queue': queue_size}
            source_dir = str(source_dir_prefix) + str(index)
            logging.debug(source_dir)
            if not os.path.isdir(source_dir):
                os.mkdir(source_dir)
            target_dir = target_prefix + str(index)
            source = {'dir': source_dir}
            target = {'dir': target_dir}
            binary_keys = [
                'path', 'cache_mode', 'xattr', 'lock_posix', 'flock'
            ]
            binary_values = [path, cache_mode, xattr, lock_posix, flock]
            binary_dict = dict(zip(binary_keys, binary_values))
            fsdev_values = [accessmode, driver, source, target, binary_dict]
            fsdev_dict = dict(zip(fsdev_keys, fsdev_values))
            logging.debug(fsdev_dict)
            fs_dev = libvirt_device_utils.create_fs_xml(fsdev_dict)
            logging.debug(fs_dev)
            fs_devs.append(fs_dev)

        # Start guest with virtiofs filesystem device
        for index in range(guest_num):
            logging.debug("prepare vm %s", vm_names[index])
            vm = env.get_vm(vm_names[index])
            vms.append(vm)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            vmxml_backup = vmxml.copy()
            vmxml_backups.append(vmxml_backup)
            if vmxml.max_mem < 1024000:
                vmxml.max_mem = 1024000
            if with_hugepages:
                huge_pages_num += vmxml.max_mem // host_hp_size + extra_hugepages
                utils_memory.set_num_huge_pages(huge_pages_num)
            vmxml.remove_all_device_by_type('filesystem')
            vmxml.sync()
            numa_no = None
            if with_numa:
                numa_no = vmxml.vcpu // vcpus_per_cell if vmxml.vcpu != 1 else 1
            vm_xml.VMXML.set_vm_vcpus(vmxml.vm_name,
                                      vmxml.vcpu,
                                      numa_number=numa_no)
            vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,
                                               access_mode="shared",
                                               hpgs=with_hugepages)
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_names[index])
            logging.debug(vmxml)
            if coldplug:
                ret = virsh.attach_device(vm_names[index],
                                          fs_devs[0].xml,
                                          flagstr='--config',
                                          debug=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
            else:
                for fs in fs_devs:
                    vmxml.add_device(fs)
                    vmxml.sync()
            logging.debug(vmxml)
            result = virsh.start(vm_names[index], debug=True)
            if status_error and not managedsave:
                expected_error = error_msg_start
                utils_test.libvirt.check_exit_status(result, expected_error)
                return
            else:
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            expected_results = generate_expected_process_option(
                expected_results)
            cmd = 'ps aux | grep virtiofsd | head -n 1'
            utils_test.libvirt.check_cmd_output(cmd, content=expected_results)

        if managedsave:
            expected_error = error_msg_save
            result = virsh.managedsave(vm_names[0],
                                       ignore_status=True,
                                       debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        else:
            shared_data(vm_names, fs_devs)
            if suspend_resume:
                virsh.suspend(vm_names[0], debug=True, ignore_status=False)
                time.sleep(30)
                virsh.resume(vm_names[0], debug=True, ignore_status=False)
            elif edit_start:
                vmxml_virtio_backup = vm_xml.VMXML.new_from_inactive_dumpxml(
                    vm_names[0])
                if vm.is_alive():
                    virsh.destroy(vm_names[0])
                    cmd = "virt-xml %s --edit --qemu-commandline  '\-foo'" % vm_names[
                        0]
                    cmd_result = process.run(cmd,
                                             ignore_status=True,
                                             shell=True)
                    logging.debug(virsh.dumpxml(vm_names[0]))
                    if cmd_result.exit_status:
                        test.error("virt-xml edit guest failed: %s" %
                                   cmd_result)
                    result = virsh.start(vm_names[0],
                                         ignore_status=True,
                                         debug=True)
                    if error_msg_start:
                        expected_fails_msg.append(error_msg_start)
                    utils_test.libvirt.check_result(
                        result, expected_fails=expected_fails_msg)
                    if not libvirt_version.version_compare(6, 10, 0):
                        # Bug #1897105 was fixed in libvirt-6.10.0; on
                        # earlier versions the environment must be
                        # recovered manually.
                        cmd = "pkill virtiofsd"
                        process.run(cmd, shell=True)
                    if not vm.is_alive():
                        # Restoring vm and check if vm can start successfully
                        vmxml_virtio_backup.sync()
                        virsh.start(vm_names[0], ignore_status=False)
            elif socket_file_checking:
                result = virsh.domid(vm_names[0])
                if result.exit_status:
                    test.fail("Get domid failed.")
                domid = result.stdout.strip()
                domain_dir = ("/var/lib/libvirt/qemu/domain-" + domid +
                              '-' + vm_names[0])
                for fs_dev in fs_devs:
                    alias = fs_dev.alias['name']
                    expected_pid = os.path.join(domain_dir,
                                                alias + '-fs.pid')
                    expected_sock = os.path.join(domain_dir,
                                                 alias + '-fs.sock')
                    status1 = process.run('ls -l %s' % expected_pid,
                                          ignore_status=True,
                                          shell=True).exit_status
                    status2 = process.run('ls -l %s' % expected_sock,
                                          ignore_status=True,
                                          shell=True).exit_status
                    if status1 or status2:
                        test.fail("The socket and pid files are not"
                                  " as expected")

    finally:
        for vm in vms:
            if vm.is_alive():
                session = vm.wait_for_login()
                for fs_dev in fs_devs:
                    mount_dir = fs_dev.source['dir']
                    logging.debug(mount_dir)
                    session.cmd('umount -f %s' % mount_dir,
                                ignore_all_errors=True)
                    session.cmd('rm -rf %s' % mount_dir,
                                ignore_all_errors=True)
                session.close()
                vm.destroy(gracefully=False)
        for index in range(fs_num):
            process.run('rm -rf %s%s' % (source_dir_prefix, index),
                        ignore_status=False)
        for vmxml_backup in vmxml_backups:
            vmxml_backup.sync()
        utils_memory.set_num_huge_pages(backup_huge_pages_num)
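
As a quick illustration of generate_expected_process_option above (a sketch using this test's default params; the function is nested inside run, so calling it standalone is hypothetical):

    expected = generate_expected_process_option("")
    # -> "cache=none,xattr,flock,posix_lock" with cache_mode="none",
    #    xattr="on", flock="on" and lock_posix="on"; the test then greps
    #    for this string in the virtiofsd command line via 'ps aux'.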
Example #11
0
def run(test, params, env):
    """
    Test steps:

    1) Get the params from params.
    2) check the environment
    3) Strat the VM and check whether the VM been started successfully
    4) Compare the Hugepage memory size to the Guest memory setted.
    5) Check the hugepage memory usage.
    6) Clean up
    """
    test_type = params.get("test_type", 'normal')
    tlbfs_enable = 'yes' == params.get("hugetlbfs_enable", 'no')
    shp_num = int(params.get("static_hugepage_num", 1024))
    thp_enable = 'yes' == params.get("trans_hugepage_enable", 'no')
    mb_enable = 'yes' == params.get("mb_enable", 'yes')
    delay = int(params.get("delay_time", 10))

    # back up original settings
    shp_orig_num = utils_memory.get_num_huge_pages()
    thp_orig_status = utils_memory.get_transparent_hugepage()
    page_size = utils_memory.get_huge_page_size()

    if test_type == "contrast":
        err_range = float(params.get("mem_error_range", 1.25))
    elif test_type == "stress":
        target_path = params.get("target_path", "/tmp/test.out")
    elif test_type == "unixbench":
        unixbench_control_file = params.get("unixbench_controle_file",
                                            "unixbench5.control")

    vm_names = []
    if test_type == "contrast":
        vm_names = params.get("vms").split()[:2]
        if len(vm_names) < 2:
            raise error.TestNAError("Hugepage Stress Test need two VM(s).")
        # confirm no VM(s) running
        allvms = virsh.dom_list('--name').stdout.strip()
        if allvms != '':
            raise error.TestNAError("one or more VM(s) is living.")
    else:
        vm_names.append(params.get("main_vm"))

    # mount/umount hugetlbfs
    tlbfs_status = utils_misc.is_mounted("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    if tlbfs_enable is True:
        if tlbfs_status is not True:
            utils_misc.mount("hugetlbfs", "/dev/hugepages", "hugetlbfs")
    else:
        if tlbfs_status is True:
            utils_misc.umount("hugetlbfs", "/dev/hugepages", "hugetlbfs")

    # set static hugepage
    utils_memory.set_num_huge_pages(shp_num)

    # enable/disable transparent hugepage
    if thp_enable:
        utils_memory.set_transparent_hugepage('always')
    else:
        utils_memory.set_transparent_hugepage('never')

    # set/del memoryBacking tag
    for vm_name in vm_names:
        if mb_enable:
            vm_xml.VMXML.set_memoryBacking_tag(vm_name)
        else:
            vm_xml.VMXML.del_memoryBacking_tag(vm_name)

    utils_libvirtd.libvirtd_restart()
    non_started_free = utils_memory.get_num_huge_pages_free()

    vms = []
    sessions = []
    for vm_name in vm_names:
        # try to start vm and login
        try:
            vm = env.get_vm(vm_name)
            vm.start()
        except VMError as e:
            if mb_enable and not tlbfs_enable:
                # if hugetlbfs is not mounted, starting the VM with the
                # memoryBacking tag will fail
                logging.debug(e)
            else:
                error_msg = "Test failed in positive case. error: %s\n" % e
                raise error.TestFail(error_msg)
        if vm.is_alive() is not True:
            break
        vms.append(vm)

        # try to login and run some program
        try:
            session = vm.wait_for_login()
        except (LoginError, ShellError) as e:
            error_msg = "Test failed in positive case.\n error: %s\n" % e
            raise error.TestFail(error_msg)