def run(test, params, env):
    """
    Test command virsh cpu-models

    :param test: test object
    :param params: dict of test parameters
    :param env: test environment object
    """
    cpu_arch = params.get("cpu_arch", "")
    option = params.get("option", "")
    target_uri = params.get("target_uri", "default")
    status_error = "yes" == params.get("status_error", "no")
    logging.debug(target_uri.count('EXAMPLE.COM'))
    # Abort early when the placeholder URI was never replaced in the config.
    if target_uri.count('EXAMPLE.COM'):
        raise error.TestNAError("Please replace '%s' with valid uri" %
                                target_uri)
    connect_uri = libvirt_vm.normalize_connect_uri(target_uri)
    arch_list = []
    if not cpu_arch:
        try:
            # No arch given: collect every guest arch the host supports
            # from the capabilities XML.
            capa = capability_xml.CapabilityXML()
            guest_map = capa.get_guest_capabilities()
            guest_arch = []
            for v in guest_map.values():
                guest_arch += v.keys()
            for arch in set(guest_arch):
                arch_list.append(arch)
        # Python 3 compatible syntax (was the py2-only "except Exception, e").
        except Exception as e:
            raise error.TestError("Fail to get guest arch list of the"
                                  " host supported:\n%s" % e)
 def check_config_file():
     """
     Verify the config file took effect by comparing the host UUID
     reported in the capabilities XML with the expected one.
     """
     reported_uuid = capability_xml.CapabilityXML()['uuid']
     if reported_uuid == check_uuid:
         return
     raise error.TestFail('Expected host UUID is %s, but got %s' %
                          (check_uuid, reported_uuid))
def run(test, params, env):
    """
    Test the command virsh domcapabilities

    :param test: test object
    :param params: dict of test parameters
    :param env: test environment object
    """
    target_uri = params.get("target_uri", "default")
    if target_uri.count("EXAMPLE.COM"):
        raise error.TestNAError("Please replace '%s' with valid uri" %
                                target_uri)
    connect_uri = libvirt_vm.normalize_connect_uri(target_uri)
    virsh_options = params.get("virsh_options", "")
    virttype = params.get("virttype_value", "")
    emulatorbin = params.get("emulatorbin_value", "")
    arch = params.get("arch_value", "")
    machine = params.get("machine_value", "")
    option_dict = {'arch': arch, 'virttype': virttype,
                   'emulatorbin': emulatorbin, 'machine': machine}
    options_list = [option_dict]
    extra_option = params.get("extra_option", "")
    # Get --virttype, --emulatorbin, --arch and --machine values from
    # virsh capabilities output, then assemble option for testing
    # This will ignore the virttype, emulatorbin, arch and machine values
    if virsh_options == "AUTO":
        options_list = []
        capa_xml = capability_xml.CapabilityXML()
        guest_capa = capa_xml.get_guest_capabilities()
        for arch_prop in guest_capa.values():
            for arch in arch_prop.keys():
                machine_list = arch_prop[arch]['machine']
                virttype_list = []
                # Top-level <emulator> plus any per-domain emulators.
                emulatorbin_list = [arch_prop[arch]['emulator']]
                for key in arch_prop[arch].keys():
                    if key.startswith("domain_"):
                        # Strip the "domain_" prefix to get the virttype.
                        virttype_list.append(key[7:])
                        if arch_prop[arch][key].values():
                            emulatorbin_list.append(arch_prop[arch][key]['emulator'])
                # Build the cartesian product of all discovered values.
                for virttype in virttype_list:
                    for emulatorbin in emulatorbin_list:
                        for machine in machine_list:
                            option_dict = {'arch': arch,
                                           'virttype': virttype,
                                           'emulatorbin': emulatorbin,
                                           'machine': machine}
                            options_list.append(option_dict)

    # Run test cases
    status_error = "yes" == params.get("status_error", "no")
    for option in options_list:
        result = virsh.domcapabilities(virttype=option['virttype'],
                                       emulatorbin=option['emulatorbin'],
                                       arch=option['arch'],
                                       machine=option['machine'],
                                       options=extra_option,
                                       uri=connect_uri,
                                       ignore_status=True,
                                       debug=True)
        # Validate every invocation; checking only after the loop (as
        # before) silently ignored all but the last result and raised
        # NameError when options_list was empty.
        utlv.check_exit_status(result, status_error)
 def test_intel_iommu():
     """
     Check that IOMMU is enabled on the kernel command line and that the
     libvirt capabilities XML reports IOMMU support.
     """
     kernel_cmd = utils_misc.get_ker_cmd()
     res = re.search("iommu=on", kernel_cmd)
     if not res:
         # Report the actual kernel command line; the original formatted
         # the failed match object (always None here) into the message.
         test.error("iommu should be enabled in kernel cmd line - "
                    "'%s'." % kernel_cmd)
     cap_xml = capability_xml.CapabilityXML()
     if cap_xml.get_iommu().get('support') != 'yes':
         test.fail("IOMMU is disabled in capabilities: %s. "
                   % cap_xml.get_iommu())
# Example #5
# 0
def get_capa_xml(operate='', extract=False):
    """
    Get full capabilities xml or the cpu definition
    from capabilities xml

    :param operate: Operation mode, decide file's detail
    :param extract: Setting True means to extract cpu definition
                    from capabilities xml
    :return: The instance of CapabilityXML
    """
    xml_obj = capability_xml.CapabilityXML()
    if operate == 'delete':
        # Drop the last feature element of the cpu definition.
        xml_obj.remove_feature(num=-1)
    if extract:
        # Re-root the tree at /host/cpu so only the cpu element remains.
        xml_obj.xmltreefile = xml_obj.xmltreefile.reroot('/host/cpu')
    return xml_obj
def run(test, params, env):
    """
    Test command virsh cpu-models

    :param test: Avocado test object
    :param params: dict of test parameters
    :param env: test environment object
    """
    cpu_arch = params.get("cpu_arch", "")
    option = params.get("option", "")
    status_error = "yes" == params.get("status_error", "no")
    remote_ref = params.get("remote_ref", "")
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))

    if remote_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)

        # Abort when the placeholder address was never replaced.
        if 'EXAMPLE.COM' in remote_ip:
            test.cancel("Please replace '%s' with valid remote ip" % remote_ip)

        ssh_key.setup_ssh_key(remote_ip, "root", remote_pwd)
        connect_uri = libvirt_vm.complete_uri(remote_ip)

    arch_list = []
    if not cpu_arch:
        try:
            # No arch given: test every guest arch the host supports.
            capa = capability_xml.CapabilityXML()
            guest_map = capa.get_guest_capabilities()
            guest_arch = []
            # Plain dict iteration; the six.itervalues()/list() wrappers
            # are unnecessary on Python 3.
            for v in guest_map.values():
                guest_arch += list(v.keys())
            arch_list = list(set(guest_arch))
        except Exception as e:
            test.error("Fail to get guest arch list of the host"
                       " supported:\n%s" % e)
    else:
        arch_list.append(cpu_arch)
    for arch in arch_list:
        # Lazy %-style args avoid formatting when debug logging is off.
        logging.debug("Get the CPU models for arch: %s", arch)
        result = virsh.cpu_models(arch,
                                  options=option,
                                  uri=connect_uri,
                                  ignore_status=True,
                                  debug=True)
        utlv.check_exit_status(result, expect_error=status_error)
# Example #7
# 0
    def get_cpu_xml(target, mode, tmp_file, cpu_mode=""):
        """
        Get CPU information and put it into a file.

        :param target: Test target, host or guest's cpu description.
        :param mode: Test mode, decides file's detail.
        :param tmp_file: File saves CPU information.
        :param cpu_mode: CPU mode used when building the guest cpu xml.
        :raise error.TestError: when the CPU information cannot be fetched.
        """
        try:
            # "with" guarantees the file handle is closed even when the
            # xml operations below raise (the original leaked it then).
            with open(tmp_file, 'wb') as cpu_xml_file:
                if target == "host":
                    libvirtxml = capability_xml.CapabilityXML()
                else:
                    libvirtxml = vm_xml.VMCPUXML(vm_name=vm_name, mode=cpu_mode)
                if mode == "modify":
                    if modify_target == "vendor":
                        libvirtxml['vendor'] = test_vendor
                    else:
                        # Choose the last feature to test
                        if feature_action == "remove":
                            libvirtxml.remove_feature(feature_num)
                        elif feature_action == "repeat":
                            name = libvirtxml.get_feature_name(feature_num)
                            libvirtxml.add_feature(name)
                        else:
                            libvirtxml.set_feature(feature_num, feature_name)
                    libvirtxml.dict_get('xml').write(cpu_xml_file)
                elif mode == "clear":
                    # Clear up file detail
                    cpu_xml_file.truncate(0)
                else:
                    libvirtxml.dict_get('xml').write(cpu_xml_file)
        except (IndexError, AttributeError):
            # Restore the guest definition before failing the test.
            if target == "guest":
                vmxml.undefine()
                vmxml.define()
            raise error.TestError("Get CPU infomation failed!")
# Example #8
# 0
def get_cpu_definition(source_type, vm_name, test):
    """
    Extract cpu definition according source type

    :param source_type: The source file type includes
                        domxml, domcapa_xml and capa_xml
    :param vm_name: The VM name
    :param test: Avocado test object
    :return: The string of cpu definition
    """
    if source_type == "domxml":
        # cpu element of the live domain xml
        xml_obj = vm_xml.VMXML.new_from_dumpxml(vm_name)
        return xml_obj.xmltreefile.get_element_string('/cpu')
    if source_type == "domcapa_xml":
        # cpu element of the domain capabilities xml
        xml_obj = domcapability_xml.DomCapabilityXML()
        return xml_obj.xmltreefile.get_element_string('/cpu')
    if source_type == "capa_xml":
        # host cpu element of the capabilities xml
        xml_obj = capability_xml.CapabilityXML()
        return xml_obj.xmltreefile.get_element_string('/host/cpu')
    test.fail("Invalid source type: %s" % source_type)
def run(test, params, env):
    """
    Test the command virsh domcapabilities

    :param test: Avocado test object
    :param params: dict of test parameters
    :param env: test environment object
    """
    connect_uri = libvirt_vm.normalize_connect_uri(
        params.get("connect_uri", "default"))
    remote_ref = params.get("remote_ref", "")

    if remote_ref == "remote":
        remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
        remote_pwd = params.get("remote_pwd", None)

        # Abort when the placeholder address was never replaced.
        if 'EXAMPLE.COM' in remote_ip:
            test.cancel("Please replace '%s' with valid remote ip" % remote_ip)

        ssh_key.setup_ssh_key(remote_ip, "root", remote_pwd)
        connect_uri = libvirt_vm.complete_uri(remote_ip)

    virsh_options = params.get("virsh_options", "")
    virttype = params.get("virttype_value", "")
    emulatorbin = params.get("emulatorbin_value", "")
    arch = params.get("arch_value", "")
    machine = params.get("machine_value", "")
    option_dict = {
        'arch': arch,
        'virttype': virttype,
        'emulatorbin': emulatorbin,
        'machine': machine
    }
    options_list = [option_dict]
    extra_option = params.get("extra_option", "")
    # Get --virttype, --emulatorbin, --arch and --machine values from
    # virsh capabilities output, then assemble option for testing
    # This will ignore the virttype, emulatorbin, arch and machine values
    if virsh_options == "AUTO":
        options_list = []
        capa_xml = capability_xml.CapabilityXML()
        guest_capa = capa_xml.get_guest_capabilities()
        # Plain dict iteration; six.itervalues()/list() wrappers are
        # unnecessary on Python 3.
        for arch_prop in guest_capa.values():
            for arch in list(arch_prop.keys()):
                machine_list = arch_prop[arch]['machine']
                virttype_list = []
                # Top-level <emulator> plus any per-domain emulators.
                emulatorbin_list = [arch_prop[arch]['emulator']]
                for key in list(arch_prop[arch].keys()):
                    if key.startswith("domain_"):
                        # Strip the "domain_" prefix to get the virttype.
                        virttype_list.append(key[7:])
                        if arch_prop[arch][key].values():
                            emulatorbin_list.append(
                                arch_prop[arch][key]['emulator'])
                # Build the cartesian product of all discovered values.
                for virttype in virttype_list:
                    for emulatorbin in emulatorbin_list:
                        for machine in machine_list:
                            option_dict = {
                                'arch': arch,
                                'virttype': virttype,
                                'emulatorbin': emulatorbin,
                                'machine': machine
                            }
                            options_list.append(option_dict)

    # Run test cases
    status_error = "yes" == params.get("status_error", "no")
    for option in options_list:
        result = virsh.domcapabilities(virttype=option['virttype'],
                                       emulatorbin=option['emulatorbin'],
                                       arch=option['arch'],
                                       machine=option['machine'],
                                       options=extra_option,
                                       uri=connect_uri,
                                       ignore_status=True,
                                       debug=True)
        # Validate every invocation; checking only after the loop (as
        # before) silently ignored all but the last result and raised
        # NameError when options_list was empty.
        utlv.check_exit_status(result, status_error)
# Example #10
# 0
def run(test, params, env):
    """
    Test the command virsh maxvcpus

    (1) Call virsh maxvcpus
    (2) Call virsh -c remote_uri maxvcpus
    (3) Call virsh maxvcpus with an unexpected option

    :param test: Avocado test object
    :param params: dict of test parameters
    :param env: test environment object
    """

    # get the params from subtests.
    # params for general.
    option = params.get("virsh_maxvcpus_options")
    status_error = params.get("status_error")
    connect_arg = params.get("connect_arg", "")

    # params for transport connect.
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
    server_ip = params.get("remote_ip", local_ip)
    server_pwd = params.get("remote_pwd", local_pwd)
    transport_type = params.get("connect_transport_type", "local")
    transport = params.get("connect_transport", "ssh")
    connect_uri = None
    # check the config
    # Skip remote transport tests when the placeholder values in the
    # config were never replaced.
    if (connect_arg == "transport" and transport_type == "remote"
            and local_ip.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_ip is not configured "
                                       "in remote test.")
    if (connect_arg == "transport" and transport_type == "remote"
            and local_pwd.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_pwd is not configured "
                                       "in remote test.")

    if connect_arg == "transport":
        canonical_uri_type = virsh.driver()

        # NOTE(review): virsh_instance is only assigned in this ssh
        # branch; with connect_arg == "transport" and a non-ssh
        # transport the DomCapabilityXML call below would hit an
        # unbound name — confirm other transports are never configured.
        if transport == "ssh":
            ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            # Reuse an existing ssh connection if it already works,
            # otherwise set one up first.
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type,
                transport=transport,
                dest_ip=server_ip)
            virsh_dargs = {
                'remote_ip': server_ip,
                'remote_user': '******',
                'remote_pwd': server_pwd,
                'ssh_remote_auth': True
            }
            virsh_instance = virsh.VirshPersistent(**virsh_dargs)
    else:
        connect_uri = connect_arg
        virsh_instance = virsh

    # libvirt >= 2.3.0 reports maxvcpus via domcapabilities, so compute
    # the expected value(s) up front for the later comparison.
    if libvirt_version.version_compare(2, 3, 0):
        try:
            maxvcpus = None
            maxvcpus_cap = None
            dom_capabilities = None
            # make sure we take maxvcpus from right host, helps in case remote
            try:
                dom_capabilities = domcap.DomCapabilityXML(
                    virsh_instance=virsh_instance)
                maxvcpus = dom_capabilities.max
                logging.debug(
                    "maxvcpus calculate from domcapabilities "
                    "is %s", maxvcpus)
            # NOTE(review): `details` is caught but not included in the
            # failure message below — the xml object is reported instead.
            except Exception as details:
                raise exceptions.TestFail("Failed to get maxvcpus from "
                                          "domcapabilities xml:\n%s" %
                                          dom_capabilities)
            try:
                # Secondary expected value taken from the capabilities
                # xml for the current host architecture.
                cap_xml = capability_xml.CapabilityXML()
                maxvcpus_cap = cap_xml.get_guest_capabilities()['hvm'][
                    platform.machine()]['maxcpus']
                logging.debug('maxvcpus_cap is %s', maxvcpus_cap)
            except Exception as details:
                logging.debug(
                    "Failed to get maxvcpu from virsh "
                    "capabilities: %s", details)
                # Let's fall back in case of failure
                maxvcpus_cap = maxvcpus
            if not maxvcpus:
                raise exceptions.TestFail("Failed to get max value for vcpu"
                                          "from domcapabilities "
                                          "xml:\n%s" % dom_capabilities)
        except Exception as details:
            raise exceptions.TestFail(
                "Failed get the virsh instance with uri: "
                "%s\n Details: %s" % (connect_uri, details))

    # On aarch64 the expected maxvcpus depends on the GIC version, so
    # read it from the domcapabilities features.
    is_arm = "aarch" in platform.machine()
    gic_version = ''
    if is_arm:
        for gic_enum in domcap.DomCapabilityXML()['features']['gic_enums']:
            if gic_enum['name'] == "version":
                gic_version = gic_enum['values'][0].get_value()

    # Run test case
    result = virsh.maxvcpus(option,
                            uri=connect_uri,
                            ignore_status=True,
                            debug=True)

    maxvcpus_test = result.stdout.strip()
    status = result.exit_status

    # Check status_error
    if status_error == "yes":
        # Negative case: the command is expected to fail.
        if status == 0:
            raise exceptions.TestFail("Run succeeded with unsupported option!")
        else:
            logging.info("Run failed with unsupported option %s " % option)
    elif status_error == "no":
        if status == 0:
            if not libvirt_version.version_compare(2, 3, 0):
                # Old libvirt returned fixed per-driver values.
                if "kqemu" in option:
                    if not maxvcpus_test == '1':
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s " %
                                                  (maxvcpus_test, option))
                elif option in ['qemu', '--type qemu', '']:
                    if not maxvcpus_test == '16':
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s " %
                                                  (maxvcpus_test, option))
                else:
                    # No check with other types
                    pass
            else:
                # It covers all possible combinations
                if option in [
                        'qemu', 'kvm', '--type qemu', '--type kvm', 'kqemu',
                        '--type kqemu', ''
                ]:
                    # GICv2 hosts are limited to 8 vcpus for kvm guests.
                    if (is_arm and gic_version == '2'
                            and option in ['kvm', '']):
                        if not maxvcpus_test == '8':
                            raise exceptions.TestFail(
                                "Command output %s is not "
                                "expected for %s " % (maxvcpus_test, option))
                    # Accept either the domcapabilities or the
                    # capabilities value as the expected maximum.
                    elif not (maxvcpus_test == maxvcpus
                              or maxvcpus_test == maxvcpus_cap):
                        raise exceptions.TestFail("Command output %s is not "
                                                  "expected for %s " %
                                                  (maxvcpus_test, option))
                else:
                    # No check with other types
                    pass
        else:
            raise exceptions.TestFail("Run command failed")
# Example #11
# 0
 def _from_scratch(self):
     """Build a fresh CapabilityXML bound to the dummy virsh instance."""
     capa = capability_xml.CapabilityXML(virsh_instance=self.dummy_virsh)
     return capa
# Example #12
# 0
def run(test, params, env):
    """
    Test interafce xml options.

    1.Prepare test environment,destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    host_arch = platform.machine()
    virsh_dargs = {'debug': True, 'ignore_status': False}

    if not utils_package.package_install(["lsof"]):
        test.cancel("Failed to install dependency package lsof" " on host")

    def create_iface_xml(iface_mac):
        """
        Build a new interface xml object for the given MAC address.
        """
        new_iface = Interface(type_name=iface_type)
        src = ast.literal_eval(iface_source)
        if src:
            new_iface.source = src
        # Fall back to virtio when no model was requested.
        new_iface.model = iface_model or "virtio"
        new_iface.mac_address = iface_mac
        attr_dict = ast.literal_eval(iface_driver) if iface_driver else {}
        host_dict = (ast.literal_eval(iface_driver_host)
                     if iface_driver_host else {})
        guest_dict = (ast.literal_eval(iface_driver_guest)
                      if iface_driver_guest else {})
        new_iface.driver = new_iface.new_driver(driver_attr=attr_dict,
                                                driver_host=host_dict,
                                                driver_guest=guest_dict)
        if test_target:
            new_iface.target = {"dev": target_dev}
        logging.debug("Create new interface xml: %s", new_iface)
        return new_iface

    def modify_iface_xml(update, status_error=False):
        """
        Modify interface xml options

        :param update: True to apply the interface via "virsh
                       update-device", False to redefine the whole
                       domain xml.
        :param status_error: whether the update-device call is expected
                             to fail.
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        # Operate on the first interface device of the domain.
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        if iface_model:
            iface.model = iface_model
        else:
            del iface.model
        if iface_type:
            iface.type_name = iface_type
        del iface.source
        source = ast.literal_eval(iface_source)
        if source:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check source device is valid or not,
            # if it's not in host interface list, try to set
            # source device to first active interface of host
            if (iface.type_name == "direct" and 'dev' in source
                    and source['dev'] not in net_ifs):
                logging.warn(
                    "Source device %s is not a interface"
                    " of host, reset to %s", source['dev'], net_ifs[0])
                source['dev'] = net_ifs[0]
            iface.source = source
        backend = ast.literal_eval(iface_backend)
        if backend:
            iface.backend = backend
        driver_dict = {}
        driver_host = {}
        driver_guest = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        if iface_driver_host:
            driver_host = ast.literal_eval(iface_driver_host)
        if iface_driver_guest:
            driver_guest = ast.literal_eval(iface_driver_guest)
        iface.driver = iface.new_driver(driver_attr=driver_dict,
                                        driver_host=driver_host,
                                        driver_guest=driver_guest)
        if test_target:
            logging.debug("iface.target is %s" % target_dev)
            iface.target = {"dev": target_dev}
        # Drop any fixed PCI address so libvirt reassigns it.
        if iface.address:
            del iface.address
        if set_ip:
            iface.ips = [ast.literal_eval(x) for x in set_ips]
        logging.debug("New interface xml file: %s", iface)
        if unprivileged_user:
            # Create disk image for unprivileged user
            disk_index = xml_devices.index(
                xml_devices.by_device_tag("disk")[0])
            disk_xml = xml_devices[disk_index]
            logging.debug("source: %s", disk_xml.source)
            disk_source = disk_xml.source.attrs["file"]
            # Copy (preserving SELinux context) and chown the image so
            # the unprivileged user can use it.
            cmd = ("cp -fZ {0} {1} && chown {2}:{2} {1}"
                   "".format(disk_source, dst_disk, unprivileged_user))
            process.run(cmd, shell=True)
            disk_xml.source = disk_xml.new_disk_source(
                attrs={"file": dst_disk})
            vmxml.devices = xml_devices
            # Remove all channels to avoid of permission problem
            channels = vmxml.get_devices(device_type="channel")
            for channel in channels:
                vmxml.del_device(channel)
            logging.info("Unprivileged users can't use 'dac' security driver,"
                         " removing from domain xml if present...")
            vmxml.del_seclabel([('model', 'dac')])

            # Set vm memory to 2G if it's larger than 2G
            if vmxml.memory > 2097152:
                vmxml.memory = vmxml.current_mem = 2097152

            vmxml.xmltreefile.write()
            logging.debug("New VM xml: %s", vmxml)
            process.run("chmod a+rw %s" % vmxml.xml, shell=True)
            virsh.define(vmxml.xml, **virsh_dargs)
        # Try to modify interface xml by update-device or edit xml
        elif update:
            iface.xmltreefile.write()
            ret = virsh.update_device(vm_name, iface.xml, ignore_status=True)
            libvirt.check_exit_status(ret, status_error)
        else:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            try:
                vmxml.sync()
                if define_error:
                    test.fail("Define VM succeed, but it should fail")
            except xcepts.LibvirtXMLError as e:
                if not define_error:
                    test.fail("Define VM fail: %s" % e)

    def check_offloads_option(if_name, driver_options, session=None):
        """
        Check interface offloads by ethtool output
        """
        offload_map = {"csum": "tx-checksumming",
                       "tso4": "tcp-segmentation-offload",
                       "tso6": "tx-tcp6-segmentation",
                       "ecn": "tx-tcp-ecn-segmentation",
                       "ufo": "udp-fragmentation-offload"}
        cmd = "ethtool -k %s | head -18" % if_name
        # Run either inside the guest session or on the host.
        if session:
            ret, output = session.cmd_status_output(cmd)
        else:
            cmd_result = process.run(cmd, shell=True)
            ret, output = cmd_result.exit_status, cmd_result.stdout_text
        if ret:
            test.fail("ethtool return error code")
        logging.debug("ethtool output: %s", output)
        for opt_name in list(driver_options.keys()):
            if opt_name not in offload_map:
                continue
            feature = offload_map[opt_name]
            expected_line = "%s: %s" % (feature, driver_options[opt_name])
            # Only fail when ethtool lists the feature but with a
            # different value than requested.
            if output.count(feature) and not output.count(expected_line):
                test.fail("offloads option %s: %s isn't"
                          " correct in ethtool output" %
                          (feature, driver_options[opt_name]))

    def run_xml_test(iface_mac):
        """
        Test for interface options in vm xml

        :param iface_mac: MAC address identifying the interface to check
        """
        # Get the interface object according the mac address
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_devices = vmxml.get_devices(device_type="interface")
        iface = None
        for iface_dev in iface_devices:
            if iface_dev.mac_address == iface_mac:
                iface = iface_dev
        if not iface:
            test.fail("Can't find interface with mac"
                      " '%s' in vm xml" % iface_mac)
        driver_dict = {}
        if iface_driver:
            driver_dict = ast.literal_eval(iface_driver)
        for driver_opt in list(driver_dict.keys()):
            if not driver_dict[driver_opt] == iface.driver.driver_attr[
                    driver_opt]:
                test.fail("Can't see driver option %s=%s in vm xml" %
                          (driver_opt, driver_dict[driver_opt]))
            else:
                logging.info("Find %s=%s in vm xml" %
                             (driver_opt, driver_dict[driver_opt]))
        if iface_target:
            if ("dev" not in iface.target
                    or not iface.target["dev"].startswith(iface_target)):
                test.fail("Can't see device target dev in vm xml")
            # Check macvtap mode by ip link command
            if iface_target == "macvtap" and "mode" in iface.source:
                cmd = "ip -d link show %s" % iface.target["dev"]
                output = process.run(cmd, shell=True).stdout_text
                logging.debug("ip link output: %s", output)
                mode = iface.source["mode"]
                if mode == "passthrough":
                    mode = "passthru"
                if not re.search(r"macvtap\s+mode %s" % mode, output):
                    test.fail("Failed to verify macvtap mode")
        # Check if the "target dev" is set successfully
        # 1. Target dev name with prefix as "vnet" will always be override;
        # 2. Target dev name with prefix as "macvtap" or "macvlan" with direct
        # type interface will be override;
        # 3. Other scenarios, the target dev should be set successfully.
        if test_target:
            if target_dev != iface.target["dev"]:
                if target_dev.startswith("vnet") or \
                        (iface_type == "direct" and
                         (target_dev.startswith("macvtap") or
                          target_dev.startswith("macvlan"))):
                    logging.debug("target dev %s is override" % target_dev)
                else:
                    # Use %-formatting: test.fail() takes a single message
                    # string, so the original comma-separated argument was
                    # never interpolated into the failure text.
                    test.fail("Failed to set target dev to %s" % target_dev)
            else:
                logging.debug("target dev set successfully to %s",
                              iface.target["dev"])

    def run_cmdline_test(iface_mac, host_arch):
        """
        Test qemu command line
        :param iface_mac: expected MAC
        :param host_arch: host architecture, e.g. x86_64
        :raise avocado.core.exceptions.TestError: if preconditions are not met
        :raise avocado.core.exceptions.TestFail: if commandline doesn't match
        :return: None
        """
        # Grab the running qemu process command line via ps.
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        ret = process.run(cmd, shell=True)
        logging.debug("Command line %s", ret.stdout_text)
        if test_vhost_net:
            if not ret.stdout_text.count("vhost=on") and not rm_vhost_driver:
                test.fail("Can't see vhost options in"
                          " qemu-kvm command line")

        # Map the interface model to the qemu device option expected on
        # the command line (ccw variant on s390x, pci elsewhere).
        if iface_model == "virtio":
            if host_arch == 's390x':
                model_option = "device virtio-net-ccw"
            else:
                model_option = "device virtio-net-pci"
        elif iface_model == 'rtl8139':
            model_option = "device rtl8139"
        else:
            test.error(
                "Don't know which device driver to expect on qemu cmdline"
                " for iface_model %s" % iface_model)
        iface_cmdline = re.findall(
            r"%s,(.+),mac=%s" % (model_option, iface_mac), ret.stdout_text)
        if not iface_cmdline:
            test.fail("Can't see %s with mac %s in command"
                      " line" % (model_option, iface_mac))

        cmd_opt = {}
        # NOTE(review): rsplit("=") with no maxsplit splits on every "=",
        # so a value containing "=" would be mis-parsed — confirm option
        # values never embed "=".
        for opt in iface_cmdline[0].split(','):
            tmp = opt.rsplit("=")
            cmd_opt[tmp[0]] = tmp[1]
        logging.debug("Command line options %s", cmd_opt)

        driver_dict = {}
        # Test <driver> xml options.
        if iface_driver:
            iface_driver_dict = ast.literal_eval(iface_driver)
            for driver_opt in list(iface_driver_dict.keys()):
                if driver_opt == "name":
                    continue
                elif driver_opt == "txmode":
                    # txmode=iothread shows up as tx=bh on the qemu
                    # command line; other values pass through as tx=<v>.
                    if iface_driver_dict["txmode"] == "iothread":
                        driver_dict["tx"] = "bh"
                    else:
                        driver_dict["tx"] = iface_driver_dict["txmode"]
                elif driver_opt == "queues":
                    driver_dict["mq"] = "on"
                    if "pci" in model_option:
                        # expected vectors = 2 * queues + 2 for pci devices
                        driver_dict["vectors"] = str(
                            int(iface_driver_dict["queues"]) * 2 + 2)
                else:
                    driver_dict[driver_opt] = iface_driver_dict[driver_opt]
        # Test <driver><host/><driver> xml options.
        if iface_driver_host:
            driver_dict.update(ast.literal_eval(iface_driver_host))
        # Test <driver><guest/><driver> xml options.
        if iface_driver_guest:
            driver_dict.update(ast.literal_eval(iface_driver_guest))

        # Every expected option must appear with the expected value.
        for driver_opt in list(driver_dict.keys()):
            if (driver_opt not in cmd_opt
                    or not cmd_opt[driver_opt] == driver_dict[driver_opt]):
                test.fail("Can't see option '%s=%s' in qemu-kvm "
                          " command line" %
                          (driver_opt, driver_dict[driver_opt]))
            logging.info("Find %s=%s in qemu-kvm command line" %
                         (driver_opt, driver_dict[driver_opt]))
        if test_backend:
            # assumes the second whitespace field of the ps output is the
            # qemu PID — TODO confirm against the ps -ef column layout
            guest_pid = ret.stdout_text.rsplit()[1]
            # Verify the guest process holds the tap/vhost backend files.
            cmd = "lsof %s | grep %s" % (backend["tap"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["tap"])
            cmd = "lsof %s | grep %s" % (backend["vhost"], guest_pid)
            if process.system(cmd, ignore_status=True, shell=True):
                test.fail("Guest process didn't open backend file"
                          " %s" % backend["vhost"])

    def get_guest_ip(session, mac):
        """
        Restart guest networking and return the IP address bound to *mac*.

        :param session: login session to the guest
        :param mac: MAC address of the interface to query
        :return: IP address string, or None if no address was obtained
        """
        utils_net.restart_guest_network(session, mac)

        def _addr_ready():
            # Poll until the guest reports an address for this MAC.
            return utils_net.get_guest_ip_addr(session, mac)

        # Give the guest up to 10 seconds to (re)acquire an address.
        utils_misc.wait_for(_addr_ready, 10)
        return utils_net.get_guest_ip_addr(session, mac)

    def check_user_network(session):
        """
        Verify IP, gateway and DNS configuration for a user-type network.

        :param session: login session to the guest
        """
        macs = [iface_mac_old]
        if attach_device:
            macs.append(iface_mac)
        vm_ips = [get_guest_ip(session, mac) for mac in macs]
        logging.debug("IP address on guest: %s", vm_ips)
        if len(set(vm_ips)) != len(vm_ips):
            logging.debug(
                "Duplicated IP address on guest. Check bug: "
                "https://bugzilla.redhat.com/show_bug.cgi?id=1147238")
        for vm_ip in vm_ips:
            if not vm_ip or vm_ip != expect_ip:
                logging.debug("vm_ip is %s, expect_ip is %s", vm_ip, expect_ip)
                test.fail("Found wrong IP address on guest")
        # The default route must point at the expected user-mode gateway.
        gateway = str(utils_net.get_default_gateway(False, session))
        if expect_gw not in gateway:
            test.fail("The gateway on guest is %s, while expect is %s" %
                      (gateway, expect_gw))
        # The guest resolver must list the expected DNS server.
        ns_list = utils_net.get_guest_nameserver(session)
        if expect_ns not in ns_list:
            test.fail("The dns found is %s, which expect is %s" %
                      (ns_list, expect_ns))

    def check_mcast_network(session, add_session):
        """
        Check multicast ip address on guests

        Verifies that the multicast source address is visible on the host,
        that each guest obtained a distinct IP address, and that omping
        traffic (multicast and unicast) flows between host and guests
        without loss.

        :param session: vm session
        :param add_session: additional vm session
        :raise: test.fail / test.error on any connectivity problem
        """
        src_addr = ast.literal_eval(iface_source)['address']
        vms_sess_dict = {vm_name: session, additional_vm.name: add_session}

        # Check mcast address on host
        cmd = "netstat -g | grep %s" % src_addr
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            test.fail("Can't find multicast ip address" " on host")
        vms_ip_dict = {}
        # Get ip address on each guest
        for vms in list(vms_sess_dict.keys()):
            vm_mac = vm_xml.VMXML.get_first_mac_by_name(vms)
            vm_ip = get_guest_ip(vms_sess_dict[vms], vm_mac)
            if not vm_ip:
                test.fail("Can't get multicast ip" " address on guest")
            vms_ip_dict.update({vms: vm_ip})
        # Each guest must have received its own (unique) address.
        if len(set(vms_ip_dict.values())) != len(vms_sess_dict):
            test.fail("Got duplicated multicast ip address")
        logging.debug("Found ips on guest: %s", vms_ip_dict)

        # Run omping server on host
        if not utils_package.package_install(["omping"]):
            test.error("Failed to install omping" " on host")
        cmd = ("iptables -F;omping -m %s %s" %
               (src_addr,
                "192.168.122.1 %s" % ' '.join(list(vms_ip_dict.values()))))
        # Run a background job waiting for connection of client; it must be
        # up before the guest-side omping clients start below.
        bgjob = utils_misc.AsyncJob(cmd)

        # Run omping client on guests
        for vms in list(vms_sess_dict.keys()):
            # omping should be installed first
            if not utils_package.package_install(["omping"],
                                                 vms_sess_dict[vms]):
                test.error("Failed to install omping" " on guest")
            cmd = ("iptables -F; omping -c 5 -T 5 -m %s %s" %
                   (src_addr, "192.168.122.1 %s" % vms_ip_dict[vms]))
            ret, output = vms_sess_dict[vms].cmd_status_output(cmd)
            logging.debug("omping ret: %s, output: %s", ret, output)
            # 5/5/0% means all 5 probes were answered with zero loss.
            if (not output.count('multicast, xmt/rcv/%loss = 5/5/0%')
                    or not output.count('unicast, xmt/rcv/%loss = 5/5/0%')):
                test.fail("omping failed on guest")
        # Kill the background job
        bgjob.kill_func()

    def get_iface_model(iface_model, host_arch):
        """
        Get iface_model. On s390x use default model 'virtio' if non-virtio given
        :param iface_model: value as by test configuration or default
        :param host_arch: host architecture, e.g. x86_64
        :return: iface_model
        """
        if 's390x' == host_arch and 'virtio' not in iface_model:
            return "virtio"
        else:
            return iface_model

    def check_vhostuser_guests(session1, session2):
        """
        Check connectivity between guests over their vhostuser interfaces.

        param session1: Session of original guest
        param session2: Session of original additional guest
        """
        logging.debug("iface details is %s" %
                      libvirt.get_interface_details(vm_name))
        vm1_mac = str(libvirt.get_interface_details(vm_name)[0]['mac'])
        vm2_mac = str(libvirt.get_interface_details(add_vm_name)[0]['mac'])

        # Assign static addresses so the two guests can reach each other.
        utils_net.set_guest_ip_addr(session1, vm1_mac, guest1_ip)
        utils_net.set_guest_ip_addr(session2, vm2_mac, guest2_ip)
        ping_status, ping_output = utils_net.ping(dest=guest2_ip,
                                                  count='3',
                                                  timeout=5,
                                                  session=session1)
        logging.info("output:%s" % ping_output)
        ping_failed = ping_status != 0
        if ping_failed and ping_expect_fail:
            logging.info("Can not ping guest2 as expected")
        elif ping_failed:
            test.fail("Can not ping guest2 from guest1")
        elif ping_expect_fail:
            test.fail("Ping guest2 successfully not expected")
        else:
            logging.info("Can ping guest2 from guest1")

    def get_ovs_statis(ovs):
        """
        Get ovs-vsctl interface statistics and format in dict

        :param ovs: openvswitch instance
        :return: dict mapping each vhostuser interface name to its
                 statistics dict (counter name -> value string)
        """
        ovs_statis_dict = {}
        ovs_iface_info = ovs.ovs_vsctl(["list",
                                        "interface"]).stdout_text.strip()
        # Raw strings: '\s'/'\S' in plain strings are invalid escape
        # sequences and warn on modern Python.
        ovs_iface_list = re.findall(
            r'name\s+: (\S+)\n.*?statistics\s+: {(.*?)}\n', ovs_iface_info,
            re.S)
        logging.info("ovs iface list is %s", ovs_iface_list)
        # Dict of iface name and statistics
        for iface_name in vhostuser_names.split():
            for ovs_iface in ovs_iface_list:
                # ovs-vsctl may quote the interface name; parse the quoted
                # literal safely instead of eval() on external command
                # output, falling back to the raw token if it is unquoted.
                try:
                    parsed_name = ast.literal_eval(ovs_iface[0])
                except (ValueError, SyntaxError):
                    parsed_name = ovs_iface[0]
                if iface_name == parsed_name:
                    format_statis = dict(
                        re.findall(r'(\S*?)=(\d*?),', ovs_iface[1]))
                    ovs_statis_dict[iface_name] = format_statis
                    break
        return ovs_statis_dict

    # Expected-result knobs: "yes"/"no" test params turned into booleans.
    status_error = "yes" == params.get("status_error", "no")
    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    unprivileged_user = params.get("unprivileged_user")

    # Interface specific attributes.
    iface_type = params.get("iface_type", "network")
    iface_source = params.get("iface_source", "{}")
    iface_driver = params.get("iface_driver")
    iface_model = get_iface_model(params.get("iface_model", "virtio"),
                                  host_arch)
    iface_target = params.get("iface_target")
    iface_backend = params.get("iface_backend", "{}")
    iface_driver_host = params.get("iface_driver_host")
    iface_driver_guest = params.get("iface_driver_guest")
    ovs_br_name = params.get("ovs_br_name")
    vhostuser_names = params.get("vhostuser_names")
    attach_device = params.get("attach_iface_device")
    expect_tx_size = params.get("expect_tx_size")
    guest1_ip = params.get("vhostuser_guest1_ip", "192.168.100.1")
    guest2_ip = params.get("vhostuser_guest2_ip", "192.168.100.2")
    change_option = "yes" == params.get("change_iface_options", "no")
    update_device = "yes" == params.get("update_iface_device", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    # Fixed: masked literal ("******") could never equal a "yes"/"no" param
    # value; restored to "yes" to match every sibling comparison.
    serial_login = "yes" == params.get("serial_login", "no")
    rm_vhost_driver = "yes" == params.get("rm_vhost_driver", "no")
    test_option_cmd = "yes" == params.get("test_iface_option_cmd", "no")
    test_option_xml = "yes" == params.get("test_iface_option_xml", "no")
    test_vhost_net = "yes" == params.get("test_vhost_net", "no")
    test_option_offloads = "yes" == params.get("test_option_offloads", "no")
    # Fixed: same masked-literal defect as serial_login above.
    test_iface_user = "yes" == params.get("test_iface_user", "no")
    test_iface_mcast = "yes" == params.get("test_iface_mcast", "no")
    test_libvirtd = "yes" == params.get("test_libvirtd", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    restart_vm = "yes" == params.get("restart_vm", "no")
    test_guest_ip = "yes" == params.get("test_guest_ip", "no")
    test_backend = "yes" == params.get("test_backend", "no")
    check_guest_trans = "yes" == params.get("check_guest_trans", "no")
    set_ip = "yes" == params.get("set_user_ip", "no")
    set_ips = params.get("set_ips", "").split()
    expect_ip = params.get("expect_ip")
    expect_gw = params.get("expect_gw")
    expect_ns = params.get("expect_ns")
    test_target = "yes" == params.get("test_target", "no")
    target_dev = params.get("target_dev", None)

    # test params for vhostuser test
    huge_page = ast.literal_eval(params.get("huge_page", "{}"))
    numa_cell = ast.literal_eval(params.get("numa_cell", "{}"))
    additional_iface_source = ast.literal_eval(
        params.get("additional_iface_source", "{}"))
    vcpu_num = params.get("vcpu_num")
    cpu_mode = params.get("cpu_mode")
    hugepage_num = params.get("hugepage_num")
    log_pattern = params.get("log_pattern")

    # judgement params for vhostuer test
    need_vhostuser_env = "yes" == params.get("need_vhostuser_env", "no")
    ping_expect_fail = "yes" == params.get("ping_expect_fail", "no")
    check_libvirtd_log = "yes" == params.get("check_libvirtd_log", "no")
    check_statistics = "yes" == params.get("check_statistics", "no")
    enable_multiqueue = "yes" == params.get("enable_multiqueue", "no")

    # Extract the queue count from the <driver> options, if configured.
    queue_size = None
    if iface_driver:
        driver_dict = ast.literal_eval(iface_driver)
        if "queues" in driver_dict:
            queue_size = int(driver_dict.get("queues"))

    # Skip early if this libvirt is too old for the requested features.
    if iface_driver_host or iface_driver_guest or test_backend:
        if not libvirt_version.version_compare(1, 2, 8):
            test.cancel("Offloading/backend options not "
                        "supported in this libvirt version")
    if iface_driver and "queues" in ast.literal_eval(iface_driver):
        if not libvirt_version.version_compare(1, 0, 6):
            test.cancel("Queues options not supported"
                        " in this libvirt version")

    if unprivileged_user:
        if not libvirt_version.version_compare(1, 1, 1):
            test.cancel("qemu-bridge-helper not supported" " on this host")
        virsh_dargs["unprivileged_user"] = unprivileged_user
        # Create unprivileged user if needed
        cmd = ("grep {0} /etc/passwd || "
               "useradd {0}".format(unprivileged_user))
        process.run(cmd, shell=True)
        # Need another disk image for unprivileged user to access
        dst_disk = "/tmp/%s.img" % unprivileged_user

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    iface_mac_old = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    # iface_mac will update if attach a new interface
    iface_mac = iface_mac_old
    # Additional vm for test
    additional_vm = None
    libvirtd = utils_libvirtd.Libvirtd()

    # Redirect libvirtd logging to a temp file when the test inspects it.
    libvirtd_log_path = None
    libvirtd_conf = None
    if check_libvirtd_log:
        libvirtd_log_path = os.path.join(test.tmpdir, "libvirtd.log")
        libvirtd_conf = utils_config.LibvirtdConfig()
        libvirtd_conf["log_outputs"] = '"1:file:%s"' % libvirtd_log_path
        libvirtd.restart()

    # Prepare vhostuser
    ovs = None
    if need_vhostuser_env:
        # Reserve selinux status
        selinux_mode = utils_selinux.get_status()
        # Reserve orig page size
        orig_size = utils_memory.get_num_huge_pages()
        ovs_dir = data_dir.get_tmp_dir()
        ovs = utils_net.setup_ovs_vhostuser(hugepage_num, ovs_dir, ovs_br_name,
                                            vhostuser_names, queue_size)

    # Main test flow: prepare environment, start the VM, run the requested
    # checks, then restore everything in the finally block below.
    try:
        # Build the xml and run test.
        try:
            # Prepare interface backend files
            if test_backend:
                if not os.path.exists("/dev/vhost-net"):
                    process.run("modprobe vhost-net", shell=True)
                backend = ast.literal_eval(iface_backend)
                backend_tap = "/dev/net/tun"
                backend_vhost = "/dev/vhost-net"
                if not backend:
                    backend["tap"] = backend_tap
                    backend["vhost"] = backend_vhost
                if not start_error:
                    # Create backend files for normal test by renaming the
                    # default device nodes; they are renamed back in finally.
                    if not os.path.exists(backend["tap"]):
                        os.rename(backend_tap, backend["tap"])
                    if not os.path.exists(backend["vhost"]):
                        os.rename(backend_vhost, backend["vhost"])
            # Edit the interface xml.
            if change_option:
                modify_iface_xml(update=False)
                if define_error:
                    # Negative define case ends here; nothing to clean up.
                    return

            if test_target:
                logging.debug("Setting target device name to %s", target_dev)
                modify_iface_xml(update=False)

            if rm_vhost_driver:
                # remove vhost driver on host and
                # the character file /dev/vhost-net
                cmd = ("modprobe -r {0}; "
                       "rm -f /dev/vhost-net".format("vhost_net"))
                if process.system(cmd, ignore_status=True, shell=True):
                    test.error("Failed to remove vhost_net driver")
            else:
                # Load vhost_net driver by default
                cmd = "modprobe vhost_net"
                process.system(cmd, shell=True)

            # Attach an interface while the vm is shut off (cold plug)
            if attach_device == 'config':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="--config",
                                          ignore_status=True)
                libvirt.check_exit_status(ret)

            # Add hugepage and update cpu for vhostuser testing
            if huge_page:
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                membacking = vm_xml.VMMemBackingXML()
                hugepages = vm_xml.VMHugepagesXML()
                pagexml = hugepages.PageXML()
                pagexml.update(huge_page)
                hugepages.pages = [pagexml]
                membacking.hugepages = hugepages
                vmxml.mb = membacking

                vmxml.vcpu = int(vcpu_num)
                cpu_xml = vm_xml.VMCPUXML()
                cpu_xml.xml = "<cpu><numa/></cpu>"
                cpu_xml.numa_cell = cpu_xml.dicts_to_cells([numa_cell])
                cpu_xml.mode = cpu_mode
                if cpu_mode == "custom":
                    # Custom mode needs an explicit model from host capability.
                    vm_capability = capability_xml.CapabilityXML()
                    cpu_xml.model = vm_capability.model
                vmxml.cpu = cpu_xml

                vmxml.sync()
                logging.debug("xmltreefile:%s", vmxml.xmltreefile)

            # Clone additional vm
            if additional_guest:
                add_vm_name = "%s_%s" % (vm_name, '1')
                # Clone additional guest
                timeout = params.get("clone_timeout", 360)
                utils_libguestfs.virt_clone_cmd(vm_name,
                                                add_vm_name,
                                                True,
                                                timeout=timeout)
                additional_vm = vm.clone(add_vm_name)
                # Update iface source if needed
                if additional_iface_source:
                    add_vmxml = vm_xml.VMXML.new_from_dumpxml(add_vm_name)
                    add_xml_devices = add_vmxml.devices
                    add_iface_index = add_xml_devices.index(
                        add_xml_devices.by_device_tag("interface")[0])
                    add_iface = add_xml_devices[add_iface_index]
                    add_iface.source = additional_iface_source
                    add_vmxml.devices = add_xml_devices
                    add_vmxml.xmltreefile.write()
                    add_vmxml.sync()

                    logging.debug("add vm xmltreefile:%s",
                                  add_vmxml.xmltreefile)
                additional_vm.start()
                # Serial login is used (not network login) so the clone can
                # be reached even before its network comes up.
                username = params.get("username")
                password = params.get("password")
                add_session = additional_vm.wait_for_serial_login(
                    username=username, password=password)

            # Start the VM.
            if unprivileged_user:
                virsh.start(vm_name, **virsh_dargs)
                cmd = ("su - %s -c 'virsh console %s'" %
                       (unprivileged_user, vm_name))
                session = aexpect.ShellSession(cmd)
                session.sendline()
                remote.handle_prompts(session, params.get("username"),
                                      params.get("password"), r"[\#\$]\s*$",
                                      60)
                # Get ip address on guest
                if not get_guest_ip(session, iface_mac):
                    test.error("Can't get ip address on guest")
            else:
                # Will raise VMStartError exception if start fails
                vm.start()
                if serial_login:
                    session = vm.wait_for_serial_login()
                else:
                    session = vm.wait_for_login()
            if start_error:
                test.fail("VM started unexpectedly")

            # Attach an interface while the vm is running (hot plug)
            if attach_device == 'live':
                iface_mac = utils_net.generate_mac_address_simple()
                iface_xml_obj = create_iface_xml(iface_mac)
                iface_xml_obj.xmltreefile.write()
                ret = virsh.attach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="--live",
                                          ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret, status_error)
                # Need sleep here for attachment take effect
                time.sleep(5)

            # Update the interface options
            if update_device:
                modify_iface_xml(update=True, status_error=status_error)

            # Run tests for qemu-kvm command line options
            if test_option_cmd:
                run_cmdline_test(iface_mac, host_arch)
            # Run tests for vm xml
            if test_option_xml:
                run_xml_test(iface_mac)
            # Run tests for offloads options
            if test_option_offloads:
                if iface_driver_host:
                    ifname_guest = utils_net.get_linux_ifname(
                        session, iface_mac)
                    check_offloads_option(ifname_guest,
                                          ast.literal_eval(iface_driver_host),
                                          session)
                if iface_driver_guest:
                    ifname_host = libvirt.get_ifname_host(vm_name, iface_mac)
                    check_offloads_option(ifname_host,
                                          ast.literal_eval(iface_driver_guest))

            if test_iface_user:
                # Test user type network
                check_user_network(session)
            if test_iface_mcast:
                # Test mcast type network
                check_mcast_network(session, add_session)
            # Check guest ip address
            if test_guest_ip:
                if not get_guest_ip(session, iface_mac):
                    test.fail("Guest can't get a" " valid ip address")
            # Check guest RX/TX ring sizes via ethtool against the
            # configured <driver> rx/tx_queue_size values.
            if check_guest_trans:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                ret, outp = session.cmd_status_output("ethtool -g %s" %
                                                      ifname_guest)
                if ret:
                    test.fail("ethtool return error code")
                logging.info("ethtool output is %s", outp)
                driver_dict = ast.literal_eval(iface_driver)
                if expect_tx_size:
                    driver_dict['tx_queue_size'] = expect_tx_size
                # Split on "Current hardware" so both the preset-maximum and
                # current-setting sections are checked.
                for outp_p in outp.split("Current hardware"):
                    if 'rx_queue_size' in driver_dict:
                        if re.search(
                                r"RX:\s*%s" % driver_dict['rx_queue_size'],
                                outp_p):
                            logging.info("Find RX setting RX:%s by ethtool",
                                         driver_dict['rx_queue_size'])
                        else:
                            test.fail("Cannot find matching rx setting")
                    if 'tx_queue_size' in driver_dict:
                        if re.search(
                                r"TX:\s*%s" % driver_dict['tx_queue_size'],
                                outp_p):
                            logging.info("Find TX settint TX:%s by ethtool",
                                         driver_dict['tx_queue_size'])
                        else:
                            test.fail("Cannot find matching tx setting")
            if test_target:
                logging.debug("Check if the target dev is set")
                run_xml_test(iface_mac)

            # Check vhostuser guest
            if additional_iface_source:
                check_vhostuser_guests(session, add_session)

            # Check libvirtd log
            if check_libvirtd_log:
                # NOTE(review): 'find' is assigned but never used below.
                find = 0
                with open(libvirtd_log_path) as f:
                    lines = "".join(f.readlines())
                    if log_pattern in lines:
                        logging.info("Finding msg<%s> in libvirtd log",
                                     log_pattern)
                    else:
                        test.fail("Can not find msg:<%s> in libvirtd.log" %
                                  log_pattern)

            # Check statistics: compare ovs-vsctl counters with virsh
            # domifstat output (host rx corresponds to guest tx and
            # vice versa, hence the swapped keys below).
            if check_statistics:
                session.sendline("ping %s" % guest2_ip)
                add_session.sendline("ping %s" % guest1_ip)
                time.sleep(5)
                vhost_name = vhostuser_names.split()[0]
                ovs_statis_dict = get_ovs_statis(ovs)[vhost_name]
                domif_info = {}
                domif_info = libvirt.get_interface_details(vm_name)
                virsh.domiflist(vm_name, debug=True)
                domif_stat_result = virsh.domifstat(vm_name, vhost_name)
                if domif_stat_result.exit_status != 0:
                    test.fail("domifstat cmd fail with msg:%s" %
                              domif_stat_result.stderr)
                else:
                    domif_stat = domif_stat_result.stdout.strip()
                logging.debug("vhost_name is %s, domif_stat is %s", vhost_name,
                              domif_stat)
                domif_stat_dict = dict(
                    re.findall("%s (\S*) (\d*)" % vhost_name, domif_stat))
                logging.debug("ovs_statis is %s, domif_stat is %s",
                              ovs_statis_dict, domif_stat_dict)
                ovs_cmp_dict = {
                    'tx_bytes': ovs_statis_dict['rx_bytes'],
                    'tx_drop': ovs_statis_dict['rx_dropped'],
                    'tx_errs': ovs_statis_dict['rx_errors'],
                    'tx_packets': ovs_statis_dict['rx_packets'],
                    'rx_bytes': ovs_statis_dict['tx_bytes'],
                    'rx_drop': ovs_statis_dict['tx_dropped']
                }
                logging.debug("ovs_cmp_dict is %s", ovs_cmp_dict)
                for dict_key in ovs_cmp_dict.keys():
                    if domif_stat_dict[dict_key] != ovs_cmp_dict[dict_key]:
                        test.fail(
                            "Find ovs %s result (%s) different with domifstate result (%s)"
                            % (dict_key, ovs_cmp_dict[dict_key],
                               domif_stat_dict[dict_key]))
                    else:
                        logging.info("ovs %s value %s is same with domifstate",
                                     dict_key, domif_stat_dict[dict_key])

            # Check multi_queue: set the combined channel count in the guest
            # and verify both the preset maximum and the current setting.
            if enable_multiqueue:
                ifname_guest = utils_net.get_linux_ifname(session, iface_mac)
                for comb_size in (queue_size, queue_size - 1):
                    logging.info("Setting multiqueue size to %s" % comb_size)
                    session.cmd_status("ethtool -L %s combined %s" %
                                       (ifname_guest, comb_size))
                    ret, outp = session.cmd_status_output("ethtool -l %s" %
                                                          ifname_guest)
                    logging.debug("ethtool cmd output:%s" % outp)
                    if not ret:
                        pre_comb = re.search(
                            "Pre-set maximums:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        cur_comb = re.search(
                            "Current hardware settings:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        if int(pre_comb) != queue_size or int(cur_comb) != int(
                                comb_size):
                            test.fail(
                                "Fail to check the combined size: setting: %s,"
                                "Pre-set: %s, Current-set: %s, queue_size: %s"
                                % (comb_size, pre_comb, cur_comb, queue_size))
                        else:
                            logging.info(
                                "Getting correct Pre-set and Current set value"
                            )
                    else:
                        test.error("ethtool list fail: %s" % outp)

            session.close()
            if additional_guest:
                add_session.close()

            # Restart libvirtd and guest, then test again
            if restart_libvirtd:
                libvirtd.restart()

            if restart_vm:
                vm.destroy(gracefully=True)
                vm.start()
                if test_option_xml:
                    run_xml_test(iface_mac)

            # Detach hot/cold-plugged interface at last
            if attach_device and not status_error:
                ret = virsh.detach_device(vm_name,
                                          iface_xml_obj.xml,
                                          flagstr="",
                                          ignore_status=True,
                                          debug=True)
                libvirt.check_exit_status(ret)

        except virt_vm.VMStartError as e:
            logging.info(str(e))
            if not start_error:
                test.fail('VM failed to start\n%s' % e)

    finally:
        # Recover VM.
        logging.info("Restoring vm...")
        # Restore interface backend files (undo the renames done above)
        if test_backend:
            if not os.path.exists(backend_tap):
                os.rename(backend["tap"], backend_tap)
            if not os.path.exists(backend_vhost):
                os.rename(backend["vhost"], backend_vhost)
        if rm_vhost_driver:
            # Restore vhost_net driver
            process.system("modprobe vhost_net", shell=True)
        if unprivileged_user:
            virsh.remove_domain(vm_name, **virsh_dargs)
            process.run('rm -f %s' % dst_disk, shell=True)
        if additional_vm:
            virsh.remove_domain(additional_vm.name, "--remove-all-storage")
            # Kill all omping server process on host
            process.system("pidof omping && killall omping",
                           ignore_status=True,
                           shell=True)
        if vm.is_alive():
            vm.destroy(gracefully=True)
        vmxml_backup.sync()

        # Tear down the ovs/vhostuser environment and restore selinux and
        # hugepage settings saved before the test.
        if need_vhostuser_env:
            utils_net.clean_ovs_env(selinux_mode=selinux_mode,
                                    page_size=orig_size,
                                    clean_ovs=True)

        if libvirtd_conf:
            libvirtd_conf.restore()
            libvirtd.restart()

        if libvirtd_log_path and os.path.exists(libvirtd_log_path):
            os.unlink(libvirtd_log_path)
# Beispiel #13
    def check_cpu(xml, cpu_match):
        """
        Check the dumpxml result for --update-cpu option

        Note, function custom_cpu() hard code these features and policy,
        so after run virsh dumpxml --update-cpu:
        1. For match='minimum', all host support features will added,
           and match='exact'
        2. policy='optional' features(support by host) will update to
           policy='require'
        3. policy='optional' features(unsupport by host) will update to
           policy='disable'
        4. Other policy='disable|force|forbid|require' with keep the
           original values

        :param xml: domain XML string as returned by virsh dumpxml
        :param cpu_match: the <cpu> match value the test originally set
        :return: True when all policy/match/model/vendor checks pass,
                 False otherwise (failures are logged, not raised)
        """
        vmxml = vm_xml.VMXML()
        vmxml['xml'] = xml
        vmcpu_xml = vmxml['cpu']
        check_pass = True
        # Counters comparing how many features actually carry
        # policy='require' vs. how many are expected to.
        require_count = 0
        expect_require_features = 0
        cpu_feature_list = vmcpu_xml.get_feature_list()
        host_capa = capability_xml.CapabilityXML()
        for i in range(len(cpu_feature_list)):
            f_name = vmcpu_xml.get_feature_name(i)
            f_policy = vmcpu_xml.get_feature_policy(i)
            err_msg = "Policy of '%s' is not expected: %s" % (f_name, f_policy)
            expect_policy = "disable"
            # xtpr/vme/ia64 were policy='optional' in custom_cpu(); after
            # --update-cpu they must become 'require' when the host
            # supports them and 'disable' otherwise (docstring items 2/3).
            if f_name in ["xtpr", "vme", "ia64"]:
                # Check if feature is support on the host
                if host_capa.check_feature_name(f_name):
                    expect_policy = "require"
                if f_policy != expect_policy:
                    logging.error(err_msg)
                    check_pass = False
            # tm2/est/vmx carried fixed policies in custom_cpu(); they must
            # survive --update-cpu unchanged (docstring item 4).
            if f_name == "tm2":
                if f_policy != "disable":
                    logging.error(err_msg)
                    check_pass = False
            if f_name == "est":
                if f_policy != "force":
                    logging.error(err_msg)
                    check_pass = False
            if f_name == "vmx":
                if f_policy != "forbid":
                    logging.error(err_msg)
                    check_pass = False
            # Count expect require features
            if expect_policy == "require":
                expect_require_features += 1
            # Count actual require features
            if f_policy == "require":
                require_count += 1
        # Check
        if cpu_match == "minimum":
            # 'minimum' gets rewritten to 'exact' and extended with every
            # feature the host supports (docstring item 1).
            expect_match = "exact"
            # For different host, the support require features are different,
            # so just check the actual require features greater than the
            # expect number
            if require_count < expect_require_features:
                logging.error("Find %d require features, but expect >=%s",
                              require_count, expect_require_features)
                check_pass = False
        else:
            expect_match = cpu_match
            if require_count != expect_require_features:
                logging.error("Find %d require features, but expect %s",
                              require_count, expect_require_features)
                check_pass = False
        match = vmcpu_xml['match']
        if match != expect_match:
            logging.error("CPU match '%s' is not expected", match)
            check_pass = False
        # custom_cpu() hard codes model/vendor; verify they are preserved.
        if vmcpu_xml['model'] != 'Penryn':
            logging.error("CPU model %s is not expected", vmcpu_xml['model'])
            check_pass = False
        if vmcpu_xml['vendor'] != "Intel":
            logging.error("CPU vendor %s is not expected", vmcpu_xml['vendor'])
            check_pass = False
        return check_pass
# ---- Beispiel #14 (0) ----
    def output_check(nodeinfo_output):
        """
        Verify each field of ``virsh nodeinfo`` against the host OS and
        against the cpu topology reported by virsh capabilities.

        :param nodeinfo_output: raw text output of the virsh nodeinfo command
        :raise error.TestFail: when a nodeinfo field disagrees with the host
        """
        # CPU model must match the kernel architecture.
        model_reported = _check_nodeinfo(nodeinfo_output, "CPU model", 3)
        model_expected = utils.get_current_kernel_arch()
        if re.match(model_reported, model_expected) is None:
            raise error.TestFail(
                "Virsh nodeinfo output didn't match CPU model")

        # Total CPU count must match what the OS sees.
        ncpus_reported = _check_nodeinfo(nodeinfo_output, "CPU(s)", 2)
        if utils.count_cpus() != int(ncpus_reported):
            raise error.TestFail(
                "Virsh nodeinfo output didn't match number of CPU(s)")

        # CPU frequency: modern CPUs scale their clock with load, so the
        # two samples are taken at different moments and cannot be
        # compared exactly.  Allow a 20 percent "fudge" factor either way
        # and only log a debug message on mismatch instead of failing.
        freq_reported = _check_nodeinfo(nodeinfo_output,
                                        'CPU frequency', 3)
        cmd = ("cat /proc/cpuinfo | grep 'cpu MHz' | head -n1 | "
               "awk '{print $4}' | awk -F. '{print $1}'")
        freq_os = utils.run(cmd, ignore_status=True).stdout.strip()
        logging.debug("cpu_frequency_nodeinfo=%s cpu_frequency_os=%s",
                      freq_reported, freq_os)
        delta = abs(int(freq_reported) - int(freq_os))
        if any(float(delta) / float(val) > 0.20
               for val in (freq_reported, freq_os)):
            logging.debug("Virsh nodeinfo output didn't match CPU "
                          "frequency within 20 percent")

        # CPU topology as advertised by virsh capabilities.
        cpu_topolopy = capability_xml.CapabilityXML()['cpu_topolopy']
        logging.debug("Cpu topolopy in virsh capabilities output: %s",
                      cpu_topolopy)

        # Sockets: nodeinfo reports sockets per NUMA cell, so divide the
        # system-wide physical id count by the number of NUMA cells.
        sockets_reported = int(
            _check_nodeinfo(nodeinfo_output, 'CPU socket(s)', 3))
        cmd = "grep 'physical id' /proc/cpuinfo | uniq | sort | uniq |wc -l"
        phys_ids = utils.run(cmd, ignore_status=True).stdout.strip()
        numa_cells = _check_nodeinfo(nodeinfo_output, 'NUMA cell(s)', 3)
        if int(phys_ids) / int(numa_cells) != sockets_reported:
            raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                                 "socket(s) of host OS")
        if sockets_reported != int(cpu_topolopy['sockets']):
            raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                                 "socket(s) of virsh capabilities output")

        # Cores per socket, cross-checked against /proc/cpuinfo and the
        # capabilities topology.
        cores_reported = _check_nodeinfo(nodeinfo_output,
                                         'Core(s) per socket', 4)
        cmd = "grep 'cpu cores' /proc/cpuinfo | head -n1 | awk '{print $4}'"
        cores_os = utils.run(cmd, ignore_status=True).stdout.strip()
        if re.match(cores_reported, cores_os) is None:
            raise error.TestFail("Virsh nodeinfo output didn't match Core(s) "
                                 "per socket of host OS")
        if cores_reported != cpu_topolopy['cores']:
            raise error.TestFail("Virsh nodeinfo output didn't match Core(s) "
                                 "per socket of virsh capabilities output")

        # Threads per core.
        threads_reported = _check_nodeinfo(nodeinfo_output,
                                           'Thread(s) per core', 4)
        if threads_reported != cpu_topolopy['threads']:
            raise error.TestFail(
                "Virsh nodeinfo output didn't match Thread(s) "
                "per core of virsh capabilities output")

        # Memory size.
        mem_reported = int(
            _check_nodeinfo(nodeinfo_output, 'Memory size', 3))
        if mem_reported != utils_memory.memtotal():
            raise error.TestFail("Virsh nodeinfo output didn't match "
                                 "Memory size")
    def compare_capabilities_xml(source):
        """
        Sanity-check a virsh capabilities XML document against the host.

        Validates the host UUID, architecture, cpu count, the wordsize and
        hypervisor domain type of every guest arch, and the reported power
        management capabilities.

        :param source: capabilities XML string to validate
        :raise: test failure via test.fail() on any mismatch
        """
        cap_xml = capability_xml.CapabilityXML()
        cap_xml.xml = source

        # Check that host has a non-empty UUID tag.
        xml_uuid = cap_xml.uuid
        logging.debug("Host UUID (capabilities_xml): %s", xml_uuid)
        if xml_uuid == "":
            test.fail("The host uuid in capabilities_xml is none!")

        # Check the host arch.
        xml_arch = cap_xml.arch
        logging.debug("Host arch (capabilities_xml): %s", xml_arch)
        # Use stdout_text (as done elsewhere in this file) so the result is
        # str on Python 3, where CmdResult.stdout is bytes.
        exp_arch = process.run("arch", shell=True).stdout_text.strip()
        # Plain inequality instead of the Python-2-only cmp() builtin.
        if xml_arch != exp_arch:
            test.fail("The host arch in capabilities_xml is "
                      "expected to be %s, but get %s" %
                      (exp_arch, xml_arch))

        # Check the host cpu count.
        xml_cpu_count = cap_xml.cpu_count
        logging.debug("Host cpus count (capabilities_xml): %s", xml_cpu_count)
        # s390x /proc/cpuinfo has no 'processor' lines; count 'cpu number'.
        search_str = 'processor'
        if platform.machine() == 's390x':
            search_str = 'cpu number'
        cmd = "grep '%s' /proc/cpuinfo | wc -l" % search_str
        exp_cpu_count = int(process.run(cmd, shell=True).stdout_text.strip())
        if xml_cpu_count != exp_cpu_count:
            test.fail("Host cpus count is expected to be %s, "
                      "but get %s" %
                      (exp_cpu_count, xml_cpu_count))

        # Check the arch of guest supported.
        guest_capa = cap_xml.get_guest_capabilities()
        logging.debug(guest_capa)

        # libvirt track wordsize in hardcode struct virArchData
        wordsize = {}
        wordsize['64'] = ['alpha', 'aarch64', 'ia64', 'mips64', 'mips64el',
                          'parisc64', 'ppc64', 'ppc64le', 's390x', 'sh4eb',
                          'sparc64', 'x86_64']
        wordsize['32'] = ['armv6l', 'armv7l', 'armv7b', 'cris', 'i686', 'lm32',
                          'm68k', 'microblaze', 'microblazeel', 'mips',
                          'mipsel', 'openrisc', 'parisc', 'ppc', 'ppcle',
                          'ppcemb', 's390', 'sh4', 'sparc', 'unicore32',
                          'xtensa', 'xtensaeb']
        # The uri scheme (qemu, lxc, ...) determines the expected domain
        # type key, e.g. 'domain_qemu'.  stdout_text keeps this a str.
        uri_type = process.run("virsh uri",
                               shell=True).stdout_text.split(':')[0]
        domain_type = "domain_" + uri_type
        for arch_dict in list(itervalues(guest_capa)):
            for arch, val_dict in list(iteritems(arch_dict)):
                # Check wordsize
                if arch not in wordsize[val_dict['wordsize']]:
                    test.fail("'%s' wordsize '%s' in "
                              "capabilities_xml not expected" %
                              (arch, val_dict['wordsize']))
                # Check the type of hypervisor
                if domain_type not in list(val_dict.keys()):
                    test.fail("domain type '%s' is not matched"
                              " under arch '%s' in "
                              "capabilities_xml" %
                              (uri_type, arch))

        # check power management support.
        try:
            pm_cmd = path.find_command('pm-is-supported')
            pm_cap_map = {'suspend': 'suspend_mem',
                          'hibernate': 'suspend_disk',
                          'suspend-hybrid': 'suspend_hybrid'}
            exp_pms = []
            for opt in pm_cap_map:
                cmd = '%s --%s' % (pm_cmd, opt)
                res = process.run(cmd, ignore_status=True, shell=True)
                if res.exit_status == 0:
                    exp_pms.append(pm_cap_map[opt])
            pms = cap_xml.power_management_list
            if set(exp_pms) != set(pms):
                test.fail("Expected supported PMs are %s, got %s "
                          "instead." % (exp_pms, pms))
        except path.CmdNotFoundError:
            # Best effort: without pm-is-supported we cannot derive the
            # expected PM list, so skip this part instead of failing.
            logging.debug('Power management checking is skipped, since command'
                          ' pm-is-supported is not found.')
# ---- Beispiel #16 (0) ----
    def output_check(nodeinfo_output):
        """
        Verify each field of ``virsh nodeinfo`` against the host OS and
        against the cpu topology reported by virsh capabilities.

        :param nodeinfo_output: raw text output of the virsh nodeinfo command
        :raise error.TestFail: when a nodeinfo field disagrees with the host
        """
        # Check CPU model
        cpu_model_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU model", 3)
        cpu_model_os = utils.get_current_kernel_arch()
        if not re.match(cpu_model_nodeinfo, cpu_model_os):
            raise error.TestFail(
                "Virsh nodeinfo output didn't match CPU model")

        # Check number of CPUs, nodeinfo CPUs represent online threads in the
        # system, check all online cpus in sysfs
        cpus_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU(s)", 2)
        cmd = "cat /sys/devices/system/cpu/cpu*/online | grep 1 | wc -l"
        cpus_online = utils.run(cmd, ignore_status=True).stdout.strip()
        cmd = "cat /sys/devices/system/cpu/cpu*/online | wc -l"
        cpus_total = utils.run(cmd, ignore_status=True).stdout.strip()
        # cpu0 has no 'online' file when it cannot be offlined, so count
        # it in by hand in that case.
        if not os.path.exists('/sys/devices/system/cpu/cpu0/online'):
            cpus_online = str(int(cpus_online) + 1)
            cpus_total = str(int(cpus_total) + 1)

        logging.debug("host online cpus are %s", cpus_online)
        logging.debug("host total cpus are %s", cpus_total)

        # On power, nodeinfo reports all threads, not just online ones.
        if cpus_nodeinfo != cpus_online:
            if 'power' in cpu_util.get_cpu_arch():
                if cpus_nodeinfo != cpus_total:
                    raise error.TestFail("Virsh nodeinfo output of CPU(s) on"
                                         " ppc did not match all threads in "
                                         "the system")
            else:
                raise error.TestFail("Virsh nodeinfo output didn't match "
                                     "number of CPU(s)")

        # Check CPU frequency, frequency is under clock for ppc
        cpu_frequency_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'CPU frequency', 3)
        cmd = ("cat /proc/cpuinfo | grep -E 'cpu MHz|clock' | head -n1 | "
               "awk -F: '{print $2}' | awk -F. '{print $1}'")
        cmd_result = utils.run(cmd, ignore_status=True)
        cpu_frequency_os = cmd_result.stdout.strip()
        logging.debug("cpu_frequency_nodeinfo=%s cpu_frequency_os=%s",
                      cpu_frequency_nodeinfo, cpu_frequency_os)
        #
        # Matching CPU Frequency is not an exact science in todays modern
        # processors and OS's. CPU's can have their execution speed varied
        # based on current workload in order to save energy and keep cool.
        # Thus since we're getting the values at disparate points in time,
        # we cannot necessarily do a pure comparison.
        # So, let's get the absolute value of the difference and ensure
        # that it's within 20 percent of each value to give us enough of
        # a "fudge" factor to declare "close enough". Don't return a failure
        # just print a debug message and move on.
        diffval = abs(int(cpu_frequency_nodeinfo) - int(cpu_frequency_os))
        if float(diffval) / float(cpu_frequency_nodeinfo) > 0.20 or \
           float(diffval) / float(cpu_frequency_os) > 0.20:
            logging.debug("Virsh nodeinfo output didn't match CPU "
                          "frequency within 20 percent")

        # Get CPU topology from virsh capabilities xml
        cpu_topology = capability_xml.CapabilityXML()['cpu_topology']
        logging.debug("Cpu topology in virsh capabilities output: %s",
                      cpu_topology)

        # Check CPU socket(s)
        cpu_sockets_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'CPU socket(s)', 3))
        # CPU socket(s) in virsh nodeinfo is Total sockets in each node, not
        # total sockets in the system, so get total sockets in one node and
        # check with it
        node_info = utils_misc.NumaInfo()
        node_online_list = node_info.get_online_nodes()
        cmd = "cat /sys/devices/system/node/node%s" % node_online_list[0]
        cmd += "/cpu*/topology/physical_package_id | uniq |wc -l"
        cmd_result = utils.run(cmd, ignore_status=True)
        total_sockets_in_node = int(cmd_result.stdout.strip())
        if total_sockets_in_node != cpu_sockets_nodeinfo:
            raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                                 "socket(s) of host OS")
        if cpu_sockets_nodeinfo != int(cpu_topology['sockets']):
            raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                                 "socket(s) of virsh capabilities output")

        # Check Core(s) per socket
        cores_per_socket_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'Core(s) per socket', 4)
        cmd = "lscpu | grep 'Core(s) per socket' | head -n1 | awk '{print $4}'"
        cmd_result = utils.run(cmd, ignore_status=True)
        cores_per_socket_os = cmd_result.stdout.strip()
        if not re.match(cores_per_socket_nodeinfo, cores_per_socket_os):
            raise error.TestFail("Virsh nodeinfo output didn't match Core(s) "
                                 "per socket of host OS")
        if cores_per_socket_nodeinfo != cpu_topology['cores']:
            raise error.TestFail("Virsh nodeinfo output didn't match Core(s) "
                                 "per socket of virsh capabilities output")

        # Ckeck Thread(s) per core
        threads_per_core_nodeinfo = _check_nodeinfo(nodeinfo_output,
                                                    'Thread(s) per core', 4)
        if threads_per_core_nodeinfo != cpu_topology['threads']:
            raise error.TestFail("Virsh nodeinfo output didn't match Thread(s) "
                                 "per core of virsh capabilities output")

        # Check Memory size
        memory_size_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'Memory size', 3))
        # Sum MemTotal over all online NUMA nodes to get the host total.
        memory_size_os = 0
        for i in node_online_list:
            node_memory = node_info.read_from_node_meminfo(i, 'MemTotal')
            memory_size_os += int(node_memory)
        logging.debug('The host total memory from nodes is %s', memory_size_os)

        if memory_size_nodeinfo != memory_size_os:
            raise error.TestFail("Virsh nodeinfo output didn't match "
                                 "Memory size")
def run(test, params, env):
    """
    Test command: virsh cpu-compare.

    Compare host CPU with a CPU described by an XML file.
    1.Get all parameters from configuration.
    2.Prepare temp file saves of CPU information.
    3.Perform virsh cpu-compare operation.
    4.Confirm the result.
    """
    def get_cpu_xml(target, mode):
        """
        Build the CPU XML description and return it.

        :param target: Test target, host or guest's cpu description.
        :param mode: Test mode ("modify"/"delete"/"clear"/...), decides
                     how the XML is altered before writing it to file.
        :return: VMCPUXML instance describing the CPU to compare.
        """
        libvirtxml = vm_xml.VMCPUXML()
        if target == "host":
            # Mirror the host capability CPU: same vendor/model plus all
            # host features with policy 'require'.
            cpu_feature_list = host_cpu_xml.get_feature_list()
            if cpu_match:
                libvirtxml['match'] = cpu_match
            libvirtxml['vendor'] = host_cpu_xml['vendor']
            libvirtxml['model'] = host_cpu_xml['model']
            for cpu_feature in cpu_feature_list:
                feature_name = cpu_feature.get('name')
                libvirtxml.add_feature(feature_name, "require")
        else:
            try:
                libvirtxml = vmxml['cpu']
            except LibvirtXMLNotFoundError:
                test.cancel("No <cpu> element in domain XML")

        if mode == "modify":
            if modify_target == "mode":
                libvirtxml['mode'] = modify_value
                # Power CPU model names are in lowercases for compatibility mode
                if "ppc" in platform.machine(
                ) and modify_value == "host-model":
                    libvirtxml['model'] = libvirtxml['model'].lower()
            elif modify_target == "model":
                libvirtxml['model'] = modify_value
            elif modify_target == "vendor":
                libvirtxml['vendor'] = modify_value
            elif modify_target == "feature_name":
                if modify_value == "REPEAT":
                    # Duplicate an existing feature to provoke the
                    # "more than once" error from virsh.
                    feature_name = libvirtxml.get_feature_name(feature_num)
                    feature_policy = libvirtxml.get_feature_policy(0)
                    libvirtxml.add_feature(feature_name, feature_policy)
                else:
                    libvirtxml.set_feature(feature_num, name=modify_value)
            elif modify_target == "feature_policy":
                libvirtxml.set_feature(feature_num, policy=modify_value)
        elif mode == "delete":
            libvirtxml.remove_feature(feature_num)
        else:
            pass
        return libvirtxml

    # Get all parameters.
    ref = params.get("cpu_compare_ref")
    mode = params.get("cpu_compare_mode", "")
    modify_target = params.get("cpu_compare_modify_target", "")
    modify_value = params.get("cpu_compare_modify_value", "")
    feature_num = int(params.get("cpu_compare_feature_num", -1))
    target = params.get("cpu_compare_target", "host")
    extra = params.get("cpu_compare_extra", "")
    file_name = params.get("cpu_compare_file_name", "cpu.xml")
    cpu_match = params.get("cpu_compare_cpu_match", "")
    modify_invalid = "yes" == params.get("cpu_compare_modify_invalid", "no")
    check_vm_ps = "yes" == params.get("check_vm_ps", "no")
    check_vm_ps_value = params.get("check_vm_ps_value")
    tmp_file = os.path.join(test.tmpdir, file_name)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.destroy()
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    host_cpu_xml = capability_xml.CapabilityXML()

    try:
        # Add cpu element if it not present in VM XML
        if not vmxml.get('cpu'):
            new_cpu = vm_xml.VMCPUXML()
            new_cpu['model'] = host_cpu_xml['model']
            vmxml['cpu'] = new_cpu
        # Add cpu model element if it not present in VM XML
        if not vmxml['cpu'].get('model'):
            vmxml['cpu']['model'] = host_cpu_xml['model']
        # Prepare VM cpu feature if necessary
        if modify_target in ['feature_name', 'feature_policy', 'delete']:
            # Only prepare a feature when the VM has none yet; cancel only
            # when the *host* exposes no features to borrow.  (Previously a
            # VM that already carried features wrongly hit the cancel
            # branch, whose message talks about the host capability XML.)
            if not vmxml['cpu'].get_feature_list():
                if host_cpu_xml.get_feature_list():
                    # Add a host feature to VM for testing
                    vmxml_cpu = vmxml['cpu'].copy()
                    vmxml_cpu.add_feature(host_cpu_xml.get_feature_name('-1'))
                    vmxml['cpu'] = vmxml_cpu
                    vmxml.sync()
                else:
                    test.cancel("No features present in host "
                                "capability XML")

        # Prepare temp compare file.
        cpu_compare_xml = get_cpu_xml(target, mode)
        with open(tmp_file, 'w+b') as cpu_compare_xml_f:
            if mode == "clear":
                # The "clear" case feeds an empty file to cpu-compare.
                cpu_compare_xml_f.truncate(0)
            else:
                cpu_compare_xml.xmltreefile.write(cpu_compare_xml_f)
            cpu_compare_xml_f.seek(0)
            logging.debug("CPU description XML:\n%s", cpu_compare_xml_f.read())

        # Expected possible result msg patterns and exit status
        msg_patterns = ""
        if not mode:
            if target == "host":
                msg_patterns = "identical"
            else:
                # As we don't know the <cpu> element in domain,
                # so just check command exit status
                pass
        elif mode == "delete":
            if cpu_match == "strict":
                msg_patterns = "incompatible"
            else:
                msg_patterns = "superset"
        elif mode == "modify":
            if modify_target == "mode":
                if modify_invalid:
                    msg_patterns = "Invalid mode"
            elif modify_target == "model":
                if modify_invalid:
                    msg_patterns = "Unknown CPU model"
            elif modify_target == "vendor":
                if modify_invalid:
                    msg_patterns = "incompatible"
            elif modify_target == "feature_name":
                if modify_value == "REPEAT":
                    msg_patterns = "more than once"
                elif modify_value == "ia64":
                    msg_patterns = "incompatible"
                elif modify_invalid:
                    msg_patterns = "Unknown"
            elif modify_target == "feature_policy":
                if modify_value == "forbid":
                    msg_patterns = "incompatible"
                else:
                    msg_patterns = "identical"
            else:
                # Report the unsupported *target* (the message previously
                # interpolated the mode by mistake).
                test.cancel("Unsupport modify target %s in this "
                            "test" % modify_target)
        elif mode == "clear":
            msg_patterns = "empty"
        elif mode == "invalid_test":
            msg_patterns = ""
        else:
            test.cancel("Unsupport modify mode %s in this " "test" % mode)
        status_error = params.get("status_error", "")
        if status_error == "yes":
            expected_status = 1
        elif status_error == "no":
            expected_status = 0
        else:
            # If exit status is not specified in cfg, using msg_patterns
            # to get expect exit status
            if msg_patterns in ['identical', 'superset']:
                expected_status = 0
            else:
                expected_status = 1
            # Default guest cpu compare should pass
            if not mode and target == "guest":
                expected_status = 0

        if ref == "file":
            ref = tmp_file
        ref = "%s %s" % (ref, extra)

        # Perform virsh cpu-compare operation.
        result = virsh.cpu_compare(ref, ignore_status=True, debug=True)

        if os.path.exists(tmp_file):
            os.remove(tmp_file)
        # Check result
        logging.debug("Expect command exit status: %s", expected_status)
        if result.exit_status != expected_status:
            test.fail("Exit status %s is not expected" % result.exit_status)
        if msg_patterns:
            logging.debug("Expect key word in comand output: %s", msg_patterns)
            if result.stdout.strip():
                output = result.stdout.strip()
            else:
                output = result.stderr.strip()
            if not output.count(msg_patterns):
                test.fail("Not find expect key word in command output")

        # Check VM for cpu 'mode' related cases
        if check_vm_ps:
            vmxml['cpu'] = cpu_compare_xml
            vmxml.sync()
            virsh.start(vm_name, ignore_status=True, debug=True)
            vm_pid = vm.get_pid()
            if vm_pid is None:
                test.error("Could not get VM pid")
            with open("/proc/%d/cmdline" % vm_pid) as vm_cmdline_file:
                vm_cmdline = vm_cmdline_file.read()
            vm_features_str = ""
            # cmdline file is always separated by NUL characters('\x00')
            for item in vm_cmdline.split('\x00-'):
                if item.count('cpu'):
                    vm_features_str = item
            logging.debug("VM cpu device: %s", vm_features_str)
            # '+feat' entries on the qemu command line are enabled features.
            vm_features = []
            for f in vm_features_str.split(','):
                if f.startswith('+'):
                    vm_features.append(f[1:])
            host_features = []
            if check_vm_ps_value == 'CAPABILITY':
                for feature in host_cpu_xml.get_feature_list():
                    host_features.append(feature.get('name'))
            else:
                host_features.append(check_vm_ps_value)
            for feature in vm_features:
                if feature not in host_features:
                    test.fail("Not find %s in host capability" % feature)
    finally:
        # Always restore the original domain XML.
        vmxml_backup.sync()
# ---- Beispiel #18 (0) ----
    def output_check(nodeinfo_output):
        """
        Verify each field of ``virsh nodeinfo`` against the host OS and
        against the cpu topology reported by virsh capabilities.

        :param nodeinfo_output: raw text output of the virsh nodeinfo command
        :raise: test failure via test.fail() when a field disagrees
        """
        # Check CPU model
        cpu_model_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU model", 3)
        cpu_arch = platform.machine()
        if not re.match(cpu_model_nodeinfo, cpu_arch):
            test.fail(
                "Virsh nodeinfo output didn't match CPU model")

        # Check number of CPUs, nodeinfo CPUs represent online threads in the
        # system, check all online cpus in sysfs
        cpus_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU(s)", 2)
        cmd = "cat /sys/devices/system/cpu/cpu*/online | grep 1 | wc -l"
        cpus_online = process.run(cmd, ignore_status=True,
                                  shell=True).stdout_text.strip()

        cmd = "cat /sys/devices/system/cpu/cpu*/online | wc -l"
        cpus_total = process.run(cmd, ignore_status=True,
                                 shell=True).stdout_text.strip()

        # cpu0 has no 'online' file when it cannot be offlined, so count
        # it in by hand in that case.
        if not os.path.exists('/sys/devices/system/cpu/cpu0/online'):
            cpus_online = str(int(cpus_online) + 1)
            cpus_total = str(int(cpus_total) + 1)

        logging.debug("host online cpus are %s", cpus_online)
        logging.debug("host total cpus are %s", cpus_total)

        # On ppc, nodeinfo reports all threads, not just online ones.
        if cpus_nodeinfo != cpus_online:
            if 'ppc' in cpu_arch:
                if cpus_nodeinfo != cpus_total:
                    test.fail("Virsh nodeinfo output of CPU(s) on"
                              " ppc did not match all threads in "
                              "the system")
            else:
                test.fail("Virsh nodeinfo output didn't match "
                          "number of CPU(s)")

        # Check CPU frequency, frequency is under clock for ppc
        cpu_frequency_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'CPU frequency', 3)
        cmd = ("cat /proc/cpuinfo | grep -E 'cpu MHz|clock|BogoMIPS' | "
               "head -n1 | awk -F: '{print $2}' | awk -F. '{print $1}'")
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        cpu_frequency_os = cmd_result.stdout_text.strip()
        logging.debug("cpu_frequency_nodeinfo=%s cpu_frequency_os=%s",
                      cpu_frequency_nodeinfo, cpu_frequency_os)
        #
        # Matching CPU Frequency is not an exact science in today's modern
        # processors and OS's. CPU's can have their execution speed varied
        # based on current workload in order to save energy and keep cool.
        # Thus since we're getting the values at disparate points in time,
        # we cannot necessarily do a pure comparison.
        # So, let's get the absolute value of the difference and ensure
        # that it's within 20 percent of each value to give us enough of
        # a "fudge" factor to declare "close enough". Don't return a failure
        # just print a debug message and move on.
        diffval = abs(int(cpu_frequency_nodeinfo) - int(cpu_frequency_os))
        if (float(diffval) / float(cpu_frequency_nodeinfo) > 0.20 or
                float(diffval) / float(cpu_frequency_os) > 0.20):
            logging.debug("Virsh nodeinfo output didn't match CPU "
                          "frequency within 20 percent")

        # Get CPU topology from virsh capabilities xml
        cpu_topology = capability_xml.CapabilityXML()['cpu_topology']
        logging.debug("Cpu topology in virsh capabilities output: %s",
                      cpu_topology)

        # Check CPU socket(s)
        cpu_sockets_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'CPU socket(s)', 3))
        # CPU socket(s) in virsh nodeinfo is Total sockets in each node, not
        # total sockets in the system, so get total sockets in one node and
        # check with it
        node_info = utils_misc.NumaInfo()
        node_online_list = node_info.get_online_nodes()
        cmd = "cat /sys/devices/system/node/node%s" % node_online_list[0]
        cmd += "/cpu*/topology/physical_package_id | uniq |wc -l"
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        total_sockets_in_node = int(cmd_result.stdout_text.strip())
        if total_sockets_in_node != cpu_sockets_nodeinfo:
            test.fail("Virsh nodeinfo output didn't match CPU "
                      "socket(s) of host OS")
        if cpu_sockets_nodeinfo != int(cpu_topology['sockets']):
            test.fail("Virsh nodeinfo output didn't match CPU "
                      "socket(s) of virsh capabilities output")

        # Check Core(s) per socket
        cores_per_socket_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'Core(s) per socket', 4)
        cmd = "lscpu | grep 'Core(s) per socket' | head -n1 | awk '{print $4}'"
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        cores_per_socket_os = cmd_result.stdout_text.strip()
        spec_numa = False

        numa_cells_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'NUMA cell(s)', 3)
        cmd = 'cat /proc/cpuinfo |grep "physical id"|sort |uniq|wc -l'
        logging.debug("cpu_sockets_nodeinfo=%d", cpu_sockets_nodeinfo)
        # numa_cells_nodeinfo is a string (it is later used as a regex and
        # converted with int()); formatting it with %d raised a TypeError
        # that logging swallowed, silently dropping this debug line.
        logging.debug("numa_cells_nodeinfo=%s", numa_cells_nodeinfo)

        cmd_ret = process.run(cmd, ignore_status=True, shell=True)
        if int(cmd_ret.stdout_text.strip()) != int(numa_cells_nodeinfo) * int(cpu_sockets_nodeinfo):
            test.fail("Command {} output {} isn't equal to "
                      "NUMA cell(s) {} * CPU sockets {}".format(cmd,
                                                                cmd_ret.stdout_text.strip(),
                                                                numa_cells_nodeinfo,
                                                                cpu_sockets_nodeinfo))
        if not re.match(cores_per_socket_nodeinfo, cores_per_socket_os):
            # for spec NUMA arch, the output of nodeinfo is in a spec format
            cpus_os = cpu.get_cpu_info().get("CPU(s)")

            if (re.match(cores_per_socket_nodeinfo, cpus_os) and
                    re.match(numa_cells_nodeinfo, "1")):
                spec_numa = True
            else:
                test.fail("Virsh nodeinfo output didn't match "
                          "CPU(s) or Core(s) per socket of host OS")

        if cores_per_socket_nodeinfo != cpu_topology['cores']:
            test.fail("Virsh nodeinfo output didn't match Core(s) "
                      "per socket of virsh capabilities output")
        # Check Thread(s) per core
        threads_per_core_nodeinfo = _check_nodeinfo(nodeinfo_output,
                                                    'Thread(s) per core', 4)
        if not spec_numa:
            if threads_per_core_nodeinfo != cpu_topology['threads']:
                test.fail("Virsh nodeinfo output didn't match"
                          "Thread(s) per core of virsh"
                          "capabilities output")
        else:
            # On the special NUMA layout nodeinfo always reports 1 thread.
            if threads_per_core_nodeinfo != "1":
                test.fail("Virsh nodeinfo output didn't match"
                          "Thread(s) per core of virsh"
                          "capabilities output")
        # Check Memory size
        memory_size_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'Memory size', 3))
        memory_size_os = 0
        # libvirt >= 2.0.0 sums memory over NUMA nodes; older versions
        # report the plain MemTotal.
        if libvirt_version.version_compare(2, 0, 0):
            for i in node_online_list:
                node_memory = node_info.read_from_node_meminfo(i, 'MemTotal')
                memory_size_os += int(node_memory)
        else:
            memory_size_os = utils_memory.memtotal()
        logging.debug('The host total memory from nodes is %s', memory_size_os)

        if memory_size_nodeinfo != memory_size_os:
            test.fail("Virsh nodeinfo output didn't match "
                      "Memory size")
# ---- Beispiel #19 (0) ----
def run(test, params, env):
    """
    Test libvirtd_config parameter in /etc/sysconfig/libvirtd.

    1) Change libvirtd_config in sysconfig;
    2) Change host_uuid in newly defined libvirtd.conf file;
    3) Restart libvirt daemon;
    4) Check if libvirtd successfully started;
    5) Check if host_uuid updated accordingly;

    :param test: avocado test object (entry-point contract, unused directly)
    :param params: dict-like test parameters
    :param env: test environment object (unused)
    """
    def get_init_name():
        """
        Internal function to determine what executable is PID 1,
        :return: executable name for PID 1, aka init
        """
        # 'with' guarantees the file handle is closed even if read() raises,
        # unlike the previous open()/read()/close() sequence.
        with open('/proc/1/comm') as fp:
            return fp.read().strip()

    libvirtd_config = params.get('libvirtd_config', 'not_set')
    expected_result = params.get('expected_result', 'success')

    # Under systemd the LIBVIRTD_CONFIG sysconfig variable is not honored,
    # so the host UUID is expected to stay as it was.
    # NOTE(review): this sets 'unchanged' but the check below compares
    # against 'unchange' -- the two never match, so no UUID verification
    # happens on systemd hosts. Confirm which spelling is intended.
    if get_init_name() == 'systemd':
        logging.info('Init process is systemd, '
                     'LIBVIRTD_CONFIG should not working.')
        expected_result = 'unchanged'

    sysconfig = utils_config.LibvirtdSysConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    config_path = ""
    check_uuid = '13371337-1337-1337-1337-133713371337'
    try:
        if libvirtd_config == 'not_set':
            del sysconfig.LIBVIRTD_CONFIG
        elif libvirtd_config == 'exist_file':
            # Create an empty config file, write a known host_uuid into it,
            # and point LIBVIRTD_CONFIG at it so the UUID can be checked
            # after the daemon restart.
            config_path = os.path.join(data_dir.get_tmp_dir(), 'test.conf')
            open(config_path, 'a').close()

            config = utils_config.LibvirtdConfig(config_path)
            config.host_uuid = check_uuid

            sysconfig.LIBVIRTD_CONFIG = config_path
        else:
            sysconfig.LIBVIRTD_CONFIG = libvirtd_config

        if not libvirtd.restart():
            if expected_result != 'unbootable':
                raise error.TestFail('Libvirtd is expected to be started '
                                     'with LIBVIRTD_CONFIG = '
                                     '%s' % sysconfig.LIBVIRTD_CONFIG)
        if expected_result == 'unbootable':
            raise error.TestFail('Libvirtd is not expected to be started '
                                 'with LIBVIRTD_CONFIG = '
                                 '%s' % sysconfig.LIBVIRTD_CONFIG)
        # Compare the UUID reported by `virsh capabilities` against the one
        # written to the new config file.
        cur_uuid = capability_xml.CapabilityXML()['uuid']
        if cur_uuid == check_uuid:
            if expected_result == 'unchange':
                raise error.TestFail('Expected host UUID is not changed, '
                                     'but got %s' % cur_uuid)
        else:
            if expected_result == 'change':
                raise error.TestFail('Expected host UUID is %s, but got %s' %
                                     (check_uuid, cur_uuid))

    finally:
        # Restore sysconfig and remove the generated config file, then
        # restart libvirtd so later tests see a clean daemon state.
        if libvirtd_config == 'exist_file':
            config.restore()
            if os.path.isfile(config_path):
                os.remove(config_path)
        sysconfig.restore()
        libvirtd.restart()
Beispiel #20
0
def run(test, params, env):
    """
    Test the command virsh maxvcpus

    (1) Call virsh maxvcpus
    (2) Call virsh -c remote_uri maxvcpus
    (3) Call virsh maxvcpus with an unexpected option

    :param test: avocado test object (entry-point contract, unused directly)
    :param params: dict-like test parameters
    :param env: test environment object (unused)
    """

    # get the params from subtests.
    # params for general.
    option = params.get("virsh_maxvcpus_options")
    status_error = params.get("status_error")
    connect_arg = params.get("connect_arg", "")

    # params for transport connect.
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
    server_ip = params.get("remote_ip", local_ip)
    server_pwd = params.get("remote_pwd", local_pwd)
    transport_type = params.get("connect_transport_type", "local")
    transport = params.get("connect_transport", "ssh")

    # check the config: a remote transport test needs real IP/password
    # values, not the "ENTER.YOUR..." placeholders.
    if (connect_arg == "transport" and transport_type == "remote"
            and local_ip.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_ip is not configured "
                                       "in remote test.")
    if (connect_arg == "transport" and transport_type == "remote"
            and local_pwd.count("ENTER")):
        raise exceptions.TestSkipError("Parameter local_pwd is not configured "
                                       "in remote test.")

    if connect_arg == "transport":
        canonical_uri_type = virsh.driver()

        if transport == "ssh":
            # Make sure passwordless ssh between client and server works
            # before building the remote URI; set it up on first failure.
            ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type,
                transport=transport,
                dest_ip=server_ip)
    else:
        connect_uri = connect_arg

    if libvirt_version.version_compare(2, 3, 0):
        try:
            maxvcpus = None
            # make sure we take maxvcpus from right host, helps incase remote
            # NOTE(review): virsh.Virsh is normally given keyword arguments;
            # passing the dict positionally looks suspicious -- confirm
            # whether this should be virsh.Virsh(**virsh_dargs).
            virsh_dargs = {'uri': connect_uri}
            virsh_instance = virsh.Virsh(virsh_dargs)
            try:
                capa = capability_xml.CapabilityXML(virsh_instance)
                host_arch = capa.arch
                maxvcpus = capa.get_guest_capabilities(
                )['hvm'][host_arch]['maxcpus']
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt are not swallowed.
                raise exceptions.TestFail("Failed to get maxvcpus from "
                                          "capabilities xml\n%s" % capa)
            if not maxvcpus:
                raise exceptions.TestFail("Failed to get guest section for "
                                          "host arch: %s from capabilities "
                                          "xml\n%s" % (host_arch, capa))
        except Exception as details:
            # 'as' form works on Python 2.6+ and 3.x, unlike the old
            # 'except Exception, details' comma syntax.
            raise exceptions.TestFail(
                "Failed get the virsh instance with uri: "
                "%s\n Details: %s" % (connect_uri, details))
    def compare_capabilities_xml(source):
        """
        Cross-check `virsh capabilities` output against the real host.

        Verifies, in order: non-empty host UUID, host arch, host CPU count,
        guest arch wordsizes against qemu-kvm's CPU list, hypervisor type
        against `virsh uri`, and power-management capabilities against
        pm-is-supported (skipped when that tool is absent).

        :param source: capabilities XML string to validate
        :raises error.TestFail: on any mismatch with the host
        :raises error.TestNAError: when qemu-kvm cannot be found
        """
        cap_xml = capability_xml.CapabilityXML()
        cap_xml.xml = source

        # Check that host has a non-empty UUID tag.
        xml_uuid = cap_xml.uuid
        logging.debug("Host UUID (capabilities_xml): %s" % xml_uuid)
        if xml_uuid == "":
            raise error.TestFail("The host uuid in capabilities_xml is none!")

        # Check the host arch.
        xml_arch = cap_xml.arch
        logging.debug("Host arch (capabilities_xml): %s", xml_arch)
        exp_arch = utils.run("arch", ignore_status=True).stdout.strip()
        # Direct string comparison instead of Python-2-only cmp().
        if xml_arch != exp_arch:
            raise error.TestFail("The host arch in capabilities_xml is expected"
                                 " to be %s, but get %s" % (exp_arch, xml_arch))

        # Check the host cpu count.
        xml_cpu_count = cap_xml.cpu_count
        logging.debug("Host cpus count (capabilities_xml): %s", xml_cpu_count)
        cmd = "grep processor /proc/cpuinfo | wc -l"
        exp_cpu_count = int(utils.run(cmd, ignore_status=True).stdout.strip())
        if xml_cpu_count != exp_cpu_count:
            raise error.TestFail("Host cpus count is expected to be %s, but get "
                                 "%s" % (exp_cpu_count, xml_cpu_count))

        # Check the arch of guest supported: every <guest> wordsize in the
        # XML must also appear in qemu-kvm's reported CPU list.
        xmltreefile = cap_xml.__dict_get__('xml')
        xml_os_arch_machine_map = cap_xml.os_arch_machine_map
        logging.debug(xml_os_arch_machine_map['hvm'])
        try:
            img = utils_misc.find_command("qemu-kvm")
        except ValueError:
            raise error.TestNAError("Cannot find qemu-kvm")
        cmd = img + " --cpu ? | grep qemu"
        cmd_result = utils.run(cmd, ignore_status=True)
        for guest in xmltreefile.findall('guest'):
            guest_wordsize = guest.find('arch').find('wordsize').text
            logging.debug("Arch of guest supported (capabilities_xml):%s",
                          guest_wordsize)
            if not re.search(guest_wordsize, cmd_result.stdout.strip()):
                raise error.TestFail("The capabilities_xml gives an extra arch "
                                     "of guest to support!")

        # Check the type of hypervisor against the connection URI.
        first_guest = xmltreefile.findall('guest')[0]
        first_domain = first_guest.find('arch').findall('domain')[0]
        guest_domain_type = first_domain.get('type')
        logging.debug("Hypervisor (capabilities_xml):%s", guest_domain_type)
        cmd_result = utils.run("virsh uri", ignore_status=True)
        if not re.search(guest_domain_type, cmd_result.stdout.strip()):
            raise error.TestFail("The capabilities_xml gives an different "
                                 "hypervisor")

        # check power management support.
        try:
            pm_cmd = os_dep.command('pm-is-supported')
            pm_cap_map = {'suspend': 'suspend_mem',
                          'hibernate': 'suspend_disk',
                          'suspend-hybrid': 'suspend_hybrid',
                          }
            exp_pms = []
            # pm-is-supported exits 0 when the given method is available.
            for opt in pm_cap_map:
                cmd = '%s --%s' % (pm_cmd, opt)
                res = utils.run(cmd, ignore_status=True)
                if res.exit_status == 0:
                    exp_pms.append(pm_cap_map[opt])
            pms = cap_xml.power_management_list
            if set(exp_pms) != set(pms):
                raise error.TestFail("Expected supported PMs are %s, got %s "
                                     "instead." % (exp_pms, pms))
        except ValueError:
            logging.debug('Power management checking is skipped, since command '
                          'pm-is-supported is not found.')
Beispiel #22
0
    def check_cpu(xml, cpu_match, arch):
        """
        Check the dumpxml result for --update-cpu option

        Note, function custom_cpu() hard code these features and policy,
        so after run virsh dumpxml --update-cpu:
        1. For match='minimum', all host support features will added,
           and match change to 'exact'. Since libvirt-3.0, cpu update is
           reworked, and the custom CPU with minimum match is converted
           similarly to host-model.
        2. policy='optional' features(support by host) will update to
           policy='require'
        3. policy='optional' features(unsupport by host) will update to
           policy='disable'
        4. Other policy='disable|force|forbid|require' with keep the
           original values

        :param xml: domain XML string produced by dumpxml --update-cpu
        :param cpu_match: the originally configured cpu 'match' value
        :param arch: host architecture string (e.g. 'x86_64', 's390x')
        :return: True if all policy/match/model checks passed, else False
        """
        vmxml = vm_xml.VMXML()
        vmxml['xml'] = xml
        vmcpu_xml = vmxml['cpu']
        check_pass = True
        require_count = 0
        expect_require_features = 0
        if arch == 's390x':
            # on s390x custom is left as-is
            pass
        else:
            cpu_feature_list = vmcpu_xml.get_feature_list()
            host_capa = capability_xml.CapabilityXML()
            for i in range(len(cpu_feature_list)):
                f_name = vmcpu_xml.get_feature_name(i)
                f_policy = vmcpu_xml.get_feature_policy(i)
                err_msg = "Policy of '%s' is not expected: %s" % (f_name,
                                                                  f_policy)
                # Default expectation per feature; overridden to 'require'
                # below only when the host supports the feature.
                expect_policy = "disable"
                if f_name in ["xtpr", "vme", "ia64"]:
                    # Check if feature is support on the host
                    # Since libvirt3.9, libvirt query qemu/kvm to get one feature support or not
                    if libvirt_version.version_compare(3, 9, 0):
                        if f_name in get_cpu_features():
                            expect_policy = "require"
                    else:
                        if host_capa.check_feature_name(f_name):
                            expect_policy = "require"
                    if f_policy != expect_policy:
                        logging.error(err_msg)
                        check_pass = False
                # tm2 was configured as 'optional' and is assumed unsupported
                # by the host, so it must have been rewritten to 'disable'.
                if f_name == "tm2":
                    if f_policy != "disable":
                        logging.error(err_msg)
                        check_pass = False
                # 'force'/'forbid' policies must survive the update
                # unmodified (rule 4 in the docstring above).
                if f_name == "est":
                    if f_policy != "force":
                        logging.error(err_msg)
                        check_pass = False
                if f_name == "vmx":
                    if f_policy != "forbid":
                        logging.error(err_msg)
                        check_pass = False
                # Count expect require features
                if expect_policy == "require":
                    expect_require_features += 1
                # Count actual require features
                if f_policy == "require":
                    require_count += 1

        # Check optional feature is changed to require/disable
        expect_model = 'Penryn'

        if cpu_match == "minimum":
            # libvirt commit 3b6be3c0 change the behavior of update-cpu
            # Check model is changed to host cpu-model given in domcapabilities
            # NOTE(review): host_capa is only bound in the non-s390x branch
            # above, so reaching this line with arch == 's390x' would raise
            # NameError -- presumably 'minimum' match is never used on
            # s390x; confirm with the callers.
            if libvirt_version.version_compare(3, 0, 0):
                expect_model = host_capa.model
            expect_match = "exact"
            # For different host, the support require features are different,
            # so just check the actual require features greater than the
            # expect number
            if require_count < expect_require_features:
                logging.error("Found %d require features, but expect >=%s",
                              require_count, expect_require_features)
                check_pass = False
        else:
            expect_match = cpu_match
            if require_count != expect_require_features:
                logging.error("Found %d require features, but expect %s",
                              require_count, expect_require_features)
                check_pass = False

        logging.debug("Expect 'match' value is: %s", expect_match)
        match = vmcpu_xml['match']
        if match != expect_match:
            logging.error("CPU match '%s' is not expected", match)
            check_pass = False
        logging.debug("Expect 'model' value is: %s", expect_model)
        if vmcpu_xml['model'] != expect_model:
            logging.error("CPU model %s is not expected", vmcpu_xml['model'])
            check_pass = False
        return check_pass
Beispiel #23
0
def run(test, params, env):
    """
    Test host_uuid parameter in libvird.conf.

    1) Change host_uuid in libvirtd.conf;
    2) Restart libvirt daemon;
    3) Check if libvirtd successfully started;
    4) Check current host UUID by `virsh capabilities`;

    :param test: avocado test object (entry-point contract, unused directly)
    :param params: dict-like test parameters
    :param env: test environment object (unused)
    """
    def get_dmi_uuid():
        """
        Retrieve the UUID of DMI, which is usually used as libvirt daemon
        host UUID.

        :return : DMI UUID if it can be located or None if can't.
        """
        uuid_paths = [
            '/sys/devices/virtual/dmi/id/product_uuid',
            '/sys/class/dmi/id/product_uuid',
        ]
        for path in uuid_paths:
            if os.path.isfile(path):
                # 'with' replaces the manual try/finally close and still
                # guarantees the file handle is released.
                with open(path) as dmi_fp:
                    return dmi_fp.readline().strip().lower()

    uuid_type = params.get("uuid_type", "lowercase")
    expected_result = params.get("expected_result", "success")
    new_uuid = params.get("new_uuid", "")

    # We are expected to get an standard UUID format on success.
    # uuid.UUID() also validates the parameter early.
    if expected_result == 'success':
        expected_uuid = str(uuid.UUID(new_uuid))

    config = utils_config.LibvirtdConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        orig_uuid = capability_xml.CapabilityXML()['uuid']
        logging.debug('Original host UUID is %s' % orig_uuid)

        if uuid_type == 'not_set':
            # Remove `host_uuid` in libvirtd.conf.
            del config.host_uuid
        elif uuid_type == 'unterminated':
            # Change `host_uuid` in libvirtd.conf.
            config.set_raw('host_uuid', '"%s' % new_uuid)
        elif uuid_type == 'unquoted':
            config.set_raw('host_uuid', new_uuid)
        elif uuid_type == 'single_quoted':
            config.set_raw('host_uuid', "'%s'" % new_uuid)
        else:
            config.host_uuid = new_uuid

        # Restart libvirtd to make change valid. May raise ConfigError
        # if not succeed.
        if not libvirtd.restart():
            if expected_result != 'unbootable':
                raise error.TestFail('Libvirtd is expected to be started '
                                     'with host_uuid = %s' % config['host_uuid'])
            return

        if expected_result == 'unbootable':
            raise error.TestFail('Libvirtd is not expected to be started '
                                 'with host_uuid = %s' % config['host_uuid'])

        cur_uuid = capability_xml.CapabilityXML()['uuid']
        logging.debug('Current host UUID is %s' % cur_uuid)

        if expected_result == 'success':
            if cur_uuid != expected_uuid:
                raise error.TestFail(
                    "Host UUID doesn't changed as expected"
                    " from %s to %s, but %s" % (orig_uuid, expected_uuid,
                                                cur_uuid))
        # libvirtd should use system DMI UUID for all_digit_same or
        # not_set host_uuid.
        elif expected_result == 'dmi_uuid':
            dmi_uuid = get_dmi_uuid()
            logging.debug("DMI UUID is %s." % dmi_uuid)

            if dmi_uuid is not None and cur_uuid != dmi_uuid:
                raise error.TestFail(
                    "Host UUID doesn't changed from "
                    "%s to DMI UUID %s as expected, but %s" % (
                        orig_uuid, dmi_uuid, cur_uuid))
    finally:
        # Restore the original libvirtd.conf and make sure the daemon is
        # left running for subsequent tests.
        config.restore()
        if not libvirtd.is_running():
            libvirtd.start()
Beispiel #24
0
def run(test, params, env):
    """
    Test vcpu

    Verify max-vcpu reporting (virsh maxvcpus / virsh capabilities) and
    vcpu configuration or hotplug for i440fx and q35 machine types,
    optionally with an IOMMU device configured.

    :param test: avocado test object used for fail/cancel reporting
    :param params: dict-like test parameters selecting the 'check' scenario
    :param env: test environment providing the VM under test
    """
    vm_name = params.get('main_vm')
    check = params.get('check', '')
    status_error = 'yes' == params.get('status_error', 'no')
    err_msg = params.get('err_msg', '')
    guest_vcpu = params.get('guest_vcpu')
    boot_timeout = int(params.get('boot_timeout', 240))
    start_fail = 'yes' == params.get('start_fail', 'no')

    vm = env.get_vm(vm_name)
    # Keep a pristine copy of the inactive XML so it can be restored in the
    # finally block regardless of which scenario ran.
    vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()

    def check_onlinevcpus(vm, cpu_num):
        """

        Check whether all vcpus are online as expected.

        :param vm: the exact VM need to check
        :param cpu_num: the num of online vcpus need to match
        """
        # Poll up to 120s because vcpu onlining inside the guest is async.
        if not utils_misc.wait_for(
                lambda: cpu.check_if_vm_vcpu_match(cpu_num, vm),
                timeout=120,
                step=5,
                text="wait for vcpu online"):
            test.fail('Not all vcpus are online as expected.')

    def set_iommu(vmxml, **dargs):
        """

        Add iommu device to vm.

        :param vmxml: xml of vm to be add iommu device
        :param dargs: args or the iommu device
        :return:
        """
        logging.info('Add iommu device to vm.')
        iommu_device = Iommu()
        iommu_device.model = dargs.get('model', 'intel')
        iommu_device.driver = dargs.get('driver', {
            'intremap': 'on',
            'eim': 'on'
        })
        vmxml.add_device(iommu_device)

    try:
        # Check the output of "virsh maxvcpus" for both i440fx and q35 VM
        if check == 'virsh_maxvcpus':
            report_num = params.get('report_num', '')
            logging.info('Check the output of virsh maxvcpus')
            cmd_result = virsh.maxvcpus(debug=True)
            if cmd_result.exit_status == 0 and cmd_result.stdout.strip(
            ) == report_num:
                logging.debug('Test passed as the reported max vcpu num is %s',
                              report_num)
            else:
                test.fail(
                    'Test failed as the reported max vcpu num is not as expected.'
                )

        # Check the output of "virsh capabilities" for both i440fx and q35 VM
        if check == "virsh_capabilities":
            report_num_pc_7 = params.get('report_num_pc_7', '')
            report_num_q35_73 = params.get('report_num_q35_73', '')
            report_num_q35_7_8 = params.get('report_num_q35_7_8', '')
            logging.info('Check the output of virsh capabilities')
            xmltreefile = capability_xml.CapabilityXML().xmltreefile
            # Collect machine-type -> maxCpus for every x86_64 guest arch
            # entry found in the capabilities XML.
            machtype_vcpunum_dict = {}
            for guest in xmltreefile.findall('guest'):
                for arch in guest.findall('arch'):
                    if arch.get('name') == "x86_64":
                        for machine in arch.findall('machine'):
                            machine_text = machine.text
                            vcpunum = machine.get('maxCpus')
                            machtype_vcpunum_dict[machine_text] = vcpunum
            for key in machtype_vcpunum_dict:
                logging.info("%s : %s", key, machtype_vcpunum_dict[key])
                if key.startswith('pc-i440fx') or key.startswith(
                        'rhel') or key == 'pc':
                    if machtype_vcpunum_dict[key] != report_num_pc_7:
                        test.fail(
                            'Test failed as i440fx_max_vcpus_num in virsh_capa is wrong.'
                        )
                if key.startswith('pc-q35') or key == 'q35':
                    # pc-q35-rhel7.3.0 reports a different limit than the
                    # later q35 machine types, so it is checked separately.
                    if key == "pc-q35-rhel7.3.0":
                        if machtype_vcpunum_dict[key] != report_num_q35_73:
                            test.fail(
                                'Test failed as q35_rhel73_max_vcpus_num in virsh_capa is wrong.'
                            )
                    else:
                        if machtype_vcpunum_dict[key] != report_num_q35_7_8:
                            test.fail(
                                'Test failed as the q35_max_vcpus_num in virsh_capa is wrong.'
                            )

        # Test i440fx VM starts with 240(positive)/241(negative) vcpus and hot-plugs vcpus to 240
        if check.startswith('i440fx_test'):
            current_vcpu = params.get('current_vcpu')
            target_vcpu = params.get('target_vcpu')
            if 'hotplug' not in check:
                # Cold-set path: define the max vcpu count and boot.
                vmxml.vcpu = int(guest_vcpu)
                vmxml.sync()
                if status_error:
                    if start_fail:
                        result_need_check = virsh.start(vm_name, debug=True)
                else:
                    vm.start()
                    logging.info(libvirt_xml.VMXML.new_from_dumpxml(vm_name))
                    vm.wait_for_login(timeout=boot_timeout).close()
                    check_onlinevcpus(vm, int(guest_vcpu))
            else:
                # Hotplug path: boot with current_vcpu online, then grow
                # the online count to target_vcpu via setvcpus.
                vmxml.vcpu = int(guest_vcpu)
                vmxml.current_vcpu = int(current_vcpu)
                target_vcpu = int(target_vcpu)
                vmxml.sync()
                vm.start()
                logging.info(libvirt_xml.VMXML.new_from_dumpxml(vm_name))
                vm.wait_for_login(timeout=boot_timeout).close()
                check_onlinevcpus(vm, int(current_vcpu))
                res = virsh.setvcpus(vm_name, target_vcpu, debug=True)
                libvirt.check_exit_status(res)
                check_onlinevcpus(vm, int(target_vcpu))

        # Configure a guest vcpu > 255 without iommu device for q35 VM
        if check == 'no_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Set iommu device but not set ioapci in features for q35 VM
        if check == 'with_iommu':
            logging.info('Set vcpu to %s', guest_vcpu)
            vmxml.vcpu = int(guest_vcpu)
            set_iommu(vmxml)
            result_need_check = virsh.define(vmxml.xml, debug=True)

        # Add ioapic and iommu device in xml for q35 VM
        if check.startswith('ioapic_iommu'):
            logging.info('Modify features')
            vm_features = vmxml.features
            vm_features.add_feature('apic')
            vm_features.add_feature('ioapic', 'driver', 'qemu')
            vmxml.features = vm_features
            logging.debug(vmxml.features.get_feature_list())

            logging.info('Set vcpu to %s', guest_vcpu)
            set_iommu(vmxml)

            ori_vcpu = vmxml.vcpu
            vmxml.vcpu = int(guest_vcpu)
            vmxml.current_vcpu = ori_vcpu

            if 'hotplug' not in check:
                vmxml.current_vcpu = int(guest_vcpu)

            if status_error:
                if start_fail:
                    # Since libvirt 5.6.0 the invalid config is rejected at
                    # define time; older versions only fail at start.
                    if libvirt_version.version_compare(5, 6, 0):
                        result_need_check = virsh.define(vmxml.xml, debug=True)
                    else:
                        vmxml.sync()
                        result_need_check = virsh.start(vm_name, debug=True)

            else:
                # Login guest and check guest cpu number
                vmxml.sync()
                logging.debug(virsh.dumpxml(vm_name))
                vm.start()
                session = vm.wait_for_login(timeout=boot_timeout)
                logging.debug(session.cmd('lscpu -e'))

                # Hotplug vcpu to $guest_vcpu
                if 'hotplug' in check:
                    res = virsh.setvcpus(vm_name, guest_vcpu, debug=True)
                    libvirt.check_exit_status(res)

                # Check if vcpu(s) are online
                check_onlinevcpus(vm, int(guest_vcpu))

        # Check result if there's result to check
        if 'result_need_check' in locals():
            libvirt.check_result(result_need_check, err_msg)

    finally:
        # Always restore the original (backup) domain XML.
        bkxml.sync()
Beispiel #25
0
def run(test, params, env):
    """
    Test the command virsh maxvcpus

    (1) Call virsh maxvcpus
    (2) Call virsh -c remote_uri maxvcpus
    (3) Call virsh maxvcpus with an unexpected option

    :param test: avocado test object (entry-point contract, unused directly)
    :param params: dict-like test parameters
    :param env: test environment object (unused)
    """

    # get the params from subtests.
    # params for general.
    option = params.get("virsh_maxvcpus_options")
    status_error = params.get("status_error")
    connect_arg = params.get("connect_arg", "")
    # Read the expected max vcpu count for the host arch from the local
    # capabilities XML so the command output can be verified against it.
    capa = capability_xml.CapabilityXML()
    host_arch = capa.arch
    xmltreefile = capa.__dict_get__('xml')
    maxvcpus = None
    try:
        maxvcpus = capa.get_guest_capabilities()['hvm'][host_arch]['maxcpus']
    except Exception:
        # Narrowed from a bare 'except:'. Also build the message with the
        # % operator: TestFail("...%s", x) does not format its arguments.
        raise exceptions.TestFail("Failed to get maxvcpus from capabilities"
                                  " xml\n%s" % xmltreefile)

    if not maxvcpus:
        raise exceptions.TestFail("Failed to get guest section for host arch: %s "
                                  "from capabilities xml\n%s"
                                  % (host_arch, xmltreefile))

    # params for transport connect.
    local_ip = params.get("local_ip", "ENTER.YOUR.LOCAL.IP")
    local_pwd = params.get("local_pwd", "ENTER.YOUR.LOCAL.ROOT.PASSWORD")
    server_ip = params.get("remote_ip", local_ip)
    server_pwd = params.get("remote_pwd", local_pwd)
    transport_type = params.get("connect_transport_type", "local")
    transport = params.get("connect_transport", "ssh")

    # check the config: a remote transport test needs real IP/password
    # values, not the "ENTER.YOUR..." placeholders.
    if (connect_arg == "transport" and
            transport_type == "remote" and
            local_ip.count("ENTER")):
        raise exceptions.TestNAError("Parameter local_ip is not configured"
                                     "in remote test.")
    if (connect_arg == "transport" and
            transport_type == "remote" and
            local_pwd.count("ENTER")):
        raise exceptions.TestNAError("Parameter local_pwd is not configured"
                                     "in remote test.")

    if connect_arg == "transport":
        canonical_uri_type = virsh.driver()

        if transport == "ssh":
            # Make sure passwordless ssh between client and server works
            # before building the remote URI; set it up on first failure.
            ssh_connection = utils_conn.SSHConnection(server_ip=server_ip,
                                                      server_pwd=server_pwd,
                                                      client_ip=local_ip,
                                                      client_pwd=local_pwd)
            try:
                ssh_connection.conn_check()
            except utils_conn.ConnectionError:
                ssh_connection.conn_setup()
                ssh_connection.conn_check()

            connect_uri = libvirt_vm.get_uri_with_transport(
                uri_type=canonical_uri_type,
                transport=transport, dest_ip=server_ip)
    else:
        connect_uri = connect_arg

    # Run test case
    result = virsh.maxvcpus(option, uri=connect_uri, ignore_status=True,
                            debug=True)

    maxvcpus_test = result.stdout.strip()
    status = result.exit_status

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise exceptions.TestFail("Run successed with unsupported option!")
        else:
            logging.info("Run failed with unsupported option %s " % option)
    elif status_error == "no":
        if status == 0:
            # kqemu is always limited to a single vcpu.
            if "kqemu" in option:
                if not maxvcpus_test == '1':
                    raise exceptions.TestFail("Command output %s is not expected "
                                              "for %s " % (maxvcpus_test, option))
            elif option == 'qemu' or option == '--type qemu' or option == '':
                if not maxvcpus_test == maxvcpus:
                    raise exceptions.TestFail("Command output %s is not expected "
                                              "for %s " % (maxvcpus_test, option))
            else:
                # No check with other types
                pass
        else:
            raise exceptions.TestFail("Run command failed")