Example #1
def run(test, params, env):
    """
    Test bridge network support

    1) create a linux bridge and connect a physical interface to it
    2) define an nwfilter with "vdsm-no-mac-spoofing"
    3) redefine the vm with the newly created bridge and filter
    4) check if the guest can get a public ip after vm start
    5) check if the guest and host can ping each other
    6) check if the guest and host can ping outside
    7) start another vm connected to the same bridge
    8) check if the 2 guests can ping each other
    """

    def create_bridge_network(br_name, net_name, inbound="{'average':'0'}", outbound="{'average':'0'}"):
        """
        Define and start the bridge type network
        """
        # check if network with the same name already exists
        output_all = virsh.net_list("--all").stdout.strip()
        if re.search(net_name, output_all):
            test.cancel("Network with the same name already exists!")
        test_xml = network_xml.NetworkXML(network_name="%s" % net_name)
        test_xml.forward = {"mode": "bridge"}
        test_xml.bridge = {"name": br_name}
        test_xml.bandwidth_inbound = eval(inbound)
        test_xml.bandwidth_outbound = eval(outbound)
        logging.debug("The network's xml is %s", test_xml)
        test_xml.create()

    def define_nwfilter(filter_name):
        """
        Define nwfilter vdsm-no-mac-spoofing with content like:
        <filter name='vdsm-no-mac-spoofing' chain='root'>
            <filterref filter='no-mac-spoofing'/>
            <filterref filter='no-arp-mac-spoofing'/>
        </filter>

        :param filter_name: the name of nwfilter
        :return: None; the test fails if the nwfilter cannot be defined
        """
        filter_uuid = params.get("filter_uuid", "11111111-b071-6127-b4ec-111111111111")
        filter_params = {"filter_name": "vdsm-no-mac-spoofing",
                         "filter_chain": "root",
                         "filter_uuid": filter_uuid,
                         "filterref_name_1": "no-mac-spoofing",
                         "filterref_name_2": "no-arp-mac-spoofing"}
        filter_xml = libvirt.create_nwfilter_xml(filter_params).xml
        # Run command
        result = virsh.nwfilter_define(filter_xml, ignore_status=True, debug=True)
        if result.exit_status:
            test.fail("Failed to define nwfilter with %s" % filter_xml)

    def ping(src_ip, dest_ip, ping_count, timeout, session=None):
        """
        Wrap of ping

        :param src_ip: source address
        :param dest_ip: destination address
        :param ping_count: count of icmp packet
        :param timeout: timeout for the ping command
        :param session: local execution or session to execute the ping command
        :return: None; the test fails if the ping does not succeed
        """
        status, output = utils_net.ping(dest=dest_ip, count=ping_count,
                                        interface=src_ip, timeout=timeout,
                                        session=session, force_ipv4=True)
        if status:
            test.fail("Fail to ping %s from %s" % (dest_ip, src_ip))

    def check_net_functions(guest_ip, ping_count, ping_timeout, guest_session, host_ip, remote_url, endpoint_ip):
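        """
        Run a set of ping checks: the host pings the remote url and the guest,
        and the guest pings the host, the remote url and another endpoint.

        :param guest_ip: ip address of the guest under test
        :param ping_count: count of icmp packets per ping
        :param ping_timeout: timeout for each ping command
        :param guest_session: session used to run ping inside the guest
        :param host_ip: ip address of the host bridge
        :param remote_url: remote address used for outside connectivity check
        :param endpoint_ip: ip of another endpoint (e.g. the other guest)
        """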
        # make sure host network works well
        # host ping remote url
        ping(host_ip, remote_url, ping_count, ping_timeout)
        # host ping guest
        ping(host_ip, guest_ip, ping_count, ping_timeout)
        # guest ping host
        ping(guest_ip, host_ip, ping_count, ping_timeout, session=guest_session)
        # guest ping remote url
        ping(guest_ip, remote_url, ping_count, ping_timeout, session=guest_session)
        # guest ping endpoint
        ping(guest_ip, endpoint_ip, ping_count, ping_timeout, session=guest_session)

    # Get test params
    bridge_name = params.get("bridge_name", "test_br0")
    filter_name = params.get("filter_name", "vdsm-no-mac-spoofing")
    ping_count = params.get("ping_count", "5")
    ping_timeout = float(params.get("ping_timeout", "10"))
    iface_name = utils_net.get_net_if(state="UP")[0]
    bridge_script = NETWORK_SCRIPT + bridge_name
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(data_dir.get_tmp_dir(), "iface-%s.bk" % iface_name)
    attach_interface = "yes" == params.get("attach_interface", "no")
    iface_model = params.get("iface_model", "virtio")
    iface_source = eval(params.get("iface_source", "{'bridge':'test_br0'}"))
    iface_type = params.get("iface_type", 'bridge')
    iface_target = params.get("iface_target", "br_target")
    iface_alias = params.get("iface_alias", None)
    hotplug = "yes" == params.get("hotplug", "no")
    iface_driver = params.get("iface_driver", None)
    reconnect_tap = "yes" == params.get("reconnect_tap", "no")
    restart_net = "yes" == params.get("restart_net", "no")
    start_vm2 = "yes" == params.get("start_vm2", "no")
    create_network = "yes" == params.get("create_network", "no")
    update_device = "yes" == params.get("update_with_diff_type", "no")
    test_qos = "yes" == params.get("test_qos", "no")
    test_net_qos = "yes" == params.get("test_net_qos", "no")
    iface_inbound = params.get("iface_bandwidth_inbound", "{'average':'0'}")
    iface_outbound = params.get("iface_bandwidth_outbound", "{'average':'0'}")
    net_inbound = params.get("net_bandwidth_inbound", "{'average':'0'}")
    net_outbound = params.get("net_bandwidth_outbound", "{'average':'0'}")

    libvirt_version.is_libvirt_feature_supported(params)
    vms = params.get("vms").split()
    vm1_name = vms[0]
    vm1 = env.get_vm(vm1_name)

    if start_vm2:
        if len(vms) <= 1:
            test.cancel("Need two VMs to test")
        else:
            vm2_name = vms[1]
        vm2 = env.get_vm(vm2_name)
        vm2_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm2_name)

    # Back up the interface script
    if os.path.exists(iface_script):
        process.run("cp %s %s" % (iface_script, iface_script_bk),
                    shell=True, verbose=True)
    # Back up vm xml
    vm1_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm1_name)

    # Record NetworkManager status and make sure the service is running
    NM_service = service.Factory.create_service("NetworkManager")
    NM_status = NM_service.status()
    if not NM_status:
        NM_service.start()
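    # Generate a MAC address for the interface that will be added or hot-plugged below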
    mac = utils_net.generate_mac_address_simple()

    try:
        if libvirt.check_iface(bridge_name, "exists", "--all"):
            test.cancel("The bridge %s already exist" % bridge_name)
        s, o = utils_net.create_linux_bridge_tmux(bridge_name, iface_name)
        if s:
            test.fail("Failed to create linux bridge on the host. Status: %s Stdout: %s" % (s, o))
        define_nwfilter(filter_name)
        if create_network:
            create_bridge_network(bridge_name, iface_source["network"], net_inbound, net_outbound)
        if hotplug:
            err_msgs = ("No more available PCI slots",
                        "No more available PCI addresses")
            # delete the original interface on the vm before hot-plug
            if vm1.is_alive():
                vm1.destroy()
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm1_name)
            iface_xml = vmxml.get_devices('interface')[0]
            logging.debug("Delete the original interface")
            vmxml.del_device(iface_xml)
            vmxml.sync()
            vm1.start()
            # do hot-plug
            if attach_interface:
                logging.info("Try to hot-plug interface")
                options = ("%s %s --model %s --mac %s" %
                           (iface_type, iface_source['bridge'],
                            iface_model, mac))
                if test_qos:
                    inbound_value = ','.join(eval(iface_inbound).values())
                    outbound_value = ','.join(eval(iface_outbound).values())
                    options = ("%s %s --model %s --mac %s --inbound %s --outbound %s" %
                               (iface_type, iface_source['bridge'], iface_model, mac, inbound_value, outbound_value))
                ret = virsh.attach_interface(vm1_name, options,
                                             ignore_status=True)
            else:
                logging.info("Try to hot-plug device")
                target = str({'dev': iface_target})
                iface_alias = str({'name': iface_alias})
                vm_iface_source = str(iface_source)
                iface_params = {"type": iface_type, "source": vm_iface_source, "filter": filter_name, "mac": mac,
                                'alias': iface_alias, 'target': target, 'model': iface_model,
                                'driver': iface_driver}
                attach_xml = interface.Interface(iface_params['type'])
                attach_xml.xml = libvirt.modify_vm_iface(vm1_name, 'get_xml', iface_params)
                ret = virsh.attach_device(vm1_name, attach_xml.xml, ignore_status=True, debug=True)
            if ret.exit_status:
                if any([msg in ret.stderr for msg in err_msgs]):
                    test.error("No more pci slots, can't attach more devices")
                else:
                    test.fail("Failed to attach-interface: %s" % ret.stderr.strip())
            else:
                logging.debug("Hot-plug interface or device pass")
                if update_device:
                    # As the interface type changes to the actual type "bridge" in the live xml,
                    # make sure that an update using the original "network" type does not fail.
                    # Try to delete the nwfilter using the original type in iface_params
                    update_xml = interface.Interface(iface_type)
                    iface_params_update = {"del_filter": "yes", "type": "network", "source": vm_iface_source}
                    update_xml.xml = libvirt.modify_vm_iface(vm1_name, 'get_xml', iface_params_update)
                    ret = virsh.update_device(vm1_name, update_xml.xml, ignore_status=True, debug=True)
                    libvirt.check_exit_status(ret)

        else:
            vm_iface_source = str(iface_source)
            vm1_iface_params = {"type": iface_type, "source": vm_iface_source, "filter": filter_name,
                                "mac": mac, 'driver': iface_driver, "iface_model": iface_model,
                                "inbound": iface_inbound, "outbound": iface_outbound}
            libvirt.modify_vm_iface(vm1_name, "update_iface", vm1_iface_params)

            if vm1.is_alive():
                vm1.destroy()

            vm1.start()
        # restart guest networking to apply an ip address, as it may not be initialized yet
        session1 = session2 = None
        session1 = vm1.wait_for_serial_login()
        utils_net.restart_guest_network(session1)
        output = session1.cmd_output("ifconfig || ip a")
        logging.debug("guest1 ip info %s" % output)

        # Check guest's network function
        host_ip = utils_net.get_ip_address_by_interface(bridge_name)
        remote_url = params.get("remote_ip", "www.google.com")
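        # Note: the remote_url pings below assume outbound connectivity from
        # the host and guests; override "remote_ip" in params if needed.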

        try:
            vm1_ip = utils_net.get_guest_ip_addr(session1, mac)
        except Exception as errs:
            test.fail("vm1 can't get IP with the new create bridge: %s" % errs)
        if test_qos:
            if test_net_qos:
                logging.debug("Test network inbound:")
                res1 = utils_net.check_class_rules(bridge_name, "1:2", ast.literal_eval(net_inbound))
                logging.debug("Test network outbound:")
                res2 = utils_net.check_filter_rules(bridge_name, ast.literal_eval(net_outbound))
            else:
                iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm1_name)
                tap_name = libvirt.get_ifname_host(vm1_name, iface_mac)
                logging.debug("Test inbound:")
                res1 = utils_net.check_class_rules(tap_name, "1:1", ast.literal_eval(iface_inbound))
                logging.debug("Test outbound:")
                res2 = utils_net.check_filter_rules(tap_name, ast.literal_eval(iface_outbound))
            if not res1 or not res2:
                test.fail("Qos test fail!")
        if hotplug:
            # reboot vm1, then check network function to ensure the interface is still there and works fine
            logging.info("reboot the vm")
            virsh.reboot(vm1_name)
            if session1 is None:
                session1 = vm1.wait_for_serial_login()
            ping(vm1_ip, remote_url, ping_count, ping_timeout, session=session1)
            # restart libvirtd service then check the interface still works fine
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            vm1.cleanup_serial_console()
            vm1.create_serial_console()
            session1 = vm1.wait_for_serial_login()
            ping(vm1_ip, remote_url, ping_count, ping_timeout, session=session1)
            logging.info("after reboot and restart libvirtd, the network works fine")
            if iface_driver:
                try:
                    driver_dict = eval(iface_driver)
                    if session1 is None:
                        session1 = vm1.wait_for_serial_login()
                    guest_iface_info = session1.cmd_output("ip l").strip()
                    guest_iface_name = re.findall(r"^\d+: (\S+?)[@:].*state UP.*$", guest_iface_info, re.MULTILINE)[0]
                    comb_size = driver_dict.get('queues')
                    rx_size = driver_dict.get('rx_queue_size')
                    session1.cmd_status("ethtool -L %s combined %s" % (guest_iface_name, comb_size))
                    ret, outp = session1.cmd_status_output("ethtool -l %s" % guest_iface_name)
                    logging.debug("ethtool cmd output:%s" % outp)
                    if not ret:
                        pre_comb = re.search(r"Pre-set maximums:[\s\S]*?Combined:.*?(\d+)", outp).group(1)
                        cur_comb = re.search(r"Current hardware settings:[\s\S]*?Combined:.*?(\d+)", outp).group(1)
                        if int(pre_comb) != int(comb_size) or int(cur_comb) != int(comb_size):
                            test.fail("Fail to check the combined size: setting: %s,"
                                      "Pre-set: %s, Current-set: %s"
                                      % (comb_size, pre_comb, cur_comb))
                        else:
                            logging.info("Getting correct Pre-set and Current set value")
                    else:
                        test.error("ethtool list fail: %s" % outp)
                    # as tx_queue size is only supported for vhost-user interface, only check rx_queue size
                    ret1, outp1 = session1.cmd_status_output("ethtool -g %s" % guest_iface_name)
                    logging.debug("guest queue size setting is %s" % outp1)
                    if not ret1:
                        pre_set = re.search(r"Pre-set maximums:\s*RX:\s*(\d+)", outp1).group(1)
                        cur_set = re.search(r"Current hardware settings:\s*RX:\s*(\d+)", outp1).group(1)
                        if int(pre_set) != int(rx_size) or int(cur_set) != int(rx_size):
                            test.fail("Fail to check the rx_queue_size!")
                except Exception as errs:
                    test.fail("fail to get driver info")
            # hot-unplug interface/device
            if attach_interface:
                ret = virsh.detach_interface(vm1_name, "bridge",
                                             ignore_status=True)
            else:
                ret = virsh.detach_device(vm1_name, attach_xml.xml,
                                          ignore_status=True,
                                          debug=True)
            if ret.exit_status:
                test.fail("Hot-unplug interface/device fail")
            else:
                logging.info("hot-unplug interface/device succeed")

        else:
            if start_vm2:
                # Start vm2 connect to the same bridge
                mac2 = utils_net.generate_mac_address_simple()
                vm2_iface_params = {"type": "bridge", "source": vm_iface_source, "filter": filter_name, "mac": mac2}
                libvirt.modify_vm_iface(vm2_name, "update_iface", vm2_iface_params)
                if vm2.is_alive():
                    vm2.destroy()
                vm2.start()

                # Check if vm1 and vm2 can ping each other
                try:
                    utils_net.update_mac_ip_address(vm2, timeout=120)
                    vm2_ip = vm2.get_address()
                except Exception as errs:
                    test.fail("vm2 can't get IP with the new create bridge: %s" % errs)
                session2 = vm2.wait_for_login()
                # make sure guest has got ip address
                utils_net.restart_guest_network(session2)
                output2 = session2.cmd_output("ifconfig || ip a")
                logging.debug("guest ip info %s" % output2)
                # check 2 guests' network functions
                check_net_functions(vm1_ip, ping_count, ping_timeout, session1,
                                    host_ip, remote_url, vm2_ip)
                check_net_functions(vm2_ip, ping_count, ping_timeout, session2,
                                    host_ip, remote_url, vm1_ip)
        if reconnect_tap:
            iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm1_name)
            tap_name = libvirt.get_ifname_host(vm1_name, iface_mac)
            # For a network with a shared host bridge, destroying the network
            # will not impact the connection
            if create_network and restart_net:
                virsh.net_destroy(iface_source["network"])
                out1 = libvirt_network.check_tap_connected(tap_name, True,
                                                           bridge_name)
                virsh.net_start(iface_source["network"])
                out2 = libvirt_network.check_tap_connected(tap_name, True,
                                                           bridge_name)
                if not out1 or not out2:
                    test.fail("Network destroy and restart should not impact "
                              "tap connection from bridge network!")
            else:
                # Delete and re-create bridge, check the tap is not connected
                utils_net.delete_linux_bridge_tmux(bridge_name, iface_name)
                utils_net.create_linux_bridge_tmux(bridge_name, iface_name)
                out3 = libvirt_network.check_tap_connected(tap_name, False,
                                                           bridge_name)
                # Check restart libvirtd will recover the connection
                libvirtd = utils_libvirtd.Libvirtd()
                libvirtd.restart()
                out4 = utils_misc.wait_for(
                    lambda: libvirt_network.check_tap_connected(tap_name, True,
                                                                bridge_name), 20)
                if not out3 or not out4:
                    test.fail("Delete and create linux bridge and check tap "
                              "connection is not as expected!")
    finally:
        logging.debug("Start to restore")
        vm1_xml_bak.sync()
        if start_vm2:
            vm2_xml_bak.sync()
        virsh.nwfilter_undefine(filter_name, ignore_status=True)
        if os.path.exists(iface_script_bk):
            process.run("mv %s %s" % (iface_script_bk, iface_script),
                        shell=True, verbose=True)
        if os.path.exists(bridge_script):
            process.run("rm -rf %s" % bridge_script, shell=True, verbose=True)
        br_path = "/sys/class/net/%s" % bridge_name
        if os.path.exists(br_path):
            utils_net.delete_linux_bridge_tmux(bridge_name, iface_name)
        # reload network configuration
        NM_service.restart()
        # recover NetworkManager
        if NM_status is True:
            NM_service.start()
        if 'network' in iface_source and iface_source["network"] in virsh.net_state_dict():
            virsh.net_destroy(iface_source["network"], ignore_status=False)
Example #2
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")
    net_persistent = "yes" == params.get("net_persistent")
    net_active = "yes" == params.get("net_active")
    expect_msg = params.get("net_define_undefine_err_msg")

    # define multi ip/dhcp sections in network
    multi_ip = "yes" == params.get("multi_ip", "no")
    netmask = params.get("netmask")
    prefix_v6 = params.get("prefix_v6")
    single_v6_range = "yes" == params.get("single_v6_range", "no")
    # Get 2nd ipv4 dhcp range
    dhcp_ranges_start = params.get("dhcp_ranges_start", None)
    dhcp_ranges_end = params.get("dhcp_ranges_end", None)

    # Get 2 groups of ipv6 ip address and dhcp section
    address_v6_1 = params.get("address_v6_1")
    dhcp_ranges_v6_start_1 = params.get("dhcp_ranges_v6_start_1", None)
    dhcp_ranges_v6_end_1 = params.get("dhcp_ranges_v6_end_1", None)

    address_v6_2 = params.get("address_v6_2")
    dhcp_ranges_v6_start_2 = params.get("dhcp_ranges_v6_start_2", None)
    dhcp_ranges_v6_end_2 = params.get("dhcp_ranges_v6_end_2", None)

    # Edit net xml forward/ip part then define/start to check invalid setting
    edit_xml = "yes" == params.get("edit_xml", "no")
    address_v4 = params.get("address_v4")
    nat_port_start = params.get("nat_port_start")
    nat_port_end = params.get("nat_port_end")
    test_port = "yes" == params.get("test_port", "no")
    loop = int(params.get("loop", 1))

    # Get params about creating a bridge
    bridge = params.get('bridge', None)
    create_bridge = "yes" == params.get('create_bridge', 'no')
    ovs_bridge = "yes" == params.get('ovs_bridge', 'no')
    iface_name = utils_net.get_net_if(state="UP")[0]

    # Get params about creating a network
    create_netxml = "yes" == params.get("create_netxml", "no")
    domain = params.get('domain', None)
    forward = params.get("forward", None)
    net_dns_txt = params.get("net_dns_txt", None)
    net_bandwidth_inbound = params.get("net_bandwidth_inbound", None)
    net_bandwidth_outbound = params.get("net_bandwidth_outbound", None)
    mac = params.get("mac")

    # Edit the created network xml to get the xml to be tested
    del_mac = "yes" == params.get('del_mac', 'no')
    del_ip = "yes" == params.get('del_ip', 'no')
    add_dev = "yes" == params.get('add_dev', 'no')
    virtualport = 'yes' == params.get("virtualport", "no")
    virtualport_type = params.get("virtualport_type")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)
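    # A persistent virsh session is reused for the state queries below; it is
    # closed and recreated around the libvirtd restart later in the test.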
    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_uri = params.get("virsh_uri")
    if virsh_uri and not utils_split_daemons.is_modular_daemon():
        virsh_uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        test.cancel("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs,
                                           test_xml,
                                           net_name,
                                           net_uuid,
                                           bridge=None)
    logging.debug("Get network xml as testnet_xml: %s" % testnet_xml)

    if remove_existing:
        for netxml in list(backup.values()):
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, so collect info
    # on both of them for the result check.
    # When something is wrong with the network, set fail_flag to 1
    fail_flag = 0
    result_info = []

    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs = {
            'uri': virsh_uri,
            'unprivileged_user': unprivileged_user,
            'debug': False,
            'ignore_status': True
        }
        cmd = "chmod 666 %s" % testnet_xml.xml
        process.run(cmd, shell=True)

    if params.get('net_define_undefine_readonly', 'no') == 'yes':
        virsh_dargs = {
            'uri': uri,
            'debug': False,
            'ignore_status': True,
            'readonly': True
        }
    try:
        if edit_xml:
            ipxml_v4 = network_xml.IPXML()
            ipxml_v4.address = address_v4
            ipxml_v4.netmask = netmask
            range_4 = network_xml.RangeXML()
            range_4.attrs = {
                "start": dhcp_ranges_start,
                "end": dhcp_ranges_end
            }
            ipxml_v4.dhcp_ranges = range_4
            testnet_xml.del_ip()
            testnet_xml.set_ip(ipxml_v4)
            if test_port:
                nat_port = {"start": nat_port_start, "end": nat_port_end}
                testnet_xml.nat_port = nat_port
            testnet_xml.debug_xml()
        if multi_ip:
            # Enabling IPv6 forwarding with RA routes without accept_ra set to 2
            # is likely to cause route loss
            sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra'
            original_accept_ra = process.run(sysctl_cmd + ' -n').stdout_text
            if original_accept_ra != '2':
                process.system(sysctl_cmd + '=2')
            # add another ipv4 address and dhcp range
            set_ip_section(testnet_xml,
                           address_v4,
                           ipv6=False,
                           netmask=netmask,
                           dhcp_ranges_start=dhcp_ranges_start,
                           dhcp_ranges_end=dhcp_ranges_end)
            # add ipv6 address and dhcp range
            set_ip_section(testnet_xml,
                           address_v6_1,
                           ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_1,
                           dhcp_ranges_end=dhcp_ranges_v6_end_1)
            # 2nd ipv6 address and dhcp range
            set_ip_section(testnet_xml,
                           address_v6_2,
                           ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_2,
                           dhcp_ranges_end=dhcp_ranges_v6_end_2)
        if create_netxml:
            net_dict = {
                'del_nat_attrs': True,
                'del_ip': del_ip,
                'dns_txt': net_dns_txt,
                'domain': domain,
                'bridge': bridge,
                'forward': forward,
                'interface_dev': iface_name,
                'virtualport': virtualport,
                'virtualport_type': virtualport_type,
                'mac': mac,
                'net_bandwidth_inbound': net_bandwidth_inbound,
                'net_bandwidth_outbound': net_bandwidth_outbound
            }
            logging.debug("net_dict is %s" % net_dict)
            testnet_xml = libvirt_network.modify_network_xml(
                net_dict, testnet_xml)
        testnet_xml.debug_xml()
        if create_bridge:
            if ovs_bridge:
                utils_net.create_ovs_bridge(bridge, ignore_status=False)
            else:
                utils_net.create_linux_bridge_tmux(bridge,
                                                   iface_name,
                                                   ignore_status=False)
        # Run test case
        while loop:
            try:
                define_result = virsh.net_define(define_options, define_extra,
                                                 **virsh_dargs)
                logging.debug(define_result)
                define_status = define_result.exit_status

                # Check network states after define
                if check_states and not define_status:
                    net_state = virsh_instance.net_state_dict()
                    if (net_state[net_name]['active']
                            or net_state[net_name]['autostart']
                            or not net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "defined network: %s" %
                                           str(net_state))

                if define_status == 1 and status_error and expect_msg:
                    logging.debug("check result is %s, expect_msg is %s" %
                                  (define_result, expect_msg))
                    libvirt.check_result(define_result, expect_msg.split(';'))

                # If defining network succeed, then trying to start it.
                if define_status == 0:
                    start_result = virsh.net_start(net_name,
                                                   extra="",
                                                   **virsh_dargs)
                    logging.debug(start_result)
                    start_status = start_result.exit_status

                if trans_ref == "trans":
                    if define_status:
                        fail_flag = 1
                        result_info.append(
                            "Define network with right command failed.")
                    else:
                        if start_status:
                            fail_flag = 1
                            result_info.append(
                                "Start defined network with right "
                                "command failed.")

                # Check network states after start
                if check_states and not status_error:
                    net_state = virsh_instance.net_state_dict()
                    if (not net_state[net_name]['active']
                            or net_state[net_name]['autostart']
                            or not net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "started network: %s" %
                                           str(net_state))
                    # Try to set autostart
                    virsh.net_autostart(net_name, **virsh_dargs)
                    net_state = virsh_instance.net_state_dict()
                    if not net_state[net_name]['autostart']:
                        fail_flag = 1
                        result_info.append(
                            "Failed to set autostart for network %s" %
                            net_name)
                    # Restart libvirtd and check state
                    # Close down persistent virsh session before libvirtd restart
                    if hasattr(virsh_instance, 'close_session'):
                        virsh_instance.close_session()
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    # Need to redefine virsh_instance after libvirtd restart
                    virsh_instance = virsh.VirshPersistent(**virsh_dargs)
                    net_state = virsh_instance.net_state_dict()
                    if (not net_state[net_name]['active']
                            or not net_state[net_name]['autostart']):
                        fail_flag = 1
                        result_info.append(
                            "Found wrong network state after restarting"
                            " libvirtd: %s" % str(net_state))
                    logging.debug("undefine network:")
                    # prepare the network status
                    if not net_persistent:
                        virsh.net_undefine(net_name, ignore_status=False)
                    if not net_active:
                        virsh.net_destroy(net_name, ignore_status=False)
                    undefine_status = virsh.net_undefine(
                        undefine_options, undefine_extra,
                        **virsh_dargs).exit_status

                    net_state = virsh_instance.net_state_dict()
                    if net_persistent:
                        if undefine_status:
                            fail_flag = 1
                            result_info.append(
                                "undefine should succeed but failed")
                        if net_active:
                            if (not net_state[net_name]['active']
                                    or net_state[net_name]['autostart']
                                    or net_state[net_name]['persistent']):
                                fail_flag = 1
                                result_info.append(
                                    "Found wrong network states for "
                                    "undefined network: %s" % str(net_state))
                        else:
                            if net_name in net_state:
                                fail_flag = 1
                                result_info.append(
                                    "Transient network should not exists "
                                    "after undefine : %s" % str(net_state))
                    else:
                        if not undefine_status:
                            fail_flag = 1
                            result_info.append(
                                "undefine transient network should fail "
                                "but succeed: %s" % str(net_state))
                # Stop network for undefine test anyway
                destroy_result = virsh.net_destroy(net_name,
                                                   extra="",
                                                   **virsh_dargs)
                logging.debug(destroy_result)

                # Undefine network
                if not check_states:
                    undefine_result = virsh.net_undefine(
                        undefine_options, undefine_extra, **virsh_dargs)
                    if trans_ref != "define":
                        logging.debug(undefine_result)
                    undefine_status = undefine_result.exit_status
            except Exception:
                logging.debug(
                    "The define and undefine operation in loop %s failed. ",
                    loop)
            finally:
                loop = loop - 1

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in list(leftovers.values()):
            netxml.orbital_nuclear_strike()

        # Recover from backup
        for netxml in list(backup.values()):
            netxml.sync(backup_state[netxml.name])

        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()

        # Done with file, cleanup
        del test_xml
        del testnet_xml
        if create_bridge:
            if ovs_bridge:
                utils_net.delete_ovs_bridge(bridge, ignore_status=False)
            else:
                utils_net.delete_linux_bridge_tmux(bridge,
                                                   iface_name,
                                                   ignore_status=False)
    # Check status_error
    # If fail_flag is set, it must be transaction test.
    if fail_flag:
        test.fail("Define network for transaction test "
                  "failed:%s" % result_info)

    # The logic to check the result:
    # status_error & only undefine: negative undefine test only
    # status_error & (no undefine): negative define test only
    # (not status_error) & (only undefine): positive transaction test
    # (not status_error) & (no undefine): positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                test.fail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    test.fail("Define an unexpected network, "
                              "and start it successfully.")
                else:
                    test.fail("Define an unexpected network, "
                              "but start it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                test.fail("Define network for transaction "
                          "successfully, but undefine failed.")
        else:
            if define_status != 0:
                test.fail("Run failed with right command")
            else:
                if start_status != 0:
                    test.fail("Network is defined as expected, "
                              "but start it failed.")
Example #3
def run(test, params, env):
    """
    Test network/interface function on 2 vms:

        - Test settings on 2 vms
        - Run ping check on 2 vms including pinging each other
        ...

    """
    vms = params.get('vms').split()
    vm_list = [env.get_vm(v_name) for v_name in vms]
    if len(vm_list) != 2:
        test.cancel('This test requires exactly 2 vms')

    feature = params.get('feature', '')
    case = params.get('case', '')
    create_linux_bridge = 'yes' == params.get('create_linux_bridge', 'no')
    check_ping = 'yes' == params.get('check_ping')
    expect_ping_host = 'yes' == params.get('expect_ping_host', 'no')
    expect_ping_out = 'yes' == params.get('expect_ping_out', 'no')
    expect_ping_vm = 'yes' == params.get('expect_ping_vm', 'no')
    out_ip = params.get('out_ip', 'www.redhat.com')
    live_update = 'yes' == params.get('live_update', 'no')
    set_all = 'yes' == params.get('set_all', 'no')

    rand_id = '_' + utils_misc.generate_random_string(3)
    resume_vm = 'yes' == params.get('resume_vm', 'no')
    bridge_name = params.get('bridge_name', 'test_br0') + rand_id
    iface_name = utils_net.get_net_if(state="UP")[0]
    test_net = feature + rand_id
    bridge_created = False

    def pause_resume_vm(vm_name):
        """
        Pause the vm via qemu-monitor-command, then resume it with virsh

        :param vm_name: string, name of the vm
        :return: None
        """
        cmd_ = '''virsh qemu-monitor-command %s '{"execute":"stop"}' ''' % vm_name
        out = process.run(cmd_, shell=True).stdout_text.strip()
        logging.debug("The out of the qemu-monitor-command is %s", out)
        vm = env.get_vm(vm_name)
        if not vm.is_paused():
            test.error("VM %s is not paused by qemu-monitor-command!" %
                       vm_name)
        res = virsh.resume(vm_name, debug=True)
        libvirt.check_exit_status(res)

    vmxml_backup_list = []
    for vm_i in vm_list:
        vmxml_backup_list.append(
            vm_xml.VMXML.new_from_inactive_dumpxml(vm_i.name))

    try:
        # Test feature: port isolated
        if feature == 'port_isolated':
            if not libvirt_version.version_compare(6, 2, 0):
                test.cancel('Libvirt version should be'
                            ' >= 6.2.0 to support isolated port')

            if case.startswith('set_iface'):
                utils_net.create_linux_bridge_tmux(bridge_name, iface_name)
                bridge_created = True
                iface_type = case.split('_')[-1]
                if iface_type == 'network':
                    net_dict = {
                        'net_forward': "{'mode': 'bridge'}",
                        'net_bridge': "{'name': '%s'}" % bridge_name
                    }
                    prepare_network(test_net, **net_dict)
                    updated_iface_dict = {
                        'type': iface_type,
                        'source': "{'network': '%s'}" % test_net,
                    }
                elif iface_type == 'bridge':
                    updated_iface_dict = {
                        'type': iface_type,
                        'source': "{'bridge': '%s'}" % bridge_name,
                    }
                else:
                    test.error('Unsupported iface type: %s' % iface_type)

                # Set 2 vms to isolated=yes or set one to 'yes', the other to 'no'
                isolated_settings = ['yes'] * 2 if set_all else ['yes', 'no']
                for i in (0, 1):
                    vm_i = vm_list[i]
                    new_iface_dict = dict(
                        list(updated_iface_dict.items()) +
                        [('port',
                          "{'isolated': '%s'}" % isolated_settings[i])])
                    libvirt.modify_vm_iface(vm_i.name, 'update_iface',
                                            new_iface_dict)
                    logging.debug(virsh.dumpxml(vm_i.name).stdout_text)

            if case == 'update_iface':
                if params.get('iface_port'):
                    iface_dict = {'port': params['iface_port']}
                    for vm_i in vm_list:
                        libvirt.modify_vm_iface(vm_i.name, 'update_iface',
                                                iface_dict)
                        logging.debug(virsh.dumpxml(vm_i.name).stdout_text)
                if live_update:
                    for vm_i in vm_list:
                        vm_i.start()

                # Test Update iface with new attrs
                new_iface_dict = {}
                if params.get('new_iface_port'):
                    new_iface_dict['port'] = params['new_iface_port']
                elif params.get('del_port') == 'yes':
                    new_iface_dict['del_port'] = True
                for vm_i in vm_list:
                    updated_iface = libvirt.modify_vm_iface(
                        vm_i.name, 'get_xml', new_iface_dict)
                    result = virsh.update_device(vm_i.name,
                                                 updated_iface,
                                                 debug=True)
                    libvirt.check_exit_status(result)

            if case == 'attach_iface':
                new_ifaces = {}
                for vm_i in vm_list:
                    # Create iface xml to be attached
                    new_iface = interface.Interface('network')
                    new_iface.xml = libvirt.modify_vm_iface(
                        vm_i.name, 'get_xml',
                        {'port': params.get('new_iface_port')})
                    new_ifaces[vm_i.name] = new_iface

                    # Remove current ifaces on vm
                    vmxml_i = vm_xml.VMXML.new_from_inactive_dumpxml(vm_i.name)
                    vmxml_i.remove_all_device_by_type('interface')
                    vmxml_i.sync()
                    logging.debug(virsh.dumpxml(vm_i.name).stdout_text)

                    # Start vm for hotplug
                    vm_i.start()
                    session = vm_i.wait_for_serial_login()

                    # Hotplug iface
                    virsh.attach_device(vm_i.name,
                                        new_iface.xml,
                                        debug=True,
                                        ignore_status=False)

                    # Wait a few seconds for interface to be fully attached
                    time.sleep(5)
                    ip_l_before = session.cmd_output('ip l')
                    logging.debug(ip_l_before)
                    session.close()

            if case == 'set_network':
                utils_net.create_linux_bridge_tmux(bridge_name, iface_name)
                bridge_created = True
                net_dict = {
                    'net_forward':
                    "{'mode': 'bridge'}",
                    'net_bridge':
                    "{'name': '%s'}" % bridge_name,
                    'net_port':
                    "{'isolated': '%s'}" % params.get('net_isolated', 'yes')
                }
                prepare_network(test_net, **net_dict)

                # Modify iface to connect to newly added network
                updated_iface_dict = {
                    'type': 'network',
                    'source': "{'network': '%s'}" % test_net
                }
                for vm_i in vm_list:
                    libvirt.modify_vm_iface(vm_i.name, 'update_iface',
                                            updated_iface_dict)
                    logging.debug(virsh.domiflist(vm_i.name).stdout_text)

        if feature == 'macTableManager':
            # create network with macTableManager='libvirt'
            if create_linux_bridge:
                utils_net.create_linux_bridge_tmux(bridge_name, iface_name)
                bridge_created = True
                net_dict = {
                    'net_forward':
                    "{'mode': 'bridge'}",
                    'net_bridge':
                    "{'name': '%s', 'macTableManager': 'libvirt'}" %
                    bridge_name,
                }
            else:
                net_dict = {
                    'net_forward': "{'mode': 'nat'}",
                    'net_bridge':
                    "{'name': '%s', 'macTableManager': 'libvirt'}" %
                    bridge_name,
                    'net_ip_address': params.get('net_ip_address'),
                    'net_ip_netmask': params.get('net_ip_netmask'),
                    'dhcp_start_ipv4': params.get('dhcp_start_ipv4'),
                    'dhcp_end_ipv4': params.get('dhcp_end_ipv4')
                }
            iface_dict = {
                'type': 'network',
                'del_mac': True,
                'source': "{'network': '%s'}" % test_net
            }
            prepare_network(test_net, **net_dict)
            for i in (0, 1):
                vm_i = vm_list[i]
                libvirt.modify_vm_iface(vm_i.name, 'update_iface', iface_dict)
                logging.debug("After modify vm interface, check vm xml:\n%s",
                              virsh.dumpxml(vm_i.name).stdout_text)
        # Check ping result from vm session to host, outside, the other vm
        if check_ping:
            for vm_i in vm_list:
                if vm_i.is_dead():
                    vm_i.start()
            host_ip = utils_net.get_host_ip_address()
            ping_expect = {
                host_ip: expect_ping_host,
                out_ip: expect_ping_out,
            }
            if resume_vm:
                for vm_i in vm_list:
                    vm_i.wait_for_serial_login().close()
                    pause_resume_vm(str(vm_i.name))
            # A map of vm session and vm's ip addr
            session_n_ip = {}
            for vm_i in vm_list:
                mac = vm_xml.VMXML.get_first_mac_by_name(vm_i.name)
                sess = vm_i.wait_for_serial_login()
                vm_ip = utils_net.get_guest_ip_addr(sess, mac)
                session_n_ip[sess] = vm_ip
                logging.debug('Vm %s ip: %s', vm_i.name, vm_ip)
                if not vm_ip:
                    test.error("Got vm %s ip as None!" % vm_i.name)

            # Check ping result from each vm's session
            for i in (0, 1):
                sess = list(session_n_ip.keys())[i]
                another_sess = list(session_n_ip.keys())[1 - i]
                ping_expect[session_n_ip[another_sess]] = expect_ping_vm
                if not ping_func(sess, **ping_expect):
                    test.fail('Ping check failed')
                # Remove the other vm's ip from the ping targets so that the
                # next round of ping check does not ping the vm itself
                ping_expect.pop(session_n_ip[another_sess])

        # Some test steps after ping check
        if feature == 'port_isolated':
            if case == 'attach_iface':
                # Test detach of iface
                for vm_name_i in new_ifaces:
                    virsh.detach_device(vm_name_i,
                                        new_ifaces[vm_name_i].xml,
                                        wait_for_event=True,
                                        debug=True,
                                        ignore_status=False)

                # Check whether iface successfully detached by checking 'ip l' output
                for vm_i in vm_list:
                    session = vm_i.wait_for_serial_login()
                    ip_l_after = session.cmd_output('ip l')
                    session.close()
                    if len(ip_l_before.splitlines()) == len(
                            ip_l_after.splitlines()):
                        test.fail(
                            'Output of "ip l" is not changed after detach, '
                            'interface not successfully detached')
        if feature == 'macTableManager':
            for vm_i in vm_list:
                iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_i.name)
                tap_name = libvirt.get_ifname_host(vm_i.name, iface_mac)
                logging.debug("Check %s's params for vm %s", tap_name,
                              vm_i.name)
                res1 = check_br_tap_params(bridge_name, tap_name)
                res2 = check_fdb(iface_mac)
                if not res1 or (not res2):
                    test.fail("macTableManager function check failed!")