def _sriov_ping_cycle(ping_fn, addr_index, proto):
    """Run one ping/turn-off-SR-IOV/verify cycle for a single IP version.

    Starts a background ping between guest1 and host2, waits 2s, disables
    SR-IOV while traffic is flowing, waits again, stops the ping and then
    checks that the expected TC rules are present.

    ping_fn    -- the ping task helper to use (ping for IPv4, ping6 for IPv6)
    addr_index -- address index on the interfaces (0 = IPv4, 1 = IPv6 here)
    proto      -- protocol label passed to verify_tc_rules ('ipv4'/'ipv6')
    """
    ping_opts = {"count": 200}
    ping_proc = ping_fn((guest1, g1_nic, addr_index, {"scope": 0}),
                        (host2, h2_nic, addr_index, {"scope": 0}),
                        options=ping_opts, expect="pass", bg=True)
    ctl.wait(2)
    turn_off_sriov()
    ctl.wait(2)
    ping_proc.intr()
    verify_tc_rules(proto)


def do_test():
    """Exercise SR-IOV teardown under live traffic for the configured IP versions.

    Depending on the module-level 'ipv' alias ('ipv4', 'ipv6' or 'both'),
    runs the ping + SR-IOV-off + TC-rule-verification cycle for each
    applicable IP version.  The ipv4/ipv6 branches were previously
    duplicated inline; they now share _sriov_ping_cycle.
    """
    if ipv in ['ipv4', 'both']:
        _sriov_ping_cycle(ping, 0, 'ipv4')
    if ipv in ['ipv6', 'both']:
        _sriov_ping_cycle(ping6, 1, 'ipv6')
# When CPU pinning is requested, stop irqbalance on both machines so the
# manual IRQ affinity set below is not overridden.
if nperf_cpupin:
    m1.run("service irqbalance stop")
    m2.run("service irqbalance stop")

    dev_list = [(m1, m1_if), (m2, m2_if)]

    # this will pin devices irqs to cpu #0
    for m, d in dev_list:
        pin_dev_irqs(m, d, 0)

# Extra netperf CLI options; only pin netperf itself (-T local,remote cpu)
# when running a single stream — pinning parallel streams to one CPU would
# serialize them.
nperf_opts = ""
if nperf_cpupin and nperf_num_parallel == 1:
    nperf_opts = " -T%s,%s" % (nperf_cpupin, nperf_cpupin)

# Let the machines settle before measuring.
ctl.wait(15)


def configure_ipsec(ciph_alg, ciph_key, hash_alg, hash_key, ip_version):
    """Configure IPsec between m1 and m2 for the given IP version.

    ciph_alg/ciph_key -- cipher algorithm name and key for the xfrm state
    hash_alg/hash_key -- auth algorithm name and key for the xfrm state
    ip_version        -- "ipv4" selects the v4 addresses, anything else v6

    NOTE(review): this function appears truncated at a chunk boundary —
    it flushes the existing xfrm policy/state but the code that installs
    the new policy/state is not visible here; confirm against the full file.
    """
    if ip_version == "ipv4":
        m1_addr = m1_if_addr
        m2_addr = m2_if_addr
    else:
        m1_addr = m1_if_addr6
        m2_addr = m2_if_addr6

    # configure policy and state
    m1.run("ip xfrm policy flush")
    m1.run("ip xfrm state flush")
    m2.run("ip xfrm policy flush")
    m2.run("ip xfrm state flush")
def netperf(src, dst, server_opts=None, client_opts=None, baseline=None,
            timeout=60):
    """Run a netperf test between two endpoints and return the client result.

    Starts a netperf server on dst, runs a netperf client on src against
    it, then interrupts the server.

    Keyword arguments:
    src -- tuple of (HostAPI, InterfaceAPI/DeviceAPI, ip address index) or
           (HostAPI, InterfaceAPI/DeviceAPI, ip address index,
            ip addr selector) for the client side
    dst -- tuple of the same shape as src for the server side
    server_opts -- dictionary of additional options for the netperf server
        can't contain 'bind' or 'role'
    client_opts -- dictionary of additional options for the netperf client
        can't contain 'bind', 'role', 'netperf_server', 'threshold'
        or 'threshold_deviation'
    baseline -- optional dictionary with keys 'threshold' and
        'threshold_deviation' that specifies the baseline of the netperf test
    timeout -- integer number of seconds specifing the maximum amount of
        time for the test, defaults to 60
    """
    # Copy caller-supplied dicts so the 'role'/'bind' updates below never
    # leak back to the caller (also avoids the mutable-default pitfall of
    # the previous `={}` defaults).
    server_opts = dict(server_opts) if server_opts else {}
    if 'bind' in server_opts or 'role' in server_opts:
        raise Exception("server_opts can't contain keys 'bind' and 'role'")

    client_opts = dict(client_opts) if client_opts else {}
    if 'bind' in client_opts or \
       'role' in client_opts or \
       'netperf_server' in client_opts:
        raise Exception("client_opts can't contain keys 'bind', 'role' "
                        "and 'netperf_server'")

    if baseline is None:
        baseline = {}

    # A valid endpoint spec has 3 or 4 elements.  The old check allowed
    # len(src) == 2, which skipped both unpack branches and crashed later
    # with a NameError on client_ip; reject it up front like dst does.
    if not isinstance(src, tuple) or len(src) < 3 or len(src) > 4:
        raise Exception('Invalid source specification')
    try:
        if len(src) == 3:
            h1, if1, addr_index1 = src
            client_ip = if1.get_ip(addr_index1)
        else:
            h1, if1, addr_index1, addr_selector1 = src
            client_ip = if1.get_ip(addr_index1, selector=addr_selector1)
    except Exception:
        raise Exception('Invalid source specification')

    if not isinstance(dst, tuple) or len(dst) < 3 or len(dst) > 4:
        raise Exception('Invalid destination specification')
    try:
        if len(dst) == 3:
            h2, if2, addr_index2 = dst
            server_ip = if2.get_ip(addr_index2)
        else:
            h2, if2, addr_index2, addr_selector2 = dst
            # keyword form for consistency with the source branch above
            server_ip = if2.get_ip(addr_index2, selector=addr_selector2)
    except Exception:
        raise Exception('Invalid destination specification')

    server_opts["role"] = "server"
    server_opts["bind"] = server_ip

    client_opts["role"] = "client"
    client_opts["bind"] = client_ip
    client_opts["netperf_server"] = server_ip

    # Optional baseline thresholds are forwarded to the client module.
    if "threshold" in baseline:
        client_opts["threshold"] = baseline["threshold"]
    if "threshold_deviation" in baseline:
        client_opts["threshold_deviation"] = baseline["threshold_deviation"]

    netserver_mod = ctl.get_module("Netperf", options=server_opts)
    netclient_mod = ctl.get_module("Netperf", options=client_opts)

    # Give the server a moment to come up before starting the client.
    netserver = h2.run(netserver_mod, bg=True)
    ctl.wait(2)
    result = h1.run(netclient_mod, timeout=timeout)
    netserver.intr()
    return result
netperf_cli_tcp.update_options({"msg_size" : nperf_msg_size}) netperf_cli_udp.update_options({"msg_size" : nperf_msg_size}) netperf_cli_sctp.update_options({"msg_size" : nperf_msg_size}) netperf_cli_tcp6.update_options({"msg_size" : nperf_msg_size}) netperf_cli_udp6.update_options({"msg_size" : nperf_msg_size}) netperf_cli_sctp6.update_options({"msg_size" : nperf_msg_size}) # if we will run SCTP test make sure the SCTP will go out through the test # interfaces only if nperf_protocols.find("sctp") > -1: m1.run("iptables -I OUTPUT ! -o %s -p sctp -j DROP" % test_if1.get_devname()) m2.run("iptables -I OUTPUT ! -o %s -p sctp -j DROP" % test_if2.get_devname()) ctl.wait(15) for setting in offload_settings: dev_features = "" for offload in setting: dev_features += " %s %s" % (offload[0], offload[1]) m1.run("ethtool -K %s %s" % (test_if1.get_devname(), dev_features)) m2.run("ethtool -K %s %s" % (test_if2.get_devname(), dev_features)) if ipv in [ 'ipv4', 'both' ]: m1.run(ping_mod) server_proc = m1.run(netperf_srv, bg=True) ctl.wait(2) if nperf_protocols.find("tcp") > -1:
"-L %s -6" % h2.get_ip("vlan10", 1) }) for offload in offloads: for state in ["on", "off"]: h1.run("ethtool -K %s %s %s" % (h1.get_devname("nic"), offload, state)) g1.run("ethtool -K %s %s %s" % (g1.get_devname("guestnic"), offload, state)) h2.run("ethtool -K %s %s %s" % (h2.get_devname("nic"), offload, state)) if ipv == 'ipv4': g1.run(ping_mod) server_proc = g1.run(netperf_srv, bg=True) ctl.wait(2) h2.run(netperf_cli_tcp, timeout=70) h2.run(netperf_cli_udp, timeout=70) server_proc.intr() elif ipv == 'ipv6': g1.run(ping_mod6) server_proc = g1.run(netperf_srv6, bg=True) ctl.wait(2) h2.run(netperf_cli_tcp6, timeout=70) h2.run(netperf_cli_udp6, timeout=70) server_proc.intr() else: g1.run(ping_mod) server_proc = g1.run(netperf_srv, bg=True)
#============================================
# Host2 wait for guest start
#============================================
guest = paramiko.SSHClient()
guest.set_missing_host_key_policy(paramiko.AutoAddPolicy())

# Poll the guest over SSH while it boots: up to 60 attempts, 5 s apart.
for remaining in range(60, 0, -1):
    logging.info("Connecting to guest, %d tries remaining" % remaining)
    try:
        guest.connect(guest_hostname,
                      username=guest_username,
                      password=guest_password)
        break
    except paramiko.ssh_exception.NoValidConnectionsError as e:
        logging.debug(str(e))
        ctl.wait(5)

#============================================
# Host2 add openvswitch flows between DPDK NICs and Guest NICs
#============================================
# Replace any existing flows with a fixed cross-wiring of DPDK ports
# (11, 12) to the guest vhostuser ports (21, 22) and back.
for flow_cmd in ("ovs-ofctl del-flows br0",
                 "ovs-ofctl add-flow br0 in_port=11,action=21",
                 "ovs-ofctl add-flow br0 in_port=21,action=11",
                 "ovs-ofctl add-flow br0 in_port=12,action=22",
                 "ovs-ofctl add-flow br0 in_port=22,action=12"):
    h2.run(flow_cmd)

#============================================
# Guest configure DPDK for vhostuser nics
#============================================
from lnst.Controller.Task import ctl

m1 = ctl.get_host("testmachine1")
m1.sync_resources(modules=["Custom"], tools=[])

# Kick off an endless shell loop that echoes "test" once a second, let it
# run for ~5 seconds, then interrupt it and collect its stdout.
bg_job = m1.run("while true; do echo test; sleep 1; done", bg=True)
ctl.wait(5)
bg_job.intr()

captured = bg_job.get_result()["res_data"]["stdout"]

# Report failure unless the background job actually produced the expected
# output — i.e. the recipe passes only when "test" appeared on stdout.
verdict = ctl.get_module("Custom", options={"fail": True})
if "test" in captured:
    verdict.update_options({"fail": False})
m1.run(verdict)
}) netperf_tcp = ctl.get_module("Netperf", options={ "role": "client", "netperf_server": hostA.get_ip("testiface"), "duration": 60, "testname": "TCP_STREAM", "netperf_opts": "-L %s" % hostB.get_ip("testiface") }) netperf_udp = ctl.get_module("Netperf", options={ "role": "client", "netperf_server": hostA.get_ip("testiface"), "duration": 60, "testname": "UDP_STREAM" }) hostA.run(ping_mod, timeout=500) server_proc = hostA.run(netserver, bg=True) ctl.wait(2) hostB.run(netperf_tcp, timeout=100) hostB.run(netperf_udp, timeout=100) server_proc.intr()
h1_nic1 = h1.get_interface("if1") h1_nic2 = h1.get_interface("if2") h2_nic1 = h2.get_interface("if1") h2_nic2 = h2.get_interface("if2") #============================================ # WARMP UP - teach switch about mac addresses #============================================ h1_nic1.set_addresses(["192.168.1.1/24"]) h1_nic2.set_addresses(["192.168.1.3/24"]) h2_nic1.set_addresses(["192.168.1.2/24"]) h2_nic2.set_addresses(["192.168.1.4/24"]) ctl.wait(5) ping_opts = {"count": 100, "interval": 0.1, "limit_rate": 20} pings = [] pings.append( ping((h1, h1_nic1, 0, { "scope": 0 }), (h2, h2_nic1, 0, { "scope": 0 }), options=ping_opts, bg=True)) pings.append( ping((h1, h1_nic2, 0, { "scope": 0
h2_dev.set_mtu(mtu) nperf_opts = "" if nperf_cpupin: h1.run("service irqbalance stop") h2.run("service irqbalance stop") # this will pin devices irqs to cpu #0 for m, d in [(h1, h1_nic), (h2, h2_nic)]: pin_dev_irqs(m, d, 0) if nperf_cpupin and nperf_num_parallel == 1: nperf_opts = " -T%s,%s" % (nperf_cpupin, nperf_cpupin) ctl.wait(15) netperf_clients = [] netperf_servers = [] netperf_clients6 = [] netperf_servers6 = [] for h1_dev, h2_dev in devices: netperf_clients.append(ctl.get_module("Netperf", options={ "role" : "client", "netperf_server": h2_dev.get_ip(0), "bind": h1_dev.get_ip(0), "duration" : netperf_duration, "testname" : "TCP_STREAM",
# NOTE(review): this 'raise' is the tail of get_vxlan_dev(), whose 'def'
# line lies before this chunk — it is reached when no vxlan device exists.
raise RuntimeError("Cannot find vxlan device")


def verify_tc_rules(proto):
    """Verify the offloaded TC tunnel rules for the given protocol.

    Checks host1 for both directions of vxlan tunnel traffic between the
    guest NIC and host2's NIC and records a custom pass/fail result for each:
      - an encap rule ('tunnel_key set') on tap1
      - a decap rule ('tunnel_key unset') on the vxlan device

    proto -- protocol label used for rule matching and in the result
             description (e.g. 'ipv4' or 'ipv6')
    """
    g1_mac = g1_nic.get_hwaddr()
    h2_mac = h2_nic.get_hwaddr()

    # encap rule
    m = tl.find_tc_rule(host1, 'tap1', g1_mac, h2_mac, proto,
                        'tunnel_key set')
    desc = "TC rule %s tunnel_key set" % proto
    if m:
        tl.custom(host1, desc)
    else:
        tl.custom(host1, desc, 'ERROR: cannot find tc rule')

    vxlan_dev = get_vxlan_dev(host1)

    # decap rule
    m = tl.find_tc_rule(host1, vxlan_dev, h2_mac, g1_mac, proto,
                        'tunnel_key unset')
    desc = "TC rule %s tunnel_key unset" % proto
    if m:
        tl.custom(host1, desc)
    else:
        tl.custom(host1, desc, 'ERROR: cannot find tc rule')


# sleep a moment before testing.
ctl.wait(3)

# Warm-up pass first (results ignored), then the measured pass.
do_pings(warmup=True)
do_pings()
h1_nic2 = h1.get_interface("if2") h2_nic1 = h2.get_interface("if1") h2_nic2 = h2.get_interface("if2") #============================================ # WARMP UP - teach switch about mac addresses #============================================ h1_nic1.set_addresses(["192.168.1.1/24"]) h1_nic2.set_addresses(["192.168.1.3/24"]) h2_nic1.set_addresses(["192.168.1.2/24"]) h2_nic2.set_addresses(["192.168.1.4/24"]) ctl.wait(5) ping_opts = {"count": 100, "interval": 0.1, "limit_rate": 20} pings = [] pings.append(ping((h1, h1_nic1, 0, {"scope": 0}), (h2, h2_nic1, 0, {"scope": 0}), options=ping_opts, bg=True)) pings.append(ping((h1, h1_nic2, 0, {"scope": 0}), (h2, h2_nic2, 0, {"scope": 0}), options=ping_opts, bg=True)) pings.append(ping((h2, h2_nic1, 0, {"scope": 0}), (h1, h1_nic1, 0, {"scope": 0}), options=ping_opts, bg=True)) pings.append(ping((h2, h2_nic2, 0, {"scope": 0}),
from lnst.Controller.Task import ctl

m1 = ctl.get_host("testmachine1")
m1.sync_resources(modules=["Custom"], tools=[])

# Start a background shell loop printing "test" every second; stop it
# after ~5 seconds and grab whatever it wrote to stdout.
job = m1.run("while true; do echo test; sleep 1; done", bg=True)
ctl.wait(5)
job.intr()

stdout = job.get_result()["res_data"]["stdout"]

# Default the Custom module to failing; clear the failure flag only when
# the expected marker string showed up in the captured output.
result_mod = ctl.get_module("Custom", options={"fail": True})
if stdout.find("test") != -1:
    result_mod.update_options({"fail": False})
m1.run(result_mod)
h1_dev.set_mtu(mtu) h2_dev.set_mtu(mtu) nperf_opts = "" if nperf_cpupin: h1.run("service irqbalance stop") h2.run("service irqbalance stop") # this will pin devices irqs to cpu #0 for m, d in [(h1, h1_nic), (h2, h2_nic)]: pin_dev_irqs(m, d, 0) if nperf_cpupin and nperf_num_parallel == 1: nperf_opts = " -T%s,%s" % (nperf_cpupin, nperf_cpupin) ctl.wait(15) netperf_clients = [] netperf_servers = [] netperf_clients6 = [] netperf_servers6 = [] for h1_dev, h2_dev in devices: netperf_clients.append( ctl.get_module("Netperf", options={ "role": "client", "netperf_server": h2_dev.get_ip(0), "bind": h1_dev.get_ip(0), "duration": netperf_duration, "testname": "TCP_STREAM",