def Trigger(tc):
    """Fuzz the PCIe space of every Naples Eth device on each test node.

    For each node: collect the PCI bus addresses of all Naples host
    interfaces via ethtool, unload the drivers, then run the register-write
    and memory-write fuzzers against those devices.

    :param tc: testcase object (uses tc.nodes; sets tc.cmd_descr, tc.resp)
    :return: api.types.status.SUCCESS or api.types.status.FAILURE
    """
    tc.cmd_descr = "PCIE Fuzzing"
    for node in tc.nodes:
        pci_address = []
        interfaces = api.GetNaplesHostInterfaces(node)
        # Create a list of Naples PCI addresses for this node.
        for interface in interfaces:
            # "bus-info: 0000:b5:00.0" split on ':' puts the bus in $3 and
            # slot.function in $4, yielding e.g. "b5:00.0".
            cmd = "ethtool -i " + interface + \
                  "|awk -F \":\" '/bus-info/ { print $3 \":\" $4}'"
            api.Logger.info(f"On {node} run: {cmd}")
            if send_command(tc, cmd, node) is api.types.status.FAILURE:
                return api.types.status.FAILURE
            if tc.resp.commands[0].stdout is None:
                # BUG FIX: the message was missing its f-prefix, so the
                # literal text "{interface}" was logged.
                api.Logger.error(
                    f"ethtool command on {interface} returned null stdout")
                return api.types.status.FAILURE
            address = tc.resp.commands[0].stdout.rstrip()
            pci_address.append(address)
        api.Logger.info(pci_address)
        # Unload drivers before writing randomly to PCI space.
        host.UnloadDriver(api.GetNodeOs(node), node, "all")
        # Run reg_write_random fuzzing on every Naples Eth PCI device.
        if req_write_random(tc, node, pci_address) != api.types.status.SUCCESS:
            return api.types.status.FAILURE
        # Run mem_write_random fuzzing on every Naples Eth PCI device.
        if mem_write_random(tc, node) != api.types.status.SUCCESS:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def LoadDriver(node_names, node_os, target_version='latest'):
    """Download the requested driver release and (re)load it on each node.

    :param node_names: iterable of node hostnames to update
    :param node_os: OS type of the nodes (OS_TYPE_LINUX / OS_TYPE_ESX)
    :param target_version: release tag to install, or 'latest' to skip the
        asset download
    :return: api.types.status.SUCCESS or api.types.status.FAILURE
    """
    if target_version == 'latest':
        api.Logger.info('Target version is latest - nothing to change')
    else:
        resp = api.DownloadAssets(release_version=target_version)
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to download assets for %s" % target_version)
            return api.types.status.FAILURE
    # BUG FIX: manifest_file was only assigned in the non-'latest' branch,
    # so the loop below raised NameError when target_version == 'latest'.
    manifest_file = os.path.join(api.GetTopDir(), 'images',
                                 target_version + '.json')
    for node in node_names:
        if node_os == OS_TYPE_LINUX:
            # BUG FIX: this function has no 'tc' in scope; the original used
            # tc.target_version / tc.os and passed tc to the helper, whose
            # signature is (node, node_os, manifest_file).
            if __load_linux_driver(node, node_os, manifest_file) == api.types.status.SUCCESS:
                api.Logger.info("Release Driver %s reload on %s" % (target_version, node))
            else:
                api.Logger.error("Failed to load release driver %s reload on %s" % (target_version, node))
                return api.types.status.FAILURE
            # This is required to bring the testbed into operational state;
            # after driver unload, interfaces need to be re-initialized.
            wl_api.ReAddWorkloads(node)
        elif node_os == OS_TYPE_ESX:
            # NOTE(review): the original also set tc.driver_changed = True
            # here, but 'tc' is not a parameter of this function — restore
            # that flag on the correct object if callers depend on it.
            host.UnloadDriver(node_os, node)
            if __load_esxi_driver(node, node_os, manifest_file) == api.types.status.SUCCESS:
                api.Logger.info("Release Driver %s reload on %s" % (target_version, node))
            else:
                api.Logger.error("Failed to load release driver %s reload on %s" % (target_version, node))
                return api.types.status.FAILURE
            api.RestartNodes([node])
    return api.types.status.SUCCESS
def bsd_ethtool_rx_sg_size_cmd(node, intf, size):
    """Build commands to set the ionic RX scatter-gather size on a BSD node.

    On a Naples node the driver is unloaded first and the driver reload
    commands are returned with hw.ionic.rx_sg_size tuned; on any other node
    a blank command string is returned.
    """
    if not api.IsNaplesNode(node):
        return " "
    tunables = {'hw.ionic.rx_sg_size': size}
    host.UnloadDriver(host.OS_TYPE_BSD, node)
    return naples.InsertIonicDriverCommands(os_type=host.OS_TYPE_BSD,
                                            **tunables)
def Trigger(tc):
    """Run the BSD ionic firmware-update flow on every node in the testcase.

    Unloads ionic and ionic_fw, then per node: sets the fw_update_ver kenv,
    reloads the drivers, and kicks dev.<sysctl>.fw_update=1 on each Naples
    interface.  BSD-only; returns IGNORED on other OSes.
    """
    if tc.os != host.OS_TYPE_BSD:
        api.Logger.info("Not implemented")
        return api.types.status.IGNORED
    # Unload ionic and ionic_fw
    for node in tc.nodes:
        host.UnloadDriver(tc.os, node, "all")
        host.UnloadDriver(tc.os, node, "ionic_fw")
    for node in tc.nodes:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        # XXX: Find the Naples_fw.tar version.
        api.Trigger_AddHostCommand(
            req, node, "kenv hw.ionic.fw_update_ver=FILL_FW_VERSION")
        resp = api.Trigger(req)
        if resp is None:
            api.Logger.info("Failed kenv hw.ionic.fw_update_ver=X")
            return api.types.status.FAILURE
        if host.LoadDriver(tc.os, node) is api.types.status.FAILURE:
            api.Logger.info("ionic already loaded")
            return api.types.status.FAILURE
        if LoadFwDriver(tc.os, node) is api.types.status.FAILURE:
            return api.types.status.FAILURE
        for i in api.GetNaplesHostInterfaces(node):
            #
            # In local testing, this step completes in 35-40s, but the default
            # timeout is 30s. Therefore, increase the timeout to 60s.
            #
            # The iota logs may contain messages such as "CHECK_ERR: Nicmgr
            # crashed for host: node2?" Please note, this is due to finding
            # the string "fw heartbeat stuck" in the host dmesg. This is
            # currently the expected behavior when doing fw update. If nicmgr
            # does crash, than expect subsequent tests to fail, otherwise the
            # CHECK_ERR message in the iota test logs may be ignored.
            #
            api.Trigger_AddHostCommand(req, node,
                                       "sysctl dev.%s.fw_update=1" %
                                       host.GetNaplesSysctl(i),
                                       timeout=60)
        # NOTE(review): `req` was already triggered above with the kenv
        # command still in it; re-triggering re-runs that command along with
        # the fw_update sysctls — confirm this is intentional.
        tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def do_lif_reset_test(node, os):
    """Exercise LIF reset together with driver reload on one node.

    Three loops of: reload the ionic driver, re-add workloads, then for each
    interface perform a single LIF reset and verify the VLAN and filter
    lists survive it.  Afterwards, stress-test LIF reset by firing 10 resets
    per interface in the background.

    :param node: node hostname to test
    :param os: node OS type, passed to host.UnloadDriver/LoadDriver
    :return: api.types.status.SUCCESS or api.types.status.FAILURE
    """
    for i in range(3):
        api.Logger.info("LIF reset and driver reload test loop %d" % i)
        if host.UnloadDriver(os, node, "all") is api.types.status.FAILURE:
            api.Logger.error("ionic unload failed loop %d" % i)
            return api.types.status.FAILURE
        if host.LoadDriver(os, node) is api.types.status.FAILURE:
            api.Logger.error("ionic load failed loop %d" % i)
            return api.types.status.FAILURE
        # Interfaces must be re-initialized after the driver reload.
        wl_api.ReAddWorkloads(node)
        if api.GetNaplesHostInterfaces(node) is None:
            api.Logger.error("No ionic interface after loop %d" % i)
            return api.types.status.FAILURE
        for intf in api.GetNaplesHostInterfaces(node):
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            # Snapshot VLAN/filter state before the reset for comparison.
            vlan_list = getVlanList(node, intf)
            filter_list = getFilterList(node, intf)
            # Single LIF reset
            api.Trigger_AddHostCommand(
                req, node,
                "sysctl dev.%s.reset=1" % (host.GetNaplesSysctl(intf)))
            resp = api.Trigger(req)
            # Give the LIF time to come back before re-reading state.
            time.sleep(5)
            vlan_list1 = getVlanList(node, intf)
            filter_list1 = getFilterList(node, intf)
            if vlan_list != vlan_list1:
                api.Logger.error(
                    "VLAN list doesn't match for %s, before: %s after: %s" %
                    (intf, str(vlan_list), str(vlan_list1)))
                return api.types.status.FAILURE
            if filter_list != filter_list1:
                api.Logger.error(
                    "Filter list doesn't match for %s, before: %s after: %s" %
                    (intf, str(filter_list), str(filter_list1)))
                return api.types.status.FAILURE
            api.Logger.info(
                "Success running LIF reset test on %s VLAN: %s, Filters; %s" %
                (intf, str(vlan_list), str(filter_list)))
    # Now stress test LIF reset
    for intf in api.GetNaplesHostInterfaces(node):
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddHostCommand(
            req, node,
            "for ((i=0;i<10;i++)); do sysctl dev.%s.reset=1; done &" %
            (host.GetNaplesSysctl(intf)))
        # Some of LIF reset will fill fail since it will be running in background
        # with reload of driver.
        resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Cycle the eth driver through unload/load `loops` times on every node."""
    loops = getattr(tc.args, "loops", 1)
    # TODO: add traffic to the test case

    # First pass: unload everything so each node starts from a known state.
    for node in tc.nodes:
        host.UnloadDriver(tc.os, node, "all")

    # For every node, repeat the unload/load sequence.
    for node in tc.nodes:
        for _ in range(loops):
            unloaded = host.UnloadDriver(tc.os, node, "eth")
            if unloaded is api.types.status.FAILURE:
                return api.types.status.FAILURE
            loaded = host.LoadDriver(tc.os, node)
            if loaded is api.types.status.FAILURE:
                return api.types.status.FAILURE
    return api.types.status.SUCCESS
def bsd_ethtool_queue_size_cmd(node, intf, queue_type, size):
    """Build commands to set the ionic queue count on a BSD Naples node.

    :param node: testbed node name
    :param intf: interface name (not used when building the BSD commands)
    :param queue_type: "tx" or "rx"
    :param size: number of queues to configure
    :return: driver reload commands for a Naples node, else " "
    """
    args = { }
    if queue_type == "tx":
        # NOTE(review): both branches set the same tunable
        # 'hw.ionic.max_queues', so queue_type currently has no effect —
        # confirm whether tx and rx were meant to use distinct tunables.
        args['hw.ionic.max_queues'] = size
    else:
        args['hw.ionic.max_queues'] = size
    if api.IsNaplesNode(node):
        # The tunable only takes effect across a driver reload.
        host.UnloadDriver(host.OS_TYPE_BSD, node)
        cmds = naples.InsertIonicDriverCommands(os_type = host.OS_TYPE_BSD, **args)
        return cmds
    return " " #.join(["ethtool", "-L", intf, queue_type, str(size)])
def bsd_legacy_intr_mode_cmd(node, intf, op):
    """Build commands toggling legacy (INTx) interrupt mode on a BSD node.

    op == "on" disables MSI-X (hw.ionic.enable_msix=0), anything else
    re-enables it.  Non-Naples nodes get a blank command string.
    """
    msix = 0 if op == "on" else 1
    if not api.IsNaplesNode(node):
        return " "
    host.UnloadDriver(host.OS_TYPE_BSD, node)
    return naples.InsertIonicDriverCommands(
        os_type=host.OS_TYPE_BSD, **{'hw.ionic.enable_msix': msix})
def Teardown(tc):
    """Reload the eth driver on the tested host and restore its workloads."""
    hostname = tc.host
    os_name = api.GetNodeOs(hostname)
    # Bounce the driver; bail out as soon as either step fails.
    for step in (lambda: host.UnloadDriver(os_name, hostname, "eth"),
                 lambda: host.LoadDriver(os_name, hostname)):
        if step() is api.types.status.FAILURE:
            return api.types.status.FAILURE
    # Restore the workloads on the one host we tested.
    hw_config.ReAddWorkloads(hostname)
    return api.types.status.SUCCESS
def Verify(tc):
    """Restore the driver and workloads on the first tested node."""
    target = tc.nodes[0]
    # Restore the workloads on the one host we tested: unload, reload,
    # then re-add the workloads.
    if host.UnloadDriver(tc.os, target, "eth") is api.types.status.FAILURE:
        return api.types.status.FAILURE
    if host.LoadDriver(tc.os, target) is api.types.status.FAILURE:
        return api.types.status.FAILURE
    bringup.ReAddWorkloads(target)
    return api.types.status.SUCCESS
def __load_linux_driver(node, node_os, manifest_file):
    """Copy, build, and load the ionic driver package described in a manifest.

    :param node: node hostname to install the driver on
    :param node_os: OS type (OS_TYPE_LINUX or OS_TYPE_BSD)
    :param manifest_file: path to the release image manifest (JSON)
    :return: api.types.status.SUCCESS or api.types.status.FAILURE
    """
    image_manifest = parser.JsonParse(manifest_file)
    # BUG FIX: the original indexed list(filter(...))[0] before the None
    # check, raising IndexError when the manifest had no entry for node_os.
    matching = [x for x in image_manifest.Drivers if x.OS == node_os]
    driver_images = matching[0].Images[0] if matching else None
    if driver_images is None:
        api.Logger.error("Unable to load image manifest")
        return api.types.status.FAILURE
    # BUG FIX: the path was joined against the undefined name 'Gl'; build it
    # from the repo top dir, consistent with how manifest paths are formed.
    drImgFile = os.path.join(api.GetTopDir(), driver_images.drivers_pkg)
    api.Logger.info("Fullpath for driver image: " + drImgFile)
    resp = api.CopyToHost(node, [drImgFile], "")
    if not api.IsApiResponseOk(resp):
        api.Logger.error("Failed to copy %s" % drImgFile)
        return api.types.status.FAILURE
    rundir = os.path.basename(driver_images.drivers_pkg).split('.')[0]
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddHostCommand(req, node,
                               "tar -xf " + os.path.basename(driver_images.drivers_pkg))
    api.Trigger_AddHostCommand(req, node, "./build.sh", rundir=rundir)
    resp = api.Trigger(req)
    if not api.IsApiResponseOk(resp):
        api.Logger.error("TriggerCommand for driver build failed")
        return api.types.status.FAILURE
    for cmd in resp.commands:
        if cmd.exit_code != 0 and cmd.command != './build.sh':
            # build.sh could fail - ignored (FIXME)
            api.Logger.error("Failed to exec cmds to build/load new driver")
            return api.types.status.FAILURE
    api.Logger.info("New driver image is built on target host. Prepare to load")
    # A failed unload is logged but not fatal: the insmod/kldload below
    # still gets its chance.
    if host.UnloadDriver(node_os, node) != api.types.status.SUCCESS:
        api.Logger.error("Failed to unload current driver - proceeding")
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    if node_os == OS_TYPE_LINUX:
        api.Trigger_AddHostCommand(req, node,
                                   "insmod " + os.path.join(rundir, "drivers/eth/ionic/ionic.ko"))
    elif node_os == OS_TYPE_BSD:
        api.Trigger_AddHostCommand(req, node,
                                   "kldload " + os.path.join(rundir, "drivers/eth/ionic/ionic.ko"))
    resp = api.Trigger(req)
    if not api.Trigger_IsSuccess(resp):
        api.Logger.error("TriggerCommand for driver installation failed")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run the eth driver unload/load cycle test on BSD or Linux nodes."""
    if tc.os not in (host.OS_TYPE_BSD, host.OS_TYPE_LINUX):
        api.Logger.info("Not implemented for %s" % tc.os)
        return api.types.status.IGNORED
    loops = getattr(tc.args, "loops", 1)
    # TODO: add traffic to the test case

    # Initial pass: unload everything so each node starts clean.
    for node in tc.nodes:
        host.UnloadDriver(tc.os, node, "all")

    # Cycle each node through unload/load `loops` times.
    for node in tc.nodes:
        for _ in range(loops):
            if host.UnloadDriver(tc.os, node, "eth") is api.types.status.FAILURE:
                return api.types.status.FAILURE
            if host.LoadDriver(tc.os, node) is api.types.status.FAILURE:
                return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Teardown(tc):
    """Reload the eth driver and re-add workloads on every node (BSD only)."""
    if tc.os != host.OS_TYPE_BSD:
        api.Logger.info("Not implemented")
        return api.types.status.IGNORED
    # For every node, run the unload/load sequence once.
    for node in tc.nodes:
        # `or` short-circuits, so LoadDriver is skipped when unload failed —
        # same order of operations as checking each step separately.
        failed = (host.UnloadDriver(tc.os, node, "eth") is api.types.status.FAILURE
                  or host.LoadDriver(tc.os, node) is api.types.status.FAILURE)
        if failed:
            return api.types.status.FAILURE
        # This is required to bring the testbed into operational state;
        # after driver unload, interfaces need to be re-initialized.
        wl_api.ReAddWorkloads(node)
    return api.types.status.SUCCESS
def Teardown(tc):
    """Undo the SR-IOV test setup and restore both hosts.

    Clears the remote interface IPs, deletes the VFs created on host1, then
    reloads the eth driver and re-adds workloads on both hosts.
    """
    # Clear the ip addr on the remote interface.
    ClearIps(tc.host2, tc.pf_2)
    # Delete the VFs created for the test.
    DeleteVFs(tc.host1, tc.pf_1)
    for hostname in (tc.host1, tc.host2):
        node_os = api.GetNodeOs(hostname)
        # Restore the workloads on each tested host: bounce the driver,
        # then re-add the workloads.
        if host.UnloadDriver(node_os, hostname, "eth") is api.types.status.FAILURE:
            return api.types.status.FAILURE
        if host.LoadDriver(node_os, hostname) is api.types.status.FAILURE:
            return api.types.status.FAILURE
        hw_config.ReAddWorkloads(hostname)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Copy the RPM archive and test script to each Linux node and run it.

    Per-node failures are recorded but do not stop the loop, so every node
    is attempted before the aggregate result is returned.

    :param tc: testcase object (uses tc.nodes)
    :return: api.types.status.SUCCESS if all nodes passed, else FAILURE
    """
    result = api.types.status.SUCCESS
    for node in tc.nodes:
        host.UnloadDriver(api.GetNodeOs(node), node, "all")
        if api.GetNodeOs(node) != "linux":
            api.Logger.error("Expected Linux node")
            result = api.types.status.FAILURE
            continue
        # Copy rpm tar to host.
        resp = api.CopyToHost(node, [LOCAL_TAR], "")
        if resp is None:
            api.Logger.error("Failed to copy RPMs")
            result = api.types.status.FAILURE
            continue
        # Untar the RPM archive (f-prefix dropped: no placeholders).
        cmd = "tar -xvf linux-rpms.tar.xz"
        if send_command(tc, cmd, node) is api.types.status.FAILURE:
            result = api.types.status.FAILURE
            continue
        # Copy the test script.
        resp = api.CopyToHost(node, [SCRIPT_PWD], "")
        if resp is None:
            # BUG FIX: this failure was logged at info level with the RPM
            # copy's message; log it as an error about the script copy.
            api.Logger.error("Failed to copy test script")
            result = api.types.status.FAILURE
            continue
        # Run the test script.
        cmd = f"sudo ./{SCRIPT}"
        if send_command(tc, cmd, node) is api.types.status.FAILURE:
            result = api.types.status.FAILURE
            continue
    return result
def Trigger(tc):
    """Test multicast/LIF behavior across a driver unload/reload cycle.

    First runs the loaded-driver checks on every Naples host, then for each
    host: arms the background (unloaded-driver) tests, bounces the driver,
    and gathers their results.  Any accumulated error fails the test.
    """
    error_count = 0
    tc.intf_id = {}
    # Assume that the driver is loaded for the first round of checks.
    for node in api.GetNaplesHostnames():
        error_count += testLoadedMulticast(node, 2)
        error_count += testLoadedLifs(tc, node)
    for node in api.GetNaplesHostnames():
        # Set up background tests.  Each of these seems to take about 4
        # seconds, so be sure to increase the command delays for each when
        # another is added.
        prepUnloadedMulticastTest(node)
        prepUnloadedLifs(tc, node)
        # Unload the driver and give the background tests time to finish.
        api.Logger.info("Start driver unload on " + node)
        if host.UnloadDriver(tc.os, node, "eth") is api.types.status.FAILURE:
            return api.types.status.FAILURE
        time.sleep(10)
        # Restart the driver and gather the results.
        if host.LoadDriver(tc.os, node) is api.types.status.FAILURE:
            return api.types.status.FAILURE
        wl_api.ReAddWorkloads(node)
        api.Logger.info("Driver reload on " + node)
        error_count += gatherUnloadedMulticastTest(node)
        error_count += gatherUnloadedLifs(tc, node)
    return (api.types.status.SUCCESS if error_count == 0
            else api.types.status.FAILURE)
def Setup(tc):
    """Map the rxflowhash iterator to RSS/iperf parameters and pick workloads.

    Derives the toeplitz RSS enum, the IP version, and the L4 protocol from
    tc.iterators.rxflowhash, then selects the Naples receiving node and a
    client/server workload pair, reloading the server's driver to clear
    stats.  Sets tc.skip when no workload pairs exist.

    :param tc: testcase object
    :return: api.types.status.SUCCESS, FAILURE, or SUCCESS-with-tc.skip
    """
    # Map iterators from IOTA infra to the enum for the toeplitz utilities.
    iterators = {'tcp4': RSS.IPV4_TCP, 'udp4': RSS.IPV4_UDP,
                 'tcp6': RSS.IPV6_TCP, 'udp6': RSS.IPV6_UDP,
                 'ip6': RSS.IPV6, 'ip4tcp': RSS.IPV4, 'ip4udp': RSS.IPV4,
                 'ip6tcp': RSS.IPV6, 'ip6udp': RSS.IPV6}
    tc.rss_enum = iterators.get(tc.iterators.rxflowhash, "none")

    # iperf options for iterators: IPv4 vs IPv6.
    ip_proto_iterators = {'tcp4': 'v4', 'udp4': 'v4', 'tcp6': 'v6',
                          'udp6': 'v6', 'ip4tcp': 'v4', 'ip4udp': 'v4',
                          'ip6tcp': 'v6', 'ip6udp': 'v6'}
    tc.tc_ip_proto = ip_proto_iterators.get(tc.iterators.rxflowhash, "none")

    # UDP vs TCP iterator.
    proto_iterators = {'tcp4': 'tcp', 'udp4': 'udp', 'tcp6': 'tcp',
                       'udp6': 'udp', 'ip4tcp': 'tcp', 'ip4udp': 'udp',
                       'ip6tcp': 'tcp', 'ip6udp': 'udp'}
    tc.proto = proto_iterators.get(tc.iterators.rxflowhash, "none")

    # BUG FIX: the original compared tc.tc_ip_proto to "none " (trailing
    # space), so an unmapped IP protocol was never detected here.
    if tc.proto == "none" or tc.tc_ip_proto == "none" or tc.rss_enum == "none":
        api.Logger.error(f"Not able to map the iterators. {tc.iterators.rxflowhash} {tc.tc_ip_proto} {tc.proto} {tc.rss_enum}")
        return api.types.status.FAILURE

    # Number of sessions iterator.
    tc.num_sessions = int(getattr(tc.args, "num_sessions", 1))

    # Log which iteration is in progress.
    api.Logger.info("==================== %s ====================" % tc.rss_enum)
    api.Logger.info(f"ip_proto:{tc.tc_ip_proto}, proto: {tc.proto}, rss: {tc.iterators.rss}, iperf_sessions: {tc.num_sessions}")

    tc.nodes = api.GetWorkloadNodeHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])
    if tc.os == 'freebsd':
        return api.types.status.SUCCESS

    # Identify the receiving node (the one testing RSS).  It will be the
    # client node for iPerf; all configuration, testing, and verification
    # are done on this node.
    for n in tc.nodes:
        if api.IsNaplesNode(n):
            api.Logger.info(f"Found Naples Node: [{n}]")
            receive_node = n
            break
    else:
        api.Logger.error("Failed to find a Naples Node!")
        return api.types.status.FAILURE

    # Get workload pairs for iperf sessions.
    workload_pairs = api.GetRemoteWorkloadPairs()
    if not workload_pairs:
        api.Logger.info("Skipping Testcase due to no workload pairs.")
        tc.skip = True
        # BUG FIX: with no pairs, tc.server/tc.client were never assigned
        # yet dereferenced below; return now that the skip flag is set.
        return api.types.status.SUCCESS

    # Assign client/server based on the selected receiving node; only the
    # first pair is used.
    pair = workload_pairs[0]
    if receive_node == pair[0].node_name:
        tc.client, tc.server = pair[1], pair[0]
    else:
        tc.client, tc.server = pair[0], pair[1]

    # Unload the driver to clear stats (server node only), then re-add
    # the workloads.
    if host.UnloadDriver(tc.os, tc.server.node_name, "all") is api.types.status.FAILURE:
        return api.types.status.FAILURE
    if host.LoadDriver(tc.os, tc.server.node_name) is api.types.status.FAILURE:
        return api.types.status.FAILURE
    wl_api.ReAddWorkloads(tc.server.node_name)

    # iperf needs fully-expanded IPv6 addresses.
    if tc.tc_ip_proto == 'v6':
        tc.server_ip = str(ipaddress.ip_address(tc.server.ipv6_address).exploded)
        tc.client_ip = str(ipaddress.ip_address(tc.client.ipv6_address).exploded)
    else:
        tc.server_ip = tc.server.ip_address
        tc.client_ip = tc.client.ip_address
    return api.types.status.SUCCESS