def Setup(tc):
    """Collect testbed state for the RDMA driver LIF-reset test.

    Records on `tc`: the Naples node list, the OS type, every
    (node, interface) pair, and — on Linux only — a PCI address cache.
    Returns IGNORED for OSes other than FreeBSD/Linux, else SUCCESS.
    """
    api.Logger.info("RDMA Driver LIF Reset")
    tc.nodes = api.GetNaplesHostnames()
    # OS is assumed uniform across nodes; only the first node is probed.
    tc.os = api.GetNodeOs(tc.nodes[0])
    if tc.os not in [host.OS_TYPE_BSD, host.OS_TYPE_LINUX]:
        api.Logger.info("Not implemented")
        return api.types.status.IGNORED
    # Scratch dicts used by later stages of the test.
    tc.stg1 = {}
    tc.stg2 = {}
    tc.intfs = []
    for node in tc.nodes:
        for intf in api.GetNaplesHostInterfaces(node):
            tc.intfs.append((node,intf))
    if tc.os == host.OS_TYPE_LINUX:
        # Cache this to save time
        tc.pci = {}
        for node in tc.nodes:
            for intf in api.GetNaplesHostInterfaces(node):
                pci = host.GetNaplesPci(node, intf)
                if pci is None:
                    api.Logger.warn("%s %s couldn't find PCI device" % (
                        node, intf))
                # Cached even when None so later lookups don't KeyError.
                tc.pci[(node,intf)] = pci
    return api.types.status.SUCCESS
def do_lif_reset_test(node, os):
    """Run three loops of driver reload plus per-interface LIF resets.

    Each loop: unload/reload ionic, re-add workloads, verify interfaces
    exist, then for each interface do a single LIF reset and check that
    the VLAN and filter lists are unchanged, and finally kick off a
    background burst of 10 LIF resets per interface as a stress test.
    Returns a status code.
    """
    for i in range(3):
        api.Logger.info("LIF reset and driver reload test loop %d" % i)
        if host.UnloadDriver(os, node, "all") is api.types.status.FAILURE:
            api.Logger.error("ionic unload failed loop %d" % i)
            return api.types.status.FAILURE
        if host.LoadDriver(os, node) is api.types.status.FAILURE:
            api.Logger.error("ionic load failed loop %d" % i)
            return api.types.status.FAILURE
        wl_api.ReAddWorkloads(node)
        if api.GetNaplesHostInterfaces(node) is None:
            api.Logger.error("No ionic interface after loop %d" % i)
            return api.types.status.FAILURE
        for intf in api.GetNaplesHostInterfaces(node):
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            # Snapshot VLAN/filter state before the reset for comparison.
            vlan_list = getVlanList(node, intf)
            filter_list = getFilterList(node, intf)
            # Single LIF reset
            api.Trigger_AddHostCommand(
                req, node,
                "sysctl dev.%s.reset=1" % (host.GetNaplesSysctl(intf)))
            resp = api.Trigger(req)
            # Give the LIF time to come back before re-reading state.
            time.sleep(5)
            vlan_list1 = getVlanList(node, intf)
            filter_list1 = getFilterList(node, intf)
            if vlan_list != vlan_list1:
                api.Logger.error(
                    "VLAN list doesn't match for %s, before: %s after: %s"
                    % (intf, str(vlan_list), str(vlan_list1)))
                return api.types.status.FAILURE
            if filter_list != filter_list1:
                api.Logger.error(
                    "Filter list doesn't match for %s, before: %s after: %s"
                    % (intf, str(filter_list), str(filter_list1)))
                return api.types.status.FAILURE
            api.Logger.info(
                "Success running LIF reset test on %s VLAN: %s, Filters; %s"
                % (intf, str(vlan_list), str(filter_list)))
        # Now stress test LIF reset
        for intf in api.GetNaplesHostInterfaces(node):
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            api.Trigger_AddHostCommand(
                req, node,
                "for ((i=0;i<10;i++)); do sysctl dev.%s.reset=1; done &"
                % (host.GetNaplesSysctl(intf)))
            # Some of LIF reset will fail since it will be running in background
            # with reload of driver.
            resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Verify each remote Naples host's interfaces appear in the LIF info.

    Returns FAILURE if any interface is missing, else SUCCESS.
    """
    fail = 0
    pairs = api.GetRemoteWorkloadPairs()
    hosts = pairs[0]
    # 'wl_host' instead of 'host': the original loop variable shadowed
    # the imported 'host' utility module.
    for wl_host in hosts:
        if not wl_host.IsNaples():
            continue
        api.Logger.info("Checking host %s" % wl_host.node_name)
        lif_info = __getLifInfo(wl_host.node_name)
        intfs = api.GetNaplesHostInterfaces(wl_host.node_name)
        for intf in intfs:
            api.Logger.info("Checking interface %s" % intf)
            if lif_info.find(intf) == -1:
                # A missing interface is a counted failure, so log at
                # error level (matches the sibling Trigger variants).
                api.Logger.error("interface %s not found" % intf)
                fail += 1
    if fail != 0:
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Verify each local Naples host's interfaces appear in the LIF info.

    Windows interface names are translated through the HAL naming scheme
    before the lookup. Returns IGNORED on ESX, FAILURE if any interface
    is missing, else SUCCESS.
    """
    if tc.os == naples_host.OS_TYPE_ESX:
        api.Logger.info("Not implemented for %s" % tc.os)
        return api.types.status.IGNORED
    fail = 0
    pairs = api.GetLocalWorkloadPairs()
    hosts = pairs[0]
    # 'wl_host' instead of 'host': the original loop variable shadowed
    # the imported 'host' utility module.
    for wl_host in hosts:
        if not wl_host.IsNaples():
            continue
        api.Logger.info("Checking host %s" % wl_host.node_name)
        lif_info = __getLifInfo(wl_host.node_name)
        intfs = api.GetNaplesHostInterfaces(wl_host.node_name)
        for intf in intfs:
            # Windows HAL interface name is translated two times.
            if tc.os == naples_host.OS_TYPE_WINDOWS:
                intf = ionic_utils.winHalIntfName(wl_host.node_name, intf)
            # HAL yaml o/p is Pen..Adapter, halctl cli op/ is pen..adap.
            intf = intf.lower()
            if lif_info.find(intf) == -1:
                api.Logger.error("interface %s not found" % intf)
                fail += 1
    if fail != 0:
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Capture media_status before and after a LIF reset on every interface.

    FreeBSD only; other OSes return SUCCESS untested. Command cookies
    'before'/'after' bracket each interface's reset for later Verify().
    """
    if tc.os != host.OS_TYPE_BSD:
        api.Logger.info("test not supported yet for os %s" %tc.os)
        return api.types.status.SUCCESS
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    tc.cmd_cookies = []
    for n in tc.nodes:
        intfs = api.GetNaplesHostInterfaces(n)
        for i in intfs:
            api.Logger.info("getting Media status info from %s" % i)
            # media_status snapshot, LIF reset, media_status snapshot.
            api.Trigger_AddHostCommand(req, n,
                "sysctl dev.%s.media_status" % host.GetNaplesSysctl(i))
            tc.cmd_cookies.append('before')
            api.Trigger_AddHostCommand(req, n,
                "sysctl dev.%s.reset=1" % host.GetNaplesSysctl(i))
            api.Trigger_AddHostCommand(req, n,
                "sysctl dev.%s.media_status" % host.GetNaplesSysctl(i))
            tc.cmd_cookies.append('after')
    tc.resp = api.Trigger(req)
    # FIX: identity comparison with None ('is None') instead of '== None'.
    if tc.resp is None:
        return api.types.status.FAILURE
    for cmd in tc.resp.commands:
        if cmd.exit_code != 0:
            api.Logger.error("Failed to get media status info (check if cable is plugged in)")
            api.Logger.info(cmd.stderr)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """PCIe fuzzing: gather Naples PCI addresses per node, unload the
    drivers, then run register- and memory-write fuzzers against them.

    Returns FAILURE on the first command or fuzz failure, else SUCCESS.
    """
    tc.cmd_descr = "PCIE Fuzzing"
    for node in tc.nodes:
        pci_address = []
        interfaces = api.GetNaplesHostInterfaces(node)
        # create a list of Naples PCI adresses for this node
        for interface in interfaces:
            cmd = "ethtool -i " + interface + "|awk -F \":\" '/bus-info/ { print $3 \":\" $4}'"
            api.Logger.info(f"On {node} run: {cmd}")
            if send_command(tc, cmd, node) is api.types.status.FAILURE:
                return api.types.status.FAILURE
            if (tc.resp.commands[0].stdout) is None:
                # BUG FIX: this message was missing the 'f' prefix, so the
                # literal text "{interface}" was logged instead of the name.
                api.Logger.error(
                    f"ethtool command on {interface} returned null stdout")
                return api.types.status.FAILURE
            address = str.rstrip(tc.resp.commands[0].stdout)
            pci_address.append(address)
        api.Logger.info(pci_address)
        # unload drivers before writing randomly to PCI
        host.UnloadDriver(api.GetNodeOs(node), node, "all")
        # run reg_write_random fuzzing on every Naples Eth PCI device
        if req_write_random(tc, node, pci_address) != api.types.status.SUCCESS:
            return api.types.status.FAILURE
        # run mem_write_random fuzzing on every Naples Eth PCI device
        if mem_write_random(tc, node) != api.types.status.SUCCESS:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Pick the first Linux Naples node, record its first PF on `tc`,
    and create the maximum number of supported VFs on that PF."""
    linux_naples = None
    for node in api.GetNaplesHostnames():
        if api.IsNaplesNode(node) and api.GetNodeOs(node) == "linux":
            linux_naples = node
            break
    if not linux_naples:
        api.Logger.error("Unable to find a Naples node with linux os")
        return api.types.status.ERROR
    tc.host = linux_naples
    # First physical function on the chosen node.
    tc.pf = api.GetNaplesHostInterfaces(linux_naples)[0]
    tc.num_vfs = GetSupportedVFs(tc.host, tc.pf)
    api.Logger.info("Host %s PF %s supports %d VFs" %
                    (tc.host, tc.pf, tc.num_vfs))
    if tc.num_vfs == 0:
        api.Logger.warn(
            "Max supported VFs on host %s is 0, expected non-zero" % linux_naples)
        return api.types.status.ERROR
    if CreateVFs(tc.host, tc.pf, tc.num_vfs) != api.types.status.SUCCESS:
        return api.types.status.ERROR
    return api.types.status.SUCCESS
def gatherUnloadedMulticastTest(node):
    """Collect the background multicast-filter dumps taken while the
    driver was unloaded and verify each interface shows 0 filters.

    Returns the number of errors found; returns early on a command error.
    """
    # Gather up the results of the background jobs and
    # verify that they found no multicast filters
    errcnt = 0
    for intf in api.GetNaplesHostInterfaces(node):
        tfile = "/tmp/load_defaults_mc-" + intf + ".txt"
        req = api.Trigger_CreateExecuteCommandsRequest()
        # BUG FIX: keep the command text in 'cmdstr' -- the error path
        # below logged 'cmdstr', which was previously undefined (NameError).
        cmdstr = "cat " + tfile
        api.Trigger_AddNaplesCommand(req, node, cmdstr)
        resp = api.Trigger(req)
        for cmd in resp.commands:
            if cmd.exit_code != 0:
                api.Logger.error ("halctl cmds to get interface " + intf +
                                  " multicast info FAILED!")
                api.Logger.error("cmd = " + cmdstr)
                api.Logger.info(cmd.stdout)
                api.Logger.info(cmd.stderr)
                errcnt += 1
                return errcnt
        # Only one command was issued; 'cmd' holds its result here.
        count = int(cmd.stdout)
        if count != 0:
            api.Logger.error("Host " + node + " interface " + intf +
                             " should have 0 multicast filters, but has %d" % count)
            errcnt += 1
        else:
            api.Logger.info("Host " + node + " interface " + intf +
                            " has %d multicast filters as expected" % count)
    return errcnt
def Setup(tc):
    """Prepare the manufacturing-mode test: record node names, collect the
    tagged workload VLANs on the uplink under test, generate the start-agent
    script, and copy the preinit/start-agent scripts to the Naples.

    Returns FAILURE if interfaces, workloads, or enough tagged VLANs are
    missing, else SUCCESS.
    """
    parse_args(tc)
    api.SetTestsuiteAttr("mfg_test_intf", tc.test_intf)
    api.SetTestsuiteAttr("mfg_mode", "yes")
    api.SetTestsuiteAttr("preinit_script_path", NAPLES_PREINIT_SCRIPT_PATH)
    api.SetTestsuiteAttr("start_agent_script_path",
                         NAPLES_START_AGENT_SCRIPT_PATH)
    # get node info
    tc.bitw_node_name = None
    tc.wl_node_name = None
    bitw_node_nic_pairs = athena_app_utils.get_athena_node_nic_names()
    classic_node_nic_pairs = utils.get_classic_node_nic_pairs()
    # Only one node for single-nic topology
    tc.bitw_node_name = bitw_node_nic_pairs[0][0]
    tc.wl_node_name = classic_node_nic_pairs[0][0]
    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)
    # Assuming single nic per host
    if len(host_intfs) != 2:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE
    # Two uplinks expected: index 0 is up0, index 1 is up1.
    up0_intf = host_intfs[0]
    up1_intf = host_intfs[1]
    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE
    # Collect the VLANs of tagged workloads on the uplink under test.
    wl_vlans = []
    for wl in workloads:
        if (wl.parent_interface == up0_intf and tc.test_intf == 'up0') or (
                wl.parent_interface == up1_intf and tc.test_intf == 'up1'):
            if wl.uplink_vlan != 0:
                # Tagged workload
                wl_vlans.append(wl.uplink_vlan)
    if len(wl_vlans) < NUM_MFG_TEST_VLANS:
        api.Logger.error('Failed to fetch %d tagged workloads for mfg test'
                         ' on uplink %s' % (NUM_MFG_TEST_VLANS, tc.test_intf))
        return api.types.status.FAILURE
    # generate start agent script with testbed vlans
    if gen_start_agent_script(wl_vlans) != api.types.status.SUCCESS:
        return api.types.status.FAILURE
    # copy preinit script and start agent script to naples
    preinit_filename = api.GetTopDir() + '/' + WS_PREINIT_SCRIPT_PATH
    start_agent_filename = api.GetTopDir(
    ) + '/' + WS_IOTA_START_AGENT_SCRIPT_PATH
    api.CopyToNaples(tc.bitw_node_name,
                     [preinit_filename, start_agent_filename], "")
    # The generated script is temporary; remove the workspace copy.
    os.remove(start_agent_filename)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Exercise PCI FLR on each Naples interface: unbind the driver,
    trigger a Function Level Reset, then rebind. Linux only.

    Missing PCI info is treated as best-effort success (skip), matching
    the original behavior. Returns FAILURE if any step fails.
    """
    hostname = tc.nodes[0]
    if tc.os != "linux":
        return api.types.status.SUCCESS
    fail = 0
    for intf in api.GetNaplesHostInterfaces(hostname):
        pci = _getPci(hostname, intf)
        # FIX: identity comparison with None ('is None') instead of '== None'.
        if pci is None or pci == "":
            api.Logger.warn("No PCI found for host %s interface %s" %
                            (hostname, intf))
            return api.types.status.SUCCESS
        api.Logger.info("Checking FLR on host %s interface %s pci %s" %
                        (hostname, intf, pci))
        if _unbindDriver(hostname, pci) != api.types.status.SUCCESS:
            fail += 1
            continue
        if _triggerFLR(hostname, pci) != api.types.status.SUCCESS:
            fail += 1
            continue
        if _bindDriver(hostname, pci) != api.types.status.SUCCESS:
            fail += 1
            continue
    if fail != 0:
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Query driver info (ethtool -i) for every Naples host interface.

    FreeBSD has no equivalent command and is skipped; unknown OSes fail.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    for n in tc.nodes:
        intfs = api.GetNaplesHostInterfaces(n)
        for i in intfs:
            api.Logger.info("Get Info of the Interface: %s" % i)
            if tc.os == host.OS_TYPE_LINUX:
                api.Trigger_AddHostCommand(req, n, "ethtool -i %s" % i)
            elif tc.os == host.OS_TYPE_BSD:
                # FreeBSD doesn't have a command
                # TODO: In verification we will need to do dmesg to extract same info
                # FIX: was a bare no-op expression 'api.types.status.SUCCESS'.
                pass
            else:
                return api.types.status.FAILURE
    tc.resp = api.Trigger(req)
    # FIX: identity comparison with None ('is None') instead of '== None'.
    if tc.resp is None:
        return api.types.status.FAILURE
    for cmd in tc.resp.commands:
        if cmd.exit_code != 0:
            api.Logger.error ("ethtool to get interface info FAILED!")
            api.Logger.info(cmd.stderr)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def GetHostLifInterface(tc):
    """Build tc.host_lifs: per node, a list of {interface: workload_name}
    entries for every host-LIF interface (empty workload name if no
    workload is attached to that interface)."""
    tc.host_lifs = {}
    for node in tc.nodes:
        per_node = []
        ret, lif_info_yaml = GetLifInfo(node)
        node_workloads = api.GetWorkloads(node)
        for intf in api.GetNaplesHostInterfaces(node):
            if not IsHostLifIntf(lif_info_yaml, intf):
                continue
            # Workload attached to this interface, if any.
            wl_for_intf = next(
                (wl.workload_name for wl in node_workloads
                 if wl.interface == intf), "")
            intf_wl = {intf: wl_for_intf}
            api.Logger.info(
                "Retrieving Host Intfs for Node: %s, intf/wl: %s" %
                (node, intf_wl))
            per_node.append(intf_wl)
        tc.host_lifs[node] = per_node
    return api.types.status.SUCCESS
def Trigger(tc):
    """Verify each local Naples host's interfaces appear in the LIF info.

    BSD/Linux only. Returns FAILURE if any interface is missing.
    """
    if tc.os != naples_host.OS_TYPE_BSD and tc.os != naples_host.OS_TYPE_LINUX:
        api.Logger.info("Not implemented for %s" % tc.os)
        return api.types.status.IGNORED
    fail = 0
    pairs = api.GetLocalWorkloadPairs()
    hosts = pairs[0]
    # 'wl_host' instead of 'host': the original loop variable shadowed
    # the imported 'host' utility module.
    for wl_host in hosts:
        if not wl_host.IsNaples():
            continue
        api.Logger.info("Checking host %s" % wl_host.node_name)
        lif_info = __getLifInfo(wl_host.node_name)
        intfs = api.GetNaplesHostInterfaces(wl_host.node_name)
        for intf in intfs:
            api.Logger.info("Checking interface %s" % intf)
            if lif_info.find(intf) == -1:
                # FIX: a missing interface is a counted failure, so log at
                # error level (matches the ESX-aware Trigger variant).
                api.Logger.error("interface %s not found" % intf)
                fail += 1
    if fail != 0:
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def GetNaplesHostInterfacesList(naples_node, device_name=None):
    """Return all host-side interfaces for a Naples node.

    api.GetNaplesHostInterfaces() reports only ETH_HOST interfaces and
    GetHostInternalMgmtInterfaces() reports only ETH_HOST_MGMT ones;
    the combined list covers both.
    """
    eth_intfs = list(api.GetNaplesHostInterfaces(naples_node, device_name))
    mgmt_intfs = naples_host_utils.GetHostInternalMgmtInterfaces(
        naples_node, device_name)
    return eth_intfs + mgmt_intfs
def Trigger(tc):
    """Check event-queue configuration consistency on each Naples interface.

    eth_eq_count read from the ionic identity debugfs must agree with the
    count of 'ionic-<pci>-eq' lines in /proc/interrupts: both zero or both
    non-zero. Linux only.
    """
    names = api.GetNaplesHostnames()
    hostname = names[0]
    if api.GetNodeOs(hostname) != host.OS_TYPE_LINUX:
        return api.types.status.SUCCESS
    for intf in api.GetNaplesHostInterfaces(hostname):
        api.Logger.info("Checking event queue use on host %s interface %s" %
                        (hostname, intf))
        pci = host.GetNaplesPci(hostname, intf)
        if pci is None:
            return api.types.status.FAILURE
        # get eth_eq_count and number of eq interrupts
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        cmd = "awk '/eth_eq_count/ {print $2}' < /sys/kernel/debug/ionic/" + pci + "/identity"
        api.Trigger_AddHostCommand(req, hostname, cmd)
        cmd = "grep -c -e 'ionic-" + pci + "-eq' /proc/interrupts"
        api.Trigger_AddHostCommand(req, hostname, cmd)
        resp = api.Trigger(req)
        if resp is None:
            api.Logger.error("Failed to get values from host %s interface %s" %
                             (hostname, intf))
            return api.types.status.FAILURE
        # BUG FIX: commands pop() in reverse submission order, so the first
        # pop is the grep (interrupt count) and the second is the awk
        # (eth_eq_count). The original assigned them the other way around,
        # swapping the two values and their error messages.
        cmd = resp.commands.pop()
        if cmd.exit_code > 1:
            # exit code 1 from grep is "string not found", which is a valid answer here
            api.Logger.error(
                "Failed to get interrupt count from host %s interface %s" %
                (hostname, intf))
            api.PrintCommandResults(cmd)
            return api.types.status.FAILURE
        intr_count = int(cmd.stdout.strip())
        cmd = resp.commands.pop()
        if cmd.exit_code != 0:
            api.Logger.error(
                "Failed to get eth_eq_count from host %s interface %s" %
                (hostname, intf))
            api.PrintCommandResults(cmd)
            return api.types.status.FAILURE
        eth_eq_count = int(cmd.stdout.strip())
        api.Logger.info(
            "Found eth_eq_count %d and interrupt count %d from host %s interface %s"
            % (eth_eq_count, intr_count, hostname, intf))
        if eth_eq_count == 0 and intr_count != 0:
            api.Logger.error("eq interrupts found when eth_eq_count == 0")
            return api.types.status.FAILURE
        elif eth_eq_count != 0 and intr_count == 0:
            api.Logger.error("No eq interrupts found when eth_eq_count != 0")
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Program the iterator's interrupt-coalescing value on every Naples
    interface and validate the command responses.

    Values below the supported maximum must succeed (a Linux "unmodified,
    ignoring" message is tolerated); values at/above the maximum must be
    rejected with an OS-specific error string.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    if tc.os != host.OS_TYPE_BSD and tc.os != host.OS_TYPE_LINUX:
        api.Logger.info("Not implemented for %s" % tc.os)
        return api.types.status.IGNORED
    # set interrupt coalescing value
    for node in tc.nodes:
        interfaces = api.GetNaplesHostInterfaces(node)
        for interface in interfaces:
            api.Logger.info("Set Interrupt Coalescing on %s:%s to %d" % \
                            (node, interface, \
                             tc.iterators.coales_interval))
            if tc.os == 'linux':
                api.Trigger_AddHostCommand(req, node,
                                           "ethtool -C %s rx-usecs %d" % \
                                           (interface, \
                                            tc.iterators.coales_interval))
            elif tc.os == 'freebsd':
                api.Trigger_AddHostCommand(req, node,
                                           "sysctl dev.%s.intr_coal=%d" % \
                                           (host.GetNaplesSysctl(interface), \
                                            tc.iterators.coales_interval))
    tc.resp = api.Trigger(req)
    if tc.resp is None:
        api.Logger.error("Command failed to respond")
        return api.types.status.FAILURE
    # validate the command response
    # for > than max, expect an error and a specific message
    for cmd in tc.resp.commands:
        if tc.iterators.coales_interval < tc.args.max_coales_interval:
            if cmd.exit_code != 0:
                #linux ethtool will not set the value if same as current
                if cmd.stderr.find("unmodified, ignoring") == -1:
                    api.Logger.error("Failed to set interrupt coalescing")
                    api.Logger.info(cmd.stderr)
                    return api.types.status.FAILURE
        else:
            if tc.os == 'linux':
                if cmd.stderr.find("out of range") == -1:
                    api.Logger.error("ionic did not error when coales value set (%d) > than supported (%d)" \
                                     %(tc.iterators.coales_interval, tc.args.max_coales_interval))
                    api.Logger.info(cmd.stderr)
                    return api.types.status.FAILURE
            elif tc.os == 'freebsd':
                if cmd.stderr.find("large") == -1:
                    api.Logger.error(
                        "ionic did not error when coales value set > than supported"
                    )
                    api.Logger.info(cmd.stderr)
                    return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Verify(tc):
    """Read back the interrupt-coalescing value from each interface and
    check it matches what Trigger() programmed.

    Linux reports the value quantized to the Naples interrupt period;
    FreeBSD echoes back exactly what was programmed.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    coales_period = tc.args.coales_period
    if tc.resp is None:
        api.Logger.error("Command failed to respond")
        return api.types.status.FAILURE
    # get the current coalescing value from FW/Driver
    for n in tc.nodes:
        intfs = api.GetNaplesHostInterfaces(n)
        api.Logger.info("Retrieve coalescing value from interfaces")
        for i in intfs:
            if tc.os == 'linux':
                api.Trigger_AddHostCommand(req, n, "ethtool -c %s" % i)
            elif tc.os == 'freebsd':
                api.Trigger_AddHostCommand(req, n,
                                           "sysctl dev.%s.curr_coal_us" % \
                                           (host.GetNaplesSysctl(i)))
    tc.resp = api.Trigger(req)
    if tc.resp is None:
        api.Logger.error("Command failed to respond")
        return api.types.status.FAILURE
    # expecting the following value back from FW/Driver
    # NOTE(review): current_coalescing stays unbound for any OS other than
    # linux/freebsd; earlier stages appear to guarantee that -- confirm.
    if tc.os == 'linux':
        # linux driver returns coalescing interval as uSecs
        # 3 is Naples interrupt period
        current_coalescing = str(int(tc.iterators.coales_interval/coales_period) \
                                 *coales_period)
    elif tc.os == 'freebsd':
        # freebsd returns coalescing value, same as
        # what user programmed.
        current_coalescing = str(int(tc.iterators.coales_interval))
    for cmd in tc.resp.commands:
        if cmd.exit_code != 0:
            api.Logger.error("Failed to read interrupt coalescing value")
            api.Logger.info(cmd.stderr)
            return api.types.status.FAILURE
        # for all values < max, validate returned value
        if tc.iterators.coales_interval < tc.args.max_coales_interval:
            # NOTE(review): Logger.info is called print-style with two
            # positional args here -- confirm the iota logger joins them.
            api.Logger.info("Expecting Coalescing Value: ", current_coalescing)
            if cmd.stdout.find(current_coalescing) == -1:
                api.Logger.info("Failed to set coalescing value")
                api.PrintCommandResults(cmd)
                return api.types.status.FAILURE
    return api.types.status.SUCCESS
def testLoadedMulticast(node, expected_mc):
    """Check that every Naples interface on `node` carries exactly
    `expected_mc` multicast filters; return the number of mismatches."""
    errcnt = 0
    for intf in api.GetNaplesHostInterfaces(node):
        mc = countMulticast(node, intf)
        if mc == expected_mc:
            api.Logger.info("Host " + node + " interface " + intf +
                            " has %d multicast filters as expected" % mc)
        else:
            errcnt += 1
            api.Logger.error("Host " + node + " interface " + intf +
                             " should have %d multicast filters, but has %d" %
                             (expected_mc, mc))
    return errcnt
def do_bsd_lif_resets():
    """Run a LIF reset on each interface of the first Naples node.

    Only applies to FreeBSD nodes; returns 0 on success or skip, else the
    first non-zero reset status.
    """
    naples_node = api.GetNaplesHostnames()[0]
    if api.GetNodeOs(naples_node) != utils.OS_TYPE_BSD:
        return 0
    for intf in api.GetNaplesHostInterfaces(naples_node):
        api.Logger.info("LIF reset %s interface %s" % (naples_node, intf))
        rc = utils.BsdLifReset(naples_node, intf)
        if rc:
            return rc
    return 0
def __config_max_mtu_on_host_intfs():
    """Queue an MTU-maximizing command for every Naples host interface and
    run them in one batch; ESX nodes short-circuit to True (unhandled)."""
    #req = api.Trigger_CreateAllParallelCommandsRequest()
    req = api.Trigger_CreateExecuteCommandsRequest()
    for node in api.GetNaplesHostnames():
        # ESX hosts are not handled by this helper.
        if api.GetNodeOs(node) == "esx":
            return True
        intf_list = api.GetNaplesHostInterfaces(node)
        api.Logger.verbose("Setting max mtu on %s in %s" % (intf_list, node))
        for intf in intf_list:
            api.Trigger_AddHostCommand(req, node, __get_mtu_cfg_cmd(node, intf))
    return __verify_response(api.Trigger(req))
def Setup(tc):
    """Identify the Naples node and enumerate every interface class for the
    promiscuous test, recording on tc.on_host whether each interface lives
    on the host (True) or on the Naples card (False).

    Returns IGNORED when no Naples node is present.
    """
    tc.skip = False
    node_names = api.GetWorkloadNodeHostnames()
    # Exactly one of the two workload nodes is expected to be Naples.
    if api.IsNaplesNode(node_names[0]):
        tc.naples_node = node_names[0]
        tc.peer_node = node_names[1]
    elif api.IsNaplesNode(node_names[1]):
        tc.naples_node = node_names[1]
        tc.peer_node = node_names[0]
    else:
        api.Logger.verbose("Skipping as there are no Naples nodes")
        tc.skip = True
        return api.types.status.IGNORED
    tc.on_host = {}
    tc.host_intfs = list(api.GetNaplesHostInterfaces(tc.naples_node))
    for intf in tc.host_intfs:
        tc.on_host[intf] = True
    # Mgmt interface on host for network connection to Naples over PCIE
    tc.host_int_intfs = naples_host_utils.GetHostInternalMgmtInterfaces(
        tc.naples_node)
    for intf in tc.host_int_intfs:
        tc.on_host[intf] = True
    # Remaining interface classes live on the Naples card itself.
    tc.inband_intfs = naples_host_utils.GetNaplesInbandInterfaces(
        tc.naples_node)
    for intf in tc.inband_intfs:
        tc.on_host[intf] = False
    tc.naples_int_mgmt_intfs = naples_host_utils.GetNaplesInternalMgmtInterfaces(
        tc.naples_node)
    for intf in tc.naples_int_mgmt_intfs:
        tc.on_host[intf] = False
    tc.naples_oob_mgmt_intfs = naples_host_utils.GetNaplesOobInterfaces(
        tc.naples_node)
    for intf in tc.naples_oob_mgmt_intfs:
        tc.on_host[intf] = False
    tc.all_intfs = tc.host_intfs + tc.host_int_intfs + tc.inband_intfs + tc.naples_int_mgmt_intfs + tc.naples_oob_mgmt_intfs
    # NOTE(review): Logger.debug is called print-style with two positional
    # args here -- confirm the iota logger joins them.
    api.Logger.debug("Promiscuous test interfaces: ", tc.all_intfs)
    return api.types.status.SUCCESS
def Trigger(tc):
    """FreeBSD firmware-update test: set hw.ionic.fw_update_ver via kenv,
    reload the ionic/ionic_fw drivers, then run fw_update on every
    interface with an extended timeout.
    """
    if tc.os != host.OS_TYPE_BSD:
        api.Logger.info("Not implemented")
        return api.types.status.IGNORED
    # Unload ionic and ionic_fw
    for node in tc.nodes:
        host.UnloadDriver(tc.os, node, "all")
        host.UnloadDriver(tc.os, node, "ionic_fw")
    for node in tc.nodes:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        # XXX: Find the Naples_fw.tar version.
        api.Trigger_AddHostCommand(
            req, node, "kenv hw.ionic.fw_update_ver=FILL_FW_VERSION")
        resp = api.Trigger(req)
        if resp is None:
            api.Logger.info("Failed kenv hw.ionic.fw_update_ver=X")
            return api.types.status.FAILURE
        # NOTE(review): a LoadDriver FAILURE is reported as "already
        # loaded" -- confirm that is the only failure mode intended here.
        if host.LoadDriver(tc.os, node) is api.types.status.FAILURE:
            api.Logger.info("ionic already loaded")
            return api.types.status.FAILURE
        if LoadFwDriver(tc.os, node) is api.types.status.FAILURE:
            return api.types.status.FAILURE
        # NOTE(review): 'req' (already triggered above) is reused below, so
        # the kenv command runs a second time together with the fw_update
        # commands -- confirm intended.
        for i in api.GetNaplesHostInterfaces(node):
            #
            # In local testing, this step completes in 35-40s, but the default
            # timeout is 30s. Therefore, increase the timeout to 60s.
            #
            # The iota logs may contain messages such as "CHECK_ERR: Nicmgr
            # crashed for host: node2?" Please note, this is due to finding
            # the string "fw heartbeat stuck" in the host dmesg. This is
            # currently the expected behavior when doing fw update. If nicmgr
            # does crash, than expect subsequent tests to fail, otherwise the
            # CHECK_ERR message in the iota test logs may be ignored.
            #
            api.Trigger_AddHostCommand(req, node,
                                       "sysctl dev.%s.fw_update=1" %
                                       host.GetNaplesSysctl(i),
                                       timeout=60)
        tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Reset interface stats via sysctl and dump any counters that are
    still non-zero afterwards (FreeBSD only)."""
    if tc.os != host.OS_TYPE_BSD:
        api.Logger.info("Not implemented")
        return api.types.status.IGNORED
    for node in tc.nodes:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        for intf in api.GetNaplesHostInterfaces(node):
            sysctl_name = host.GetNaplesSysctl(intf)
            # Zero the counters, then list whatever remains non-zero.
            api.Trigger_AddHostCommand(
                req, node, 'sysctl dev.%s.reset_stats=1' % sysctl_name)
            api.Trigger_AddHostCommand(
                req, node, 'sysctl dev.%s | grep -v ": 0"' % sysctl_name)
        tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Main(tc):
    """Dump per-interface packet statistics on every Naples node/device,
    using the OS-appropriate tool (sysctl script on FreeBSD, IonicConfig on
    Windows, ethtool on Linux); FreeBSD also clears the stats afterwards.
    """
    for node in api.GetNaplesHostnames():
        ionic_utils.checkForIonicError(node)
        for dev_name in api.GetDeviceNames(node):
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            for i in api.GetNaplesHostInterfaces(node, dev_name):
                # NOTE(review): local 'os' shadows the stdlib os module
                # within this function.
                os = api.GetNodeOs(node)
                if os == host.OS_TYPE_BSD:
                    api.Trigger_AddHostCommand(
                        req, node, "bash " + IONIC_STATS_FILE +
                        " -i %s -c" % (host.GetNaplesSysctl(i)))
                    # Clear the stats.
                    api.Trigger_AddHostCommand(
                        req, node,
                        'sysctl dev.%s.reset_stats=1 1>/dev/null' %
                        host.GetNaplesSysctl(i))
                elif os == host.OS_TYPE_WINDOWS:
                    intf = workload_api.GetNodeInterface(node, dev_name)
                    name = intf.WindowsIntName(i)
                    api.Trigger_AddHostCommand(
                        req, node,
                        "/mnt/c/Windows/temp/drivers-windows/IonicConfig.exe portstats -n '%s'"
                        % name)
                else:
                    api.Trigger_AddHostCommand(
                        req, node, 'ethtool -S %s | grep packets' % i)
            resp = api.Trigger(req)
            if resp is None:
                # NOTE(review): 'i' here is the last interface from the loop
                # above (and unbound if the device had no interfaces).
                api.Logger.error(
                    "Failed to get stats for %s, is driver loaded?" % i)
                return api.types.status.FAILURE
            for cmd in resp.commands:
                if cmd.exit_code == 0:
                    if cmd.stdout:
                        #Log for debugging for now.
                        api.Logger.info("Stats output for %s: %s" %
                                        (i, cmd.stdout))
                else:
                    api.Logger.error("Command failed to run: %s" % cmd.stderr)
                    return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Main(step):
    """Enable l2-fwd-offload on every Naples host interface of all
    workload nodes; FAILURE if any ethtool command fails."""
    nodes = api.GetWorkloadNodeHostnames()
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    for n in nodes:
        intfs = api.GetNaplesHostInterfaces(n)
        for i in intfs:
            api.Logger.info("Enable l2-fwd-offload on intf %s" % i)
            api.Trigger_AddHostCommand(req, n,
                                       "ethtool -K %s l2-fwd-offload on" % i)
    resp = api.Trigger(req)
    # FIX: identity comparison with None ('is None') instead of '== None';
    # the failure message is now logged at error level.
    if resp is None:
        return api.types.status.FAILURE
    for cmd in resp.commands:
        if cmd.exit_code != 0:
            api.Logger.error("Enable l2-fwd-offload FAILED!")
            api.Logger.info(cmd.stderr)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def prepUnloadedLifs(tc, node):
    """Launch one background halctl job per interface that captures
    'halctl show lif' output ~7s from now — i.e. after the driver has been
    unloaded — into a per-interface temp file for gatherUnloadedLifs().

    Returns -1 on setup failure, otherwise None (implicit).
    """
    # Set up 'halctl show lifs' output with delay
    req = api.Trigger_CreateExecuteCommandsRequest()
    for intf in api.GetNaplesHostInterfaces(node):
        tfile = "/tmp/load_defaults_lif-" + intf + ".txt"
        ii = node + intf
        # Grep for the lif id recorded earlier for this node/interface.
        cmdstr = "sleep 7; /nic/bin/halctl show lif | grep ^" + tc.intf_id[ii] + " > " + tfile
        api.Trigger_AddNaplesCommand(req, node, cmdstr, background=True)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        if cmd.exit_code != 0:
            # NOTE(review): intf/cmdstr hold the values from the last loop
            # iteration here, not necessarily the failing command's.
            api.Logger.error ("prep of halctl cmds to get interface " + intf +
                              " lif info FAILED!")
            api.Logger.error("cmd = " + cmdstr)
            api.Logger.info(cmd.stdout)
            api.Logger.info(cmd.stderr)
            return -1
def gatherUnloadedLifs(tc, node):
    """Collect the background 'halctl show lif' dumps taken while the
    driver was unloaded and verify, per interface: the recorded lif id and
    name are unchanged, and both VStrip and VIns read "false".

    Returns the number of errors found; returns early on a command error.
    """
    # Gather 'halctl show lifs' output and check that
    # the intf name doesn't show in the name column,
    # and that VStrip and VIns are false
    errcnt = 0
    for intf in api.GetNaplesHostInterfaces(node):
        tfile = "/tmp/load_defaults_lif-" + intf + ".txt"
        req = api.Trigger_CreateExecuteCommandsRequest()
        cmdstr = "cat " + tfile
        api.Trigger_AddNaplesCommand(req, node, cmdstr)
        resp = api.Trigger(req)
        cmd = resp.commands[0]
        if cmd.exit_code != 0:
            api.Logger.error ("halctl cmds to get interface " + intf +
                              " lif info FAILED!")
            api.Logger.error("cmd = " + cmdstr)
            api.Logger.info(cmd.stdout)
            api.Logger.info(cmd.stderr)
            errcnt += 1
            return errcnt
        words = cmd.stdout.split()
        ii = node + intf
        # BUG FIX: the original overwrote tc.intf_id[ii] with words[0]
        # immediately before comparing them, so the Id check could never
        # fail. Compare against the id recorded before the unload instead.
        if words[0] != tc.intf_id[ii]:
            api.Logger.error("Id should be " + tc.intf_id[ii] +
                             ", shows " + words[0])
            errcnt += 1
        if words[1] != tc.intf_id[ii]:
            api.Logger.error("Name should be " + tc.intf_id[ii] +
                             ", shows " + words[1])
            errcnt += 1
        if words[4] != "false":
            api.Logger.error("VStrip should be false, shows " + words[4])
            errcnt += 1
        if words[5] != "false":
            api.Logger.error("VIns should be false, shows " + words[5])
            errcnt += 1
        if errcnt == 0:
            api.Logger.info("Unloaded Lifs test " + intf + " on " + node +
                            " looks good")
    return errcnt
def prepUnloadedMulticastTest(node):
    """Launch one background job per interface that captures the interface's
    multicast-filter count ~12s from now — i.e. after the driver has been
    shut down — into a per-interface temp file collected later by
    gatherUnloadedMulticastTest().

    Returns -1 on setup failure, otherwise None (implicit).
    """
    # Set up a background job for each interface that will
    # run after we've shut down the driver. The output is
    # saved to a file that we'll collect later.
    req = api.Trigger_CreateExecuteCommandsRequest()
    for intf in api.GetNaplesHostInterfaces(node):
        tfile = "/tmp/load_defaults_mc-" + intf + ".txt"
        cmdstr = "sleep 12 ; " + mc_cmd_base + intf + " > " + tfile
        api.Trigger_AddNaplesCommand(req, node, cmdstr, background=True)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        if cmd.exit_code != 0:
            # NOTE(review): intf/cmdstr hold the values from the last loop
            # iteration here, not necessarily the failing command's.
            api.Logger.error ("prep of halctl cmds to get interface " + intf +
                              " multicast info FAILED!")
            api.Logger.error("cmd = " + cmdstr)
            api.Logger.info(cmd.stdout)
            api.Logger.info(cmd.stderr)
            return -1
def Trigger(tc):
    """Exercise every flow-control setting combination on each Naples
    interface (FreeBSD and Linux only)."""
    if tc.os != host.OS_TYPE_BSD and tc.os != host.OS_TYPE_LINUX:
        api.Logger.info("Not implemented for %s" % tc.os)
        return api.types.status.IGNORED
    for node in tc.nodes:
        for intf in api.GetNaplesHostInterfaces(node):
            if tc.os == host.OS_TYPE_BSD:
                # Valid values start at 1:
                # 1 - rx, 2 - tx & 3 - rx,tx
                for idx, val in enumerate(BSD_LINK_PARAM, start=1):
                    if bsd_flow_ctrl(node, intf, 1, idx, val) == -1:
                        return api.types.status.FAILURE
            else:
                # Rx and Tx value is same which is 'on' & 'off'
                for rx_val in LINUX_LINK_PARAM:
                    for tx_val in LINUX_LINK_PARAM:
                        if linux_flow_ctrl(node, intf, rx_val, tx_val) == -1:
                            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Main(args):
    """Enable the maximum supported number of VFs on every Naples host
    interface, then rebuild the topology.

    Skipped entirely under --skip-setup. Returns FAILURE if reading or
    writing the sriov sysfs files fails.
    """
    if GlobalOptions.skip_setup:
        # No profile change is required for skip setup
        return api.types.status.SUCCESS
    total_vfs_read_cmd = "cat /sys/class/net/{hostIf}/device/sriov_totalvfs"
    vfs_write_cmd = "echo {total_vfs} > /sys/class/net/{hostIf}/device/sriov_numvfs"
    for n in api.GetNaplesHostnames():
        # for host-interface and build command to be executed on the host
        # Commands:
        # total_vfs=`cat /sys/class/net/<hostif_device>/device/sriov_totalvfs`
        req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
        for hostIf in api.GetNaplesHostInterfaces(n):
            # FIX: use the command template (it was defined but unused,
            # duplicated by an inline f-string producing the same text).
            api.Trigger_AddHostCommand(req, n,
                                       total_vfs_read_cmd.format(hostIf=hostIf))
        resp = api.Trigger(req)
        if not api.Trigger_IsSuccess(resp):
            api.Logger.error("Failed to execute/collect sriov_totalvfs from hostIfs : %s" % n)
            return api.types.status.FAILURE
        # Recover each interface name from the echoed command line.
        if_totalvfs_map = {}
        for cmd in resp.commands:
            m = re.search("cat.*/([\w]+)/.*/sriov_totalvfs", cmd.command)
            if_totalvfs_map[m.group(1)] = cmd.stdout.rstrip()
        # for each host interface build command to be executed on the host
        # echo $total_vfs > /sys/class/net/<hostif_device>/device/sriov_numvfs
        req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
        for hostIf, total_vfs in if_totalvfs_map.items():
            api.Trigger_AddHostCommand(
                req, n, vfs_write_cmd.format(total_vfs=total_vfs, hostIf=hostIf))
        resp = api.Trigger(req)
        if not api.Trigger_IsSuccess(resp):
            api.Logger.error("Failed to execute/apply sriov_totalvfs for each hostIf : %s" % n)
            return api.types.status.FAILURE
    return api.RebuildTopology()