def bsd_flow_ctrl(node, inf, fc_type, fc_val, pattern):
    """Set BSD flow-control type/value on interface 'inf' of 'node' and
    check the resulting ifconfig media output.

    Returns 0 on success, -1 on any failure.
    """
    import re  # local import: the file's import block is not visible here

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddHostCommand(req, node, 'sysctl dev.%s.flow_ctrl=%d' %
                               (host.GetNaplesSysctl(inf), fc_type))
    api.Trigger_AddHostCommand(req, node, 'sysctl dev.%s.link_pause=%d' %
                               (host.GetNaplesSysctl(inf), fc_val))
    api.Trigger_AddHostCommand(req, node, BSD_IFCONFIG_MEDIA_CMD % inf)
    api.Logger.info("Setting %s link type: %d value: %d pattern: %s" %
                    (inf, fc_type, fc_val, pattern))
    resp = api.Trigger(req)
    if resp is None:
        return -1

    # We are interested in only the last command's response (ifconfig media).
    cmd = resp.commands[2]
    if cmd.exit_code != 0:
        api.Logger.error("Failed exit code: %d link type: %d value: %d, stderr: %s" %
                         (cmd.exit_code, fc_type, fc_val, cmd.stderr))
        api.PrintCommandResults(cmd)
        return -1

    # Fixed: the original used str.find() on a regex-like string
    # ("[\n\t]*" + pattern + ...), which searches for those characters
    # literally and could never match; use a real regex search.
    # Also fixed: the error log had 4 arguments for 3 format specifiers,
    # which would raise TypeError.
    # NOTE(review): "pattern present => failure" polarity is preserved
    # from the original condition -- confirm this is intended.
    if re.search("[\n\t]*" + pattern + "[\n\t]*", cmd.stdout):
        api.Logger.error("Failed exit code: %d link type: %d value: %d, stdout: %s" %
                         (cmd.exit_code, fc_type, fc_val, cmd.stdout))
        api.PrintCommandResults(cmd)
        return -1
    return 0
def Trigger(tc):
    """On BSD hosts, read media status, reset each Naples interface, and
    read media status again; results go to tc.resp with 'before'/'after'
    cookies in tc.cmd_cookies.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    if tc.os != host.OS_TYPE_BSD:
        api.Logger.info("test not supported yet for os %s" % tc.os)
        return api.types.status.SUCCESS
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_cookies = []
    for n in tc.nodes:
        intfs = api.GetNaplesHostInterfaces(n)
        for i in intfs:
            api.Logger.info("getting Media status info from %s" % i)
            # Hoisted: same sysctl node name used by all three commands.
            sysctl_name = host.GetNaplesSysctl(i)
            api.Trigger_AddHostCommand(req, n,
                                       "sysctl dev.%s.media_status" % sysctl_name)
            tc.cmd_cookies.append('before')
            api.Trigger_AddHostCommand(req, n,
                                       "sysctl dev.%s.reset=1" % sysctl_name)
            api.Trigger_AddHostCommand(req, n,
                                       "sysctl dev.%s.media_status" % sysctl_name)
            tc.cmd_cookies.append('after')
    tc.resp = api.Trigger(req)
    # Fixed: compare to None with 'is', not '=='.
    if tc.resp is None:
        return api.types.status.FAILURE
    for cmd in tc.resp.commands:
        if cmd.exit_code != 0:
            api.Logger.error("Failed to get media status info (check if cable is plugged in)")
            api.Logger.info(cmd.stderr)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run the PNSO test on each node whose OS is in tc.os, then gather
    pencake status files (linux) or dmesg (other OSes).

    Sets tc.skip when no node matched; results go to tc.resp.
    """
    tc.skip = True
    req = api.Trigger_CreateExecuteCommandsRequest()
    for node in tc.nodes:
        node_os = api.GetNodeOs(node)
        if node_os not in tc.os:
            continue
        tc.skip = False
        test_cmd = "./pnsotest_%s.py --cfg blocksize.yml globals.yml --test %s" % (
            node_os, tc.args.test)
        if getattr(tc.args, "failtest", False):
            test_cmd += " --failure-test"
        # Clear the kernel ring buffer before the run.
        api.Trigger_AddHostCommand(req, node, "dmesg -c > /dev/null")
        api.Trigger_AddHostCommand(req, node, test_cmd)
        if node_os == 'linux':
            for status_idx in range(1, 5):
                api.Trigger_AddHostCommand(
                    req, node, "cat /sys/module/pencake/status/%d" % status_idx)
        else:
            api.Trigger_AddHostCommand(req, node, "dmesg")
        api.Trigger_AddHostCommand(req, node, "dmesg > dmesg.log")
        api.Logger.info("Running PNSO test %s" % test_cmd)
    if not tc.skip:
        tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def LoadDriver (os_type, node):
    """Load the ionic driver on 'node' (insmod on Linux, kldload on BSD).

    Returns api.types.status.SUCCESS/FAILURE.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    if os_type == OS_TYPE_LINUX:
        api.Trigger_AddHostCommand(req, node, "insmod " + LinuxDriverPath)
    elif os_type == OS_TYPE_BSD:
        api.Trigger_AddHostCommand(req, node, "kldload " + FreeBSDDriverPath)
    else:
        api.Logger.info("Unknown os_type - %s" % os_type)
        return api.types.status.FAILURE
    resp = api.Trigger(req)
    if resp is None:
        return api.types.status.FAILURE
    for cmd in resp.commands:
        if cmd.exit_code != 0:
            if os_type == OS_TYPE_LINUX:
                # NOTE(review): insmod's "File exists" normally means the
                # driver was already loaded, yet it is treated as a hard
                # failure here -- confirm this polarity is intended.
                if cmd.stdout.find("File exists") != -1:
                    api.Logger.info("Load Driver Failed")
                    api.PrintCommandResults(cmd)
                    return api.types.status.FAILURE
            elif os_type == OS_TYPE_BSD:
                if cmd.stdout.find("already loaded") != -1:
                    api.Logger.info("Load Driver Failed")
                    api.PrintCommandResults(cmd)
                    return api.types.status.FAILURE
                else:
                    # NOTE(review): reached when kldload failed WITHOUT the
                    # "already loaded" message, but the log claims the
                    # driver was already loaded -- branch/message look
                    # inverted; confirm.
                    api.Logger.info("Driver was already loaded. Load is expected to fail")
            else:
                # Unreachable: os_type was validated above.
                api.Logger.info("Unknown os_type - %s" % os_type)
                return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Stage and build the RDMA driver/tools package on Naples nodes, move
    show_gid onto the remaining nodes, and collect 'uname -r' everywhere.

    Results go to tc.resp (build/move) and tc.resp_uname (uname).
    """
    req_uname = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)

    # Build command templates: Linux additionally runs setup_libs.sh and
    # uses tar's -a (auto-compress detection) flag.
    linux_build = ("mkdir -p {path} && mv {pkg} {path} && cd {path} && "
                   "tar -xma --strip-components 1 -f {pkg} && ./setup_libs.sh && ./build.sh")
    other_build = ("mkdir -p {path} && mv {pkg} {path} && cd {path} && "
                   "tar -xm --strip-components 1 -f {pkg} && ./build.sh")

    # Move driver package into position and build on naples nodes
    for n in tc.nodes:
        api.Logger.info("Building RDMA drivers and tools on: {node}".format(node=n))
        template = linux_build if tc.os == host.OS_TYPE_LINUX else other_build
        api.Trigger_AddHostCommand(req, n,
                                   template.format(path=tc.iota_path, pkg=tc.pkgname),
                                   timeout = 180)
        api.Trigger_AddHostCommand(req_uname, n, "uname -r")

    # Move show_gid into position on other nodes
    for n in tc.other_nodes:
        if n in tc.nodes:
            continue
        api.Logger.info("Moving show_gid to tools on {node}".format(node=n))
        api.Trigger_AddHostCommand(req, n,
                                   "mkdir -p {path} && mv show_gid {path}".format(path=tc.iota_path))
        api.Trigger_AddHostCommand(req_uname, n, "uname -r")

    tc.resp_uname = api.Trigger(req_uname)
    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Set up an NVMe-oF malloc target (node 0) and initiator (node 1)
    using parameters from the nvmf_cfg.json config file.

    Results go to tc.resp; returns api.types.status.SUCCESS.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    # Fixed: the config file was opened and never closed; use a context
    # manager so the handle is released.
    with open('test/iris/testcases/nvme/nvmf_cfg.json') as f:
        nvmf_cfg = json.load(f)
    api.Logger.info("Setting up target {0} for spdk".format(tc.nodes[0]))
    api.Trigger_AddHostCommand(
        req, tc.nodes[0],
        "python /naples/target_malloc.py --intf " + nvmf_cfg['intf'] +
        " --ip " + nvmf_cfg['tgt_ip'] +
        " --prefixlen " + nvmf_cfg['prefixlen'] +
        " --mtu " + nvmf_cfg['mtu'] +
        " --hmem " + nvmf_cfg['hmem'] +
        " --spdk_dir " + nvmf_cfg['spdk_dir'] +
        " --cpu_mask " + nvmf_cfg['cpu_mask'] +
        " --mdev_name " + nvmf_cfg['mdev_name'] +
        " --block_size " + nvmf_cfg['block_size'] +
        " --num_block " + nvmf_cfg['num_block'] +
        " --ctrl_name " + nvmf_cfg['ctrl_name'] +
        " --nqn " + nvmf_cfg['nqn'])
    api.Logger.info("Setting up host {0} for nvme over fabrics".format(
        tc.nodes[1]))
    api.Trigger_AddHostCommand(
        req, tc.nodes[1],
        "python /naples/initiator.py --intf " + nvmf_cfg['intf'] +
        " --ip " + nvmf_cfg['ini_ip'] +
        " --prefixlen " + nvmf_cfg['prefixlen'] +
        " --r_ip " + nvmf_cfg['tgt_ip'] +
        " --mtu " + nvmf_cfg['mtu'] +
        " --nqn " + nvmf_cfg['nqn'])
    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Configure a VF Tx rate limit, run iperf across it, and verify the
    observed rate is within 10% of the configured rate.

    Returns SUCCESS, FAILURE (rate out of tolerance) or ERROR (exec issue).
    """
    rate = tc.iterators.rate
    duration = 30
    if __SetAndCheckRate(tc.host1, tc.pf_1, tc.vfid, rate) != api.types.status.SUCCESS:
        return api.types.status.ERROR

    servercmd = iperf.ServerCmd(server_ip=tc.remote_ip, port=7777, run_core=2)
    clientcmd = iperf.ClientCmd(tc.remote_ip, client_ip=tc.vf_ip, jsonOut=True,
                                port=7777, proto='tcp', time=duration, run_core=2)
    sreq = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    creq = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddHostCommand(sreq, tc.host2, servercmd, background=True)
    api.Trigger_AddHostCommand(creq, tc.host1, clientcmd, timeout=3600)

    server_resp = api.Trigger(sreq)
    if not server_resp:
        api.Logger.error("Unable to execute server command")
        return api.types.status.ERROR

    # Fixed: terminate the background iperf server on EVERY exit path; the
    # original only terminated it on full success, leaking the server
    # process on each error return.
    try:
        # Give the server a moment to start listening.
        time.sleep(5)
        client_resp = api.Trigger(creq)
        if not client_resp:
            api.Logger.error("Unable to execute client command")
            return api.types.status.ERROR
        resp = client_resp.commands.pop()
        if resp.exit_code != 0:
            api.Logger.error("Iperf client failed with exit code %d" % resp.exit_code)
            api.PrintCommandResults(resp)
            return api.types.status.ERROR
        if not iperf.Success(resp.stdout):
            api.Logger.error("Iperf failed with error: %s" % iperf.Error(resp.stdout))
            return api.types.status.ERROR
        api.Logger.info("Obs rate %sMbps" % iperf.GetSentMbps(resp.stdout))
        obs_rate = float(iperf.GetSentMbps(resp.stdout))
        delta = (abs(obs_rate - rate) * 100) / rate
        if delta > 10:
            api.Logger.error("Configured Tx rate %f but observed %f delta %f%%" %
                             (rate, obs_rate, delta))
            return api.types.status.FAILURE
        return api.types.status.SUCCESS
    finally:
        api.Trigger_TerminateAllCommands(server_resp)
def debug_dump_HostRoutingTable(node):
    """Dump the IPv4 and IPv6 routing tables of 'node' for debugging."""
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    for route_cmd in ("netstat -4r", "netstat -6r"):
        api.Trigger_AddHostCommand(req, node, route_cmd)
    return debug_dump_display_info(api.Trigger(req))
def AddPenctlCommand(req, node, cmd, device=None):
    """Queue 'cmd' as a penctl invocation on 'node'.

    Targets the given device, or every device on the node when 'device'
    is not supplied.
    """
    targets = [device] if device else api.GetDeviceNames(node)
    for dev in targets:
        api.Trigger_AddHostCommand(req, node,
                                   __get_pen_ctl_cmd(node, dev) + cmd,
                                   background=False, timeout=60 * 120)
def GetHostInternalMgmtInterfaces(node, device = None):
    """Return the host-side internal-management interface name(s) for 'node'.

    Prefers the IOTA infra API; falls back to probing PCI by the Naples
    mgmt device ID (0x1004 / vendor 0x1dd8) per host OS.
    Returns a list of interface-name strings (possibly empty).
    """
    # Rely on IOTA infra to provide this information (dual-nic friendly API)
    if api.IsNaplesNode(node):
        interface_names = api.GetNaplesHostMgmtInterfaces(node, device)
        if interface_names:
            return interface_names
    interface_names = []
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    if api.GetNodeOs(node) == OS_TYPE_LINUX:
        pci_bdf_list = []
        # find pci bdf first for mgmt device which has deviceId as 1004
        cmd = "lspci -d :1004 | cut -d' ' -f1"
        api.Trigger_AddHostCommand(req, node, cmd)
        resp = api.Trigger(req)
        # find the interface name for all the pci_bdfs for all the mgmt interfaces
        pci_bdf_list = resp.commands[0].stdout.split("\n")
        for pci_bdf in pci_bdf_list:
            if (pci_bdf != ''):
                cmd = "ls /sys/bus/pci/devices/0000:" + pci_bdf + "/net/"
                req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
                api.Trigger_AddHostCommand(req, node, cmd)
                resp = api.Trigger(req)
                for command in resp.commands:
                    iface_name = command.stdout
                    interface_names.append(iface_name.strip("\n"))
    elif api.GetNodeOs(node) == OS_TYPE_ESX:
        # For now hardcoding.
        return ["eth1"]
    elif api.GetNodeOs(node) == OS_TYPE_WINDOWS:
        entries = GetWindowsPortMapping(node)
        if len(entries) == 0:
            return []
        # Pick the adapter on the highest PCI bus number.
        # NOTE(review): assumes the mgmt interface is always the
        # highest-bus entry of the port mapping -- confirm.
        maxbus = 0
        name = ""
        for k, v in entries.items():
            if int(v["Bus"]) > maxbus:
                maxbus = int(v["Bus"])
                name = k
        return [name]
    else:
        # FreeBSD: match chip id and rewrite "ionX" device to "ionicX".
        cmd = "pciconf -l | grep chip=0x10041dd8 | cut -d'@' -f1 | sed \"s/ion/ionic/g\""
        api.Trigger_AddHostCommand(req, node, cmd)
        resp = api.Trigger(req)
        for command in resp.commands:
            iface_name = command.stdout
            interface_names.append(iface_name.strip("\n"))
    return interface_names
def Trigger(tc):
    """Bring up the Athena setup: stage device.json, load mnic drivers and
    the athena agent on the Naples console, then hot-plug the PCI device on
    the host and configure both ends of the internal mgmt link.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    # move device.json
    # NOTE(review): console command results are captured but never checked
    # for errors -- confirm best-effort is intended.
    cmd = "mv /device.json /nic/conf/"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)
    # load drivers
    cmd = "insmod /nic/bin/ionic_mnic.ko && insmod /nic/bin/mnet_uio_pdrv_genirq.ko && insmod /nic/bin/mnet.ko"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)
    # start athena app
    cmd = "/nic/tools/start-agent-skip-dpdk.sh"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)
    # wait for athena app to be up
    utils.Sleep(80)
    # configure int_mnic0
    cmd = "ifconfig int_mnic0 " + tc.int_mnic_ip + " netmask 255.255.255.0"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)
    # run plugctl to gracefully bring up the PCI device on host
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "./plugctl.sh in"
    api.Trigger_AddHostCommand(req, tc.bitw_node_name, cmd)
    resp = api.Trigger(req)
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)
    if cmd.exit_code != 0:
        api.Logger.error("Failed to gracefully bring up the PCI device on host %s" % \
                         tc.bitw_node_name)
        return api.types.status.FAILURE
    # get host internal mgmt intf
    host_intfs = naples_host.GetHostInternalMgmtInterfaces(tc.bitw_node_name)
    # Assuming single nic per host
    if len(host_intfs) == 0:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE
    intf = host_intfs[0]
    # Host side uses the Naples int_mnic address + 1 on the same /24.
    ip_addr = str(ip_address(tc.int_mnic_ip.rstrip()) + 1)
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "ifconfig " + str(intf) + " " + ip_addr + "/24 up"
    api.Trigger_AddHostCommand(req, tc.bitw_node_name, cmd)
    resp = api.Trigger(req)
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)
    if cmd.exit_code != 0:
        api.Logger.error("Failed to gracefully bring up the internal mgmt intf on host %s" % \
                         tc.bitw_node_name)
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def __installPenCtl(node):
    """Copy the penctl package and auth token to 'node', unpack, and record
    the resolved executable/token paths in common.PENCTL_EXEC[node] and
    common.PENCTL_TOKEN[node].

    Returns api.types.status.SUCCESS/FAILURE.
    """
    fullpath = api.GetTopDir() + '/' + common.PENCTL_PKG
    resp = api.CopyToHost(node, [fullpath], "")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy penctl to Node: %s" % node)
        return api.types.status.FAILURE
    fullpath = api.GetTopDir() + '/' + common.PENCTL_TOKEN_FILE
    resp = api.CopyToHost(node, [fullpath], "")
    if resp is None:
        return api.types.status.FAILURE
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy penctl token to Node: %s" % node)
        return api.types.status.FAILURE
    req = api.Trigger_CreateExecuteCommandsRequest()
    api.Trigger_AddHostCommand(req, node,
                               "tar -xvf %s" % os.path.basename(common.PENCTL_PKG) + " && sync",
                               background = False)
    # Resolve the absolute path of the penctl executable for this node's OS.
    execName = __penctl_exec(node)
    realPath = "realpath %s/%s " % (common.PENCTL_DEST_DIR, execName)
    api.Trigger_AddHostCommand(req, node, realPath, background = False)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    # commands[1] is the realpath output; the first line is the path.
    common.PENCTL_EXEC[node] = resp.commands[1].stdout.split("\n")[0]
    req = api.Trigger_CreateExecuteCommandsRequest()
    # Resolve the absolute path of the token file the same way.
    realPath = "realpath %s " % (common.PENCTL_TOKEN_FILE_NAME)
    api.Trigger_AddHostCommand(req, node, realPath, background = False)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    common.PENCTL_TOKEN[node] = resp.commands[0].stdout.split("\n")[0]
    return api.types.status.SUCCESS
def Trigger(tc):
    """Verify event-queue configuration consistency on the first Naples
    host: eth_eq_count (driver identity) and the number of 'ionic-*-eq'
    interrupts must be both zero or both non-zero.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    names = api.GetNaplesHostnames()
    hostname = names[0]
    if api.GetNodeOs(hostname) != host.OS_TYPE_LINUX:
        return api.types.status.SUCCESS
    for intf in api.GetNaplesHostInterfaces(hostname):
        api.Logger.info("Checking event queue use on host %s interface %s" %
                        (hostname, intf))
        pci = host.GetNaplesPci(hostname, intf)
        if pci is None:
            return api.types.status.FAILURE

        # get eth_eq_count and number of eq interrupts
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        cmd = "awk '/eth_eq_count/ {print $2}' < /sys/kernel/debug/ionic/" + pci + "/identity"
        api.Trigger_AddHostCommand(req, hostname, cmd)
        cmd = "grep -c -e 'ionic-" + pci + "-eq' /proc/interrupts"
        api.Trigger_AddHostCommand(req, hostname, cmd)
        resp = api.Trigger(req)
        if resp is None:
            api.Logger.error("Failed to get values from host %s interface %s" %
                             (hostname, intf))
            return api.types.status.FAILURE

        # Fixed: pop() returns the LAST command first, i.e. the grep that
        # counts interrupts; the original assigned that to eth_eq_count
        # (and the awk output to intr_count), swapping the labels and
        # error messages. The final comparison is symmetric, so only the
        # logs were wrong.
        cmd = resp.commands.pop()
        # exit code 1 from grep is "string not found" (count 0), which is a
        # valid answer here
        if cmd.exit_code > 1:
            api.Logger.error(
                "Failed to get interrupt count from host %s interface %s" %
                (hostname, intf))
            api.PrintCommandResults(cmd)
            return api.types.status.FAILURE
        intr_count = int(cmd.stdout.strip())

        cmd = resp.commands.pop()
        if cmd.exit_code != 0:
            api.Logger.error(
                "Failed to get eth_eq_count from host %s interface %s" %
                (hostname, intf))
            api.PrintCommandResults(cmd)
            return api.types.status.FAILURE
        eth_eq_count = int(cmd.stdout.strip())

        api.Logger.info(
            "Found eth_eq_count %d and interrupt count %d from host %s interface %s" %
            (eth_eq_count, intr_count, hostname, intf))
        if eth_eq_count == 0 and intr_count != 0:
            api.Logger.error("eq interrupts found when eth_eq_count == 0")
            return api.types.status.FAILURE
        elif eth_eq_count != 0 and intr_count == 0:
            api.Logger.error("No eq interrupts found when eth_eq_count != 0")
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def do_lif_reset_test(node, os):
    """Run three loops of: driver reload, per-interface single LIF reset
    with VLAN/filter preservation checks, then a background LIF-reset
    stress burst.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    for i in range(3):
        api.Logger.info("LIF reset and driver reload test loop %d" % i)
        if host.UnloadDriver(os, node, "all") is api.types.status.FAILURE:
            api.Logger.error("ionic unload failed loop %d" % i)
            return api.types.status.FAILURE
        if host.LoadDriver(os, node) is api.types.status.FAILURE:
            api.Logger.error("ionic load failed loop %d" % i)
            return api.types.status.FAILURE
        # Re-register workloads so traffic config survives the reload.
        wl_api.ReAddWorkloads(node)
        if api.GetNaplesHostInterfaces(node) is None:
            api.Logger.error("No ionic interface after loop %d" % i)
            return api.types.status.FAILURE
        for intf in api.GetNaplesHostInterfaces(node):
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            # Snapshot VLANs/filters before the reset for comparison after.
            vlan_list = getVlanList(node, intf)
            filter_list = getFilterList(node, intf)
            # Single LIF reset
            api.Trigger_AddHostCommand(
                req, node,
                "sysctl dev.%s.reset=1" % (host.GetNaplesSysctl(intf)))
            resp = api.Trigger(req)
            # Give the LIF time to come back before re-reading state.
            time.sleep(5)
            vlan_list1 = getVlanList(node, intf)
            filter_list1 = getFilterList(node, intf)
            if vlan_list != vlan_list1:
                api.Logger.error(
                    "VLAN list doesn't match for %s, before: %s after: %s" %
                    (intf, str(vlan_list), str(vlan_list1)))
                return api.types.status.FAILURE
            if filter_list != filter_list1:
                api.Logger.error(
                    "Filter list doesn't match for %s, before: %s after: %s" %
                    (intf, str(filter_list), str(filter_list1)))
                return api.types.status.FAILURE
            api.Logger.info(
                "Success running LIF reset test on %s VLAN: %s, Filters; %s" %
                (intf, str(vlan_list), str(filter_list)))
        # Now stress test LIF reset
        for intf in api.GetNaplesHostInterfaces(node):
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            api.Trigger_AddHostCommand(
                req, node,
                "for ((i=0;i<10;i++)); do sysctl dev.%s.reset=1; done &" %
                (host.GetNaplesSysctl(intf)))
            # Some of the LIF resets are expected to fail since they run in
            # the background concurrently with the driver reload of the
            # next loop iteration.
            resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Generate 1M of random data and write it to the NVMe namespace under
    test on the second node; the response lands in tc.resp."""
    target_host = tc.nodes[1]
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    api.Logger.info("Check Write on namespace at host {0}".format(target_host))
    api.Trigger_AddHostCommand(req, target_host,
                               "head -c 1M < /dev/urandom > /root/tmp1.txt")
    write_cmd = "nvme write /dev/{} -z {} -d /root/tmp1.txt".format(
        tc.iterators.namespace, tc.iterators.datasize)
    api.Trigger_AddHostCommand(req, target_host, write_cmd)
    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Program the interrupt-coalescing interval on every Naples interface
    and validate driver behavior: values under the supported max must be
    accepted; values over it must produce an OS-specific error message.

    Returns SUCCESS, FAILURE, or IGNORED for unsupported OSes.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    if tc.os != host.OS_TYPE_BSD and tc.os != host.OS_TYPE_LINUX:
        api.Logger.info("Not implemented for %s" % tc.os)
        return api.types.status.IGNORED
    # set interrupt coalescing value
    # NOTE(review): the comparisons below assume host.OS_TYPE_LINUX ==
    # 'linux' and host.OS_TYPE_BSD == 'freebsd' -- confirm the constants.
    for node in tc.nodes:
        interfaces = api.GetNaplesHostInterfaces(node)
        for interface in interfaces:
            api.Logger.info("Set Interrupt Coalescing on %s:%s to %d" % \
                            (node, interface, \
                             tc.iterators.coales_interval))
            if tc.os == 'linux':
                api.Trigger_AddHostCommand(req, node,
                                           "ethtool -C %s rx-usecs %d" % \
                                           (interface, \
                                            tc.iterators.coales_interval))
            elif tc.os == 'freebsd':
                api.Trigger_AddHostCommand(req, node,
                                           "sysctl dev.%s.intr_coal=%d" % \
                                           (host.GetNaplesSysctl(interface), \
                                            tc.iterators.coales_interval))
    tc.resp = api.Trigger(req)
    if tc.resp is None:
        api.Logger.error("Command failed to respond")
        return api.types.status.FAILURE
    # validate the command response
    # for > than max, expect an error and a specific message
    for cmd in tc.resp.commands:
        if tc.iterators.coales_interval < tc.args.max_coales_interval:
            if cmd.exit_code != 0:
                # linux ethtool will not set the value if same as current
                if cmd.stderr.find("unmodified, ignoring") == -1:
                    api.Logger.error("Failed to set interrupt coalescing")
                    api.Logger.info(cmd.stderr)
                    return api.types.status.FAILURE
        else:
            if tc.os == 'linux':
                if cmd.stderr.find("out of range") == -1:
                    api.Logger.error("ionic did not error when coales value set (%d) > than supported (%d)" \
                                     %(tc.iterators.coales_interval, tc.args.max_coales_interval))
                    api.Logger.info(cmd.stderr)
                    return api.types.status.FAILURE
            elif tc.os == 'freebsd':
                if cmd.stderr.find("large") == -1:
                    api.Logger.error(
                        "ionic did not error when coales value set > than supported"
                    )
                    api.Logger.info(cmd.stderr)
                    return api.types.status.FAILURE
    return api.types.status.SUCCESS
def __execute_cmd(node_name, cmd, timeout=None):
    """Run a single host command on 'node_name' and return the response.

    Raises RuntimeError when the trigger itself fails. The API's default
    timeout is used unless 'timeout' is given.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    extra = {"timeout": timeout} if timeout else {}
    api.Trigger_AddHostCommand(req, node_name, cmd, **extra)
    resp = api.Trigger(req)
    if resp is None:
        raise RuntimeError("Failed to trigger on host %s cmd %s" %
                           (node_name, cmd))
    return resp
def Teardown(tc):
    """Unload the krping module from every node (rmmod on Linux,
    kldunload elsewhere)."""
    term_req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    unload_cmd = ("rmmod rdma_krping" if tc.os == host.OS_TYPE_LINUX
                  else "kldunload krping")
    for n in tc.nodes:
        api.Trigger_AddHostCommand(term_req, n, unload_cmd)
    api.Trigger(term_req)
    return api.types.status.SUCCESS
def send_pkt_h2s(tc, node, flow, pkt_gen):
    """Send and receive packets in the host-to-switch (H2S) direction for
    'flow', appending the aggregated command response to tc.resp.

    A background sniffer is started on the up0 uplink before the generator
    transmits on up1; setter-call order on pkt_gen matters.
    """
    # Send and Receive packets in H2S direction
    pkt_gen.set_dir_('h2s')
    pkt_gen.set_sip(flow.sip)
    pkt_gen.set_dip(flow.dip)
    if flow.proto == 'UDP' or flow.proto == 'TCP':
        pkt_gen.set_sport(flow.sport)
        pkt_gen.set_dport(flow.dport)
    h2s_req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    # ==========
    # Rx Packet
    # ==========
    # Expected (encapsulated) packet on the receive side.
    pkt_gen.set_encap(True)
    pkt_gen.set_Rx(True)
    pkt_gen.set_vlan(tc.up0_vlan)
    pkt_gen.set_smac(tc.up1_mac)
    pkt_gen.set_dmac(tc.up0_mac)
    pkt_gen.setup_pkt()
    recv_cmd = "./recv_pkt.py --intf_name %s --pcap_fname %s "\
               "--timeout %s --pkt_cnt %d" % (tc.up0_intf,
                                              pktgen.DEFAULT_H2S_RECV_PKT_FILENAME,
                                              str(SNIFF_TIMEOUT), tc.pkt_cnt)
    # Background so the sniffer is listening before we transmit.
    api.Trigger_AddHostCommand(h2s_req, node.Name(), recv_cmd,
                               background=True)
    # ==========
    # Tx Packet
    # ==========
    # Unencapsulated packet injected on the transmit side.
    pkt_gen.set_encap(False)
    pkt_gen.set_Rx(False)
    pkt_gen.set_smac(tc.up1_mac)
    pkt_gen.set_dmac(tc.up0_mac)
    pkt_gen.set_vlan(tc.up1_vlan)
    pkt_gen.setup_pkt()
    send_cmd = "./send_pkt.py --intf_name %s --pcap_fname %s "\
               "--pkt_cnt %d" % (tc.up1_intf,
                                 pktgen.DEFAULT_H2S_GEN_PKT_FILENAME,
                                 tc.pkt_cnt)
    # Short delay gives the background receiver time to start.
    api.Trigger_AddHostCommand(h2s_req, node.Name(), 'sleep 0.5')
    api.Trigger_AddHostCommand(h2s_req, node.Name(), send_cmd)
    trig_resp = api.Trigger(h2s_req)
    # Wait out the sniff window, then reap the background receiver and
    # merge both responses.
    time.sleep(SNIFF_TIMEOUT)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    h2s_resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    tc.resp.append(h2s_resp)
def Verify(tc):
    """Read the coalescing value back from the driver and verify it matches
    what Trigger() programmed (linux rounds down to a multiple of the
    Naples interrupt period; freebsd echoes the programmed value).

    Returns api.types.status.SUCCESS/FAILURE.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    coales_period = tc.args.coales_period
    if tc.resp is None:
        api.Logger.error("Command failed to respond")
        return api.types.status.FAILURE
    # get the current coalescing value from FW/Driver
    for n in tc.nodes:
        intfs = api.GetNaplesHostInterfaces(n)
        api.Logger.info("Retrieve coalescing value from interfaces")
        for i in intfs:
            if tc.os == 'linux':
                api.Trigger_AddHostCommand(req, n, "ethtool -c %s" % i)
            elif tc.os == 'freebsd':
                api.Trigger_AddHostCommand(req, n,
                                           "sysctl dev.%s.curr_coal_us" %
                                           (host.GetNaplesSysctl(i)))
    tc.resp = api.Trigger(req)
    if tc.resp is None:
        api.Logger.error("Command failed to respond")
        return api.types.status.FAILURE
    # expecting the following value back from FW/Driver
    if tc.os == 'linux':
        # linux driver returns coalescing interval as uSecs, rounded down
        # to a multiple of the Naples interrupt period
        current_coalescing = str(int(tc.iterators.coales_interval / coales_period)
                                 * coales_period)
    elif tc.os == 'freebsd':
        # freebsd returns coalescing value, same as what user programmed.
        current_coalescing = str(int(tc.iterators.coales_interval))
    for cmd in tc.resp.commands:
        if cmd.exit_code != 0:
            api.Logger.error("Failed to read interrupt coalescing value")
            api.Logger.info(cmd.stderr)
            return api.types.status.FAILURE
        # for all values < max, validate returned value
        if tc.iterators.coales_interval < tc.args.max_coales_interval:
            # Fixed: Logger.info was called with two positional arguments
            # ("...: ", value), which does not concatenate like print();
            # format the value into a single string.
            api.Logger.info("Expecting Coalescing Value: %s" % current_coalescing)
            if cmd.stdout.find(current_coalescing) == -1:
                api.Logger.info("Failed to set coalescing value")
                api.PrintCommandResults(cmd)
                return api.types.status.FAILURE
    return api.types.status.SUCCESS
def untar():
    """Verify the tech-support tarball exists on each node and unpack it.

    Returns api.types.status.SUCCESS/FAILURE.

    NOTE(review): 'tc' and 'def_tech_support_file_name' are neither
    parameters nor locals -- they must resolve as module-level globals
    for this to run; also 'tc.Nodes' differs from the 'tc.nodes' spelling
    used elsewhere in this file. Confirm both.
    """
    req = api.Trigger_CreateExecuteCommandsRequest()
    for n in tc.Nodes:
        api.Trigger_AddHostCommand(req, n,
                                   "ls %s" % (def_tech_support_file_name))
        api.Trigger_AddHostCommand(
            req, n, "tar -xvzf %s" % (def_tech_support_file_name))
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def __load_linux_driver(node, node_os, manifest_file):
    """Copy, build, and load the ionic driver package described by
    'manifest_file' onto 'node'.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    image_manifest = parser.JsonParse(manifest_file)
    # First image of the first driver entry matching this OS.
    driver_images = list(filter(lambda x: x.OS == node_os, image_manifest.Drivers))[0].Images[0]
    if driver_images is None:
        api.Logger.error("Unable to load image manifest")
        return api.types.status.FAILURE
    # NOTE(review): 'Gl' is not defined anywhere visible in this file --
    # it looks like a truncated name; confirm it is a module-level base
    # path before relying on this function.
    drImgFile = os.path.join(Gl, driver_images.drivers_pkg)
    api.Logger.info("Fullpath for driver image: " + drImgFile)
    resp = api.CopyToHost(node, [drImgFile], "")
    if not api.IsApiResponseOk(resp):
        api.Logger.error("Failed to copy %s" % drImgFile)
        return api.types.status.FAILURE
    # Working directory = package basename without extension.
    rundir = os.path.basename(driver_images.drivers_pkg).split('.')[0]
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    api.Trigger_AddHostCommand(req, node,
                               "tar -xf " + os.path.basename(driver_images.drivers_pkg))
    api.Trigger_AddHostCommand(req, node, "./build.sh", rundir=rundir)
    resp = api.Trigger(req)
    if not api.IsApiResponseOk(resp):
        api.Logger.error("TriggerCommand for driver build failed")
        return api.types.status.FAILURE
    for cmd in resp.commands:
        # Build.sh could fail -ignored (FIXME)
        if cmd.exit_code != 0 and cmd.command != './build.sh':
            api.Logger.error("Failed to exec cmds to build/load new driver")
            return api.types.status.FAILURE
    api.Logger.info("New driver image is built on target host. Prepare to load")
    # Best-effort unload of the current driver before loading the new one.
    if host.UnloadDriver(node_os, node) != api.types.status.SUCCESS:
        api.Logger.error("Failed to unload current driver - proceeding")
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    if node_os == OS_TYPE_LINUX:
        api.Trigger_AddHostCommand(req, node,
                                   "insmod " + os.path.join(rundir, "drivers/eth/ionic/ionic.ko"))
    elif node_os == OS_TYPE_BSD:
        api.Trigger_AddHostCommand(req, node,
                                   "kldload " + os.path.join(rundir, "drivers/eth/ionic/ionic.ko"))
    resp = api.Trigger(req)
    if not api.Trigger_IsSuccess(resp):
        api.Logger.error("TriggerCommand for driver installation failed")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def debug_dump_interface_info(node, interface):
    """Dump interface configuration details of 'interface' on 'node',
    adding OS-specific link/multicast (linux) or socket (freebsd) info."""
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    commands = ["ifconfig " + interface]
    node_os = api.GetNodeOs(node)
    if node_os == "linux":
        commands.append("ip -d link show " + interface)
        commands.append("ip maddr show " + interface)
    elif node_os == "freebsd":
        commands.append("netstat -aI " + interface)
    for c in commands:
        api.Trigger_AddHostCommand(req, node, c)
    return debug_dump_display_info(api.Trigger(req))
def Trigger(tc):
    """Unload the nvme kernel module on every node in the testcase;
    results go to tc.resp."""
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Logger.info(
        "Uninstalling nvme driver on the following nodes: {0}".format(
            tc.nodes))
    # TODO (from original): change it to only run on the host node.
    for n in tc.nodes:
        api.Trigger_AddHostCommand(req, n, "rmmod nvme")
        # Short pause so the device state settles before proceeding.
        api.Trigger_AddHostCommand(req, n, "sleep 2")
    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def __node_api_handler(self, url, json_data=None, oper=CfgOper.ADD):
    """Issue a REST call against 'url' by running curl on self.host_name.

    'oper' (a CfgOper value) maps to the HTTP method. Payloads larger than
    100 KB are staged on the host as a file and sent via curl's @file
    syntax. Returns curl's stdout.
    """
    if oper == CfgOper.DELETE:
        oper = "DELETE"
    elif oper == CfgOper.ADD:
        oper = "POST"
    elif oper == CfgOper.UPDATE:
        oper = "PUT"
    elif oper == CfgOper.GET:
        oper = "GET"
    else:
        print(oper)
        assert (0)
    if GlobalOptions.debug:
        api.Logger.info("Url : %s" % url)
    cmd = None
    if json_data and len(json.dumps(json_data)) > 100000:
        # Payload too large for the command line: stage it as a file on
        # the host and reference it with curl -d @file.
        filename = "/tmp/temp_config.json"
        with open(filename, 'w') as outfile:
            json.dump(json_data, outfile)
        req = api.Trigger_CreateAllParallelCommandsRequest()
        cmd = ["rm", "-rf", "temp_config.json"]
        cmd = " ".join(cmd)
        api.Trigger_AddHostCommand(req, self.host_name, cmd, timeout=3600)
        api.Trigger(req)
        resp = api.CopyToHost(self.host_name, [filename], "")
        if not api.IsApiResponseOk(resp):
            assert (0)
        cmd = [
            "curl", "-X", oper, "-d", "@temp_config.json", "-k", "-H",
            "\"Content-Type:application/json\"", url
        ]
    else:
        cmd = [
            "curl", "-X", oper, "-k", "-d",
            "\'" + json.dumps(json_data) + "\'" if json_data else " ",
            "-H", "\"Content-Type:application/json\"", url
        ]
    cmd = " ".join(cmd)
    req = api.Trigger_CreateAllParallelCommandsRequest()
    api.Trigger_AddHostCommand(req, self.host_name, cmd, timeout=3600)
    resp = api.Trigger(req)
    if GlobalOptions.debug:
        # Fixed: cmd is already a joined string here; the original
        # " ".join(cmd) would print it space-separated per character.
        print(cmd)
    return resp.commands[0].stdout
def getLinuxStats(node, intf, pat1):
    """Collect 'ethtool -S' counters matching 'pat1' on interface 'intf'.

    Returns a list of ints (one per matching counter), or None on failure.
    """
    cmd = 'ethtool -S ' + intf + ' | grep -e ' + pat1 + ' | cut -d ":" -f 2'
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddHostCommand(req, node.node_name, cmd)
    resp = api.Trigger(req)
    if resp is None:
        api.Logger.error("Failed to run: %s on host: %s intf: %s"
                         % (cmd, node.node_name, intf))
        return None
    # Fixed: the original rebound 'cmd' to the response object, so the
    # error logs printed the object's repr instead of the command string.
    result = resp.commands[0]
    if result.exit_code != 0:
        api.Logger.error(
            "Failed to run: %s for host: %s, stderr: %s"
            % (cmd, node.node_name, result.stderr))
        api.PrintCommandResults(result)
        return None
    if result.stdout == "":
        api.Logger.error("Output is empty for: %s on host: %s intf: %s"
                         % (cmd, node.node_name, intf))
        api.PrintCommandResults(result)
        return None
    return list(map(int, result.stdout.splitlines()))
def getWindowsStats(node, intf, pat1):
    """Collect IonicConfig DevStats counters matching 'pat1' for the
    Windows adapter backing 'intf'.

    Returns a list of ints (one per matching counter), or None on failure.
    """
    name = host.GetWindowsIntName(node.node_name, intf)
    cmd = "/mnt/c/Windows/Temp/drivers-windows/IonicConfig.exe DevStats -n '%s' | grep -e %s |" \
          " cut -d ':' -f 2" % (name, pat1)
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddHostCommand(req, node.node_name, cmd)
    resp = api.Trigger(req)
    if resp is None:
        api.Logger.error("Failed to run: %s on host: %s intf: %s"
                         % (cmd, node.node_name, intf))
        return None
    # Fixed: the original rebound 'cmd' to the response object, so the
    # error logs printed the object's repr instead of the command string.
    result = resp.commands[0]
    if result.exit_code != 0:
        api.Logger.error(
            "Failed to run: %s for host: %s, stderr: %s"
            % (cmd, node.node_name, result.stderr))
        api.PrintCommandResults(result)
        return None
    if result.stdout == "":
        api.Logger.error("Output is empty for: %s on host: %s intf: %s"
                         % (cmd, node.node_name, intf))
        api.PrintCommandResults(result)
        return None
    return list(map(int, result.stdout.splitlines()))
def start_single_pcap_capture(tc):
    """Start a background tcpdump capture on the interface of the first
    remote workload pair; the trigger response is saved in tc.pcap_trigger.

    Best-effort: always returns SUCCESS, even when the capture could not
    be started (preserved from the original behavior).
    """
    try:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        tc.pcap_cmds = []
        # Fixed: the original referenced an undefined name 'n' (the
        # NameError was swallowed by the bare except); run the capture on
        # the node that owns the chosen interface.
        wl = api.GetRemoteWorkloadPairs()[0][0]
        intf = wl.interface
        tc.pcap_filename = pcap_file_name(intf)
        cmd = cmd_builder.tcpdump_cmd(intf, tc.pcap_filename)
        api.Trigger_AddHostCommand(req, wl.node_name, cmd, background=True)
        resp = api.Trigger(req)
        for cmd in resp.commands:
            if cmd.handle is None or len(cmd.handle) == 0:
                api.Logger.error("Error starting pcap : %s " % cmd.command)
                api.Logger.error("Std Output : %s " % cmd.stdout)
                # Fixed: stderr was logged from cmd.stdout.
                api.Logger.error("Std Err : %s " % cmd.stderr)
                return api.types.status.FAILURE
            api.Logger.info("Success running cmd : %s" % cmd.command)
        tc.pcap_trigger = resp
        return api.types.status.SUCCESS
    except Exception:
        # Narrowed from a bare except; still best-effort by design.
        api.Logger.info("failed to start single pcap capture")
        api.Logger.debug(
            "failed to start single pcap capture. error was: {0}".format(
                traceback.format_exc()))
        return api.types.status.SUCCESS
def ChangeHostLifsAdminStatus(tc, shutdown=False):
    """Bring every host LIF interface up or down (shutdown=True -> down).

    Uses a workload-scoped command when the LIF maps to a workload,
    otherwise a plain host command. Returns SUCCESS/FAILURE.
    """
    status_word = "down" if shutdown else "up"
    req = api.Trigger_CreateAllParallelCommandsRequest()
    tc.cmd_cookies = []
    for node in tc.nodes:
        for intf_wl in tc.host_lifs[node]:
            intf, wl = next(iter(intf_wl.items()))
            # Change admin status
            cmd = "ifconfig %s %s" % (intf, status_word)
            if wl:
                tc.cmd_cookies.append(
                    "Node: %s, WL: %s, intf: %s, shutdown: %s" %
                    (node, wl, intf, shutdown))
                api.Trigger_AddCommand(req, node, wl, cmd)
            else:
                tc.cmd_cookies.append(
                    "Node: %s, intf: %s, shutdown: %s" %
                    (node, intf, shutdown))
                api.Trigger_AddHostCommand(req, node, cmd)
    tc.resp = api.Trigger(req)
    result = api.types.status.SUCCESS
    for cookie, cmd in zip(tc.cmd_cookies, tc.resp.commands):
        if cmd.exit_code != 0:
            result = api.types.status.FAILURE
            api.Logger.info("Failed to change Admin for %s" % cookie)
            api.PrintCommandResults(cmd)
    return result
def Main(step):
    """Stage the iperf3 binary on every Naples (plus the ionic stats
    script on BSD hosts) and symlink it into place.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    if GlobalOptions.skip_setup:
        return api.types.status.SUCCESS
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.ChangeDirectory("iperf")
    for naples_host in api.GetNaplesHostnames():
        if api.GetNodeOs(naples_host) == host.OS_TYPE_BSD:
            api.CopyToHost(naples_host, [IONIC_STATS_SCRIPT], "")
            api.Trigger_AddHostCommand(
                req, naples_host, "cp ionic_stats.sh " + api.HOST_NAPLES_DIR)
        # NOTE(review): the collapsed original is ambiguous on whether the
        # iperf copy applies to BSD hosts only; it is kept for all hosts
        # here -- confirm.
        api.CopyToNaples(naples_host, [IPERF_BINARY], "", naples_dir="/usr/bin/")
        api.Trigger_AddNaplesCommand(
            req, naples_host, "ln -s /usr/bin/iperf3_aarch64 /usr/bin/iperf3")
    resp = api.Trigger(req)
    result = api.types.status.SUCCESS
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            # Fixed: failures were recorded in 'result' but the function
            # always returned SUCCESS; propagate the failure.
            result = api.types.status.FAILURE
    return result