def Setup(tc):
    """Prepare the ibv_rc_pingpong test: pick one remote workload pair and
    record each workload's RDMA device, GID and command prefix."""
    tc.desc = '''
    Test: ibv_rc_pingpong
    Opcode: N/A
    Num QP: 1
    modes: workload1 as server, workload2 as client
           workload2 as server, workload1 as client
    '''
    tc.iota_path = api.GetTestsuiteAttr("driver_path")
    # The first remote pair supplies one workload on each node.
    first_wl, second_wl = api.GetRemoteWorkloadPairs()[0]
    tc.w = [first_wl, second_wl]

    tc.devices = [api.GetTestsuiteAttr(wl.ip_address + '_device') for wl in tc.w]
    tc.gid = [api.GetTestsuiteAttr(wl.ip_address + '_gid') for wl in tc.w]
    # Naples workloads run the RDMA binaries through the run_rdma.sh wrapper.
    naples_prefix = 'cd ' + tc.iota_path + ' && ./run_rdma.sh '
    tc.ib_prefix = [naples_prefix if wl.IsNaples() else '' for wl in tc.w]

    tc.stats_results = []
    return api.types.status.SUCCESS
def Setup(tc):
    """Pull the KNI workload handles and node/IP attributes published by an
    earlier testcase in this suite onto the testcase object."""
    attr_map = (
        ('wl0', 'kni_wl'),
        ('sub_wl', 'kni_sub_wl'),
        ('bitw_node_name', 'bitw_node_name'),
        ('wl_node_name', 'wl_node_name'),
        ('mnic_p2p_ip', 'mnic_p2p_ip'),
    )
    for field, attr in attr_map:
        setattr(tc, field, api.GetTestsuiteAttr(attr))
    return api.types.status.SUCCESS
def Setup(tc):
    """Parse testcase args, pick up node/interface attributes published by
    earlier testcases, and push the Athena device.json onto the bitw node."""
    parse_args(tc)

    tc.bitw_node_name = api.GetTestsuiteAttr("bitw_node_name")
    tc.wl_node_name = api.GetTestsuiteAttr("wl_node_name")
    tc.intfs = api.GetTestsuiteAttr("inb_mnic_intfs")

    device_json_fname = '%s/nic/conf/athena/device.json' % api.GetTopDir()
    api.CopyToNaples(tc.bitw_node_name, [device_json_fname], "")
    return api.types.status.SUCCESS
def Setup(tc):
    """rdma_perftest_bw setup: choose the workload pair, skip configurations
    known to be broken or unsupported, and record device/GID/prefix info."""
    tc.desc = '''
    Test : rdma_perftest_bw
    Opcode : Send, Read, Write
    Num QP : 1, 2, ..., 1000
    Transport : RC
    MTU : 4096
    RDMA CM : Yes, No
    modes : workload1 as server, workload2 as client
            workload2 as server, workload1 as client
    '''
    tc.iota_path = api.GetTestsuiteAttr("driver_path")
    # One workload on each node from the first remote pair.
    tc.w = list(api.GetRemoteWorkloadPairs()[0])
    tc.nodes = api.GetNaplesHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])

    if getattr(tc.iterators, 'transport', None) == 'UD':
        # UD in user space is broken with ib_uverbs of older uek kernels.
        for name in api.GetTestsuiteAttr("unames"):
            m = re.match(r'^4\.14\.35-(\d+)\..*\.el7uek', name)
            if m and int(m.group(1)) < 1844:
                api.Logger.info("Skip UD perftest with uname %s" % (name, ))
                return api.types.status.IGNORED

    tc.devices = [api.GetTestsuiteAttr(wl.ip_address + '_device') for wl in tc.w]
    tc.gid = [api.GetTestsuiteAttr(wl.ip_address + '_gid') for wl in tc.w]
    # Naples workloads run the binaries through the run_rdma.sh wrapper.
    rdma_prefix = 'cd ' + tc.iota_path + ' && ./run_rdma.sh '
    tc.ib_prefix = [rdma_prefix if wl.IsNaples() else '' for wl in tc.w]

    if getattr(tc.iterators, 'tcpdump', None) == 'yes':
        # tcpdump variants only run when every Naples node is FreeBSD.
        for node in api.GetNaplesHostnames():
            if api.GetNodeOs(node) not in [host.OS_TYPE_BSD]:
                api.Logger.info("IGNORED: TCPDUMP tests on non-FreeBSD")
                return api.types.status.IGNORED
    return api.types.status.SUCCESS
def Setup(tc):
    """Record the Athena node name (single-nic topology: first and only
    (node, nic) pair) plus the script paths published by the suite."""
    node_nic_pairs = athena_app_utils.get_athena_node_nic_names()
    tc.bitw_node_name = node_nic_pairs[0][0]

    tc.preinit_script_path = api.GetTestsuiteAttr("preinit_script_path")
    tc.start_agent_script_path = api.GetTestsuiteAttr("start_agent_script_path")
    return api.types.status.SUCCESS
def PostTrafficTestCommands(req, tc, w, pcp_or_dscp):
    """Queue post-traffic verification commands on a Naples workload:
    per-port PFC/PAUSE counters and buffer-occupancy drop statistics."""
    if not w.IsNaples():
        api.Logger.info("node {} is not Naples; cannot check for PFC frames".format(w.node_name));
        return

    # Last character of the cached device attr selects the uplink port.
    dev = api.GetTestsuiteAttr(w.ip_address+'_device')[-1]
    port = {'0': 'Eth1/1', '1': 'Eth1/2'}.get(dev)
    if port is None:
        api.Logger.info("invalid dev number {}; defaulting to port 1".format(str(dev)))
        port = 'Eth1/1'

    cmd = '/nic/bin/halctl show port --port ' + port + ' statistics'
    api.Logger.info("Running command {} on node_name {} workload_name {}"\
        .format(cmd, w.node_name, w.workload_name))
    api.Trigger_AddNaplesCommand(req, w.node_name, cmd)
    mode = "pcp" if tc.class_type == 1 else "dscp"
    tc.cmd_cookies.append(cmd + " for " + mode + " " + str(pcp_or_dscp))

    # show drops command
    cmd = '/nic/bin/halctl show system statistics drop | grep -i "occupancy"'
    api.Logger.info("Running show drops command {} on node_name {}"\
        .format(cmd, w.node_name))
    api.Trigger_AddNaplesCommand(req, w.node_name, cmd)
    tc.cmd_cookies.append("show drops cmd for node {} ip_address {}".format(w.node_name, w.ip_address))
def Setup(tc):
    """Locate the testcase vnic in each node's dataplane policy and select
    the matching workload on each workload node."""
    parse_args(tc)

    tc.athena_node_nic_pairs = athena_app_utils.get_athena_node_nic_names()
    tc.wl_node_nic_pairs = utils.get_classic_node_nic_pairs()
    tc.wl_node_nic_pairs.sort(key=lambda pair: pair[0])  # order by node name
    # tc.wl_node_nic_pairs[0] -> (node1, naples(classic))
    # tc.wl_node_nic_pairs[1] -> (node2, naples(classic))
    node_names = [pair[0] for pair in tc.wl_node_nic_pairs]
    node_workloads = [api.GetWorkloads(node_names[0]),
                      api.GetWorkloads(node_names[1])]

    # Load both nodes' policy.json and find the vnic index for this testcase.
    plcy_objs = []
    for attr in ("node1_dp_policy_json_path", "node2_dp_policy_json_path"):
        with open(api.GetTestsuiteAttr(attr)) as fd:
            plcy_objs.append(json.load(fd))
    vnic_idxs = [utils.get_vnic_pos(obj, tc.vnic_type, tc.nat,
                                    _stateful=tc.stateful)
                 for obj in plcy_objs]
    api.Logger.info('node1 vnic idx %d, node2 vnic idx %d' % (
                    vnic_idxs[0], vnic_idxs[1]))

    # Use workloads on up0 for node1 and on up1 for node2 since they match
    # the switch vlan config. tc.wl[0] is node1's, tc.wl[1] is node2's.
    tc.wl = [node_workloads[uplink][utils.get_wl_idx(uplink,
                                                     vnic_idxs[uplink] + 1)]
             for uplink in (0, 1)]

    # fetch vnic_id for flow verification later
    tc.node1_vnic_id = utils.get_vnic_id(plcy_objs[0], tc.vnic_type,
                                         tc.nat, _stateful=tc.stateful)
    tc.node2_vnic_id = utils.get_vnic_id(plcy_objs[1], tc.vnic_type,
                                         tc.nat, _stateful=tc.stateful)
    return api.types.status.SUCCESS
def Main(tc):
    """Verify that no unexpected link flaps occurred on any Naples node.

    Compares each node's total link-status-change count against the baseline
    recorded before this test. Two testsuite attributes modify behavior:
      - LinkTestValidationDisable == 1: skip validation entirely.
      - LinkTestValidationSkip == 1: record a new baseline without failing
        (one-shot flag; cleared here after it is read).

    Returns api.types.status.SUCCESS or FAILURE.
    """
    api.Logger.info("Verify Link .")

    isDisabled = api.GetTestsuiteAttr("LinkTestValidationDisable")
    if isDisabled == 1:
        # Fixed typo in the original message ("gloabal").
        api.Logger.info(
            "Verify Links is disabled through LinkTestValidationDisable global var"
        )
        return api.types.status.SUCCESS

    isSkip = api.GetTestsuiteAttr("LinkTestValidationSkip")
    if isSkip == 1:
        # Original message named the wrong attribute (LinkTestValidationDisable).
        api.Logger.info(
            "Verify Link test will be skipped due to LinkTestValidationSkip global var"
        )
        # One-shot flag: clear it so the next run validates normally.
        api.SetTestsuiteAttr("LinkTestValidationSkip", 0)

    finalResult = api.types.status.SUCCESS
    for naples_host in api.GetNaplesHostnames():
        totalLinkFlaps = linkmgr.GetLinkStatusChangeCount(naples_host)
        # Use identity comparison for None (was '== None').
        if totalLinkFlaps is None:
            if GlobalOptions.dryrun:
                return api.types.status.SUCCESS
            api.Logger.error(
                "Failure happened while trying to read number of link flaps on Naples:"
                + naples_host)
            return api.types.status.FAILURE

        lastLinkFlaps = linkmgr.GetLastLinkStatusChangeCount(naples_host)
        if isSkip == 1:
            # Skip requested: just re-baseline this node, no pass/fail check.
            linkmgr.ResetLastLinkStatusChangeCount(naples_host, totalLinkFlaps)
        elif totalLinkFlaps == lastLinkFlaps:
            api.Logger.info(
                "Verify Link test passed. No new link flaps were detected on Naples "
                + naples_host)
        elif lastLinkFlaps < totalLinkFlaps:
            api.Logger.error(
                "Verify Link test failed on Naples [%s]. [%d] new link flaps were detected"
                % (naples_host, totalLinkFlaps - lastLinkFlaps))
            finalResult = api.types.status.FAILURE
        else:
            # Counter went backwards (total < baseline) — should not happen;
            # flag it as an unknown error.
            api.Logger.error(
                "Verify Link test failed on Naples [%s] with UNKNOWN ERROR. Number of total link flaps [%d] is less than number of link flaps detected before this test [%d]"
                % (naples_host, totalLinkFlaps, lastLinkFlaps))
            finalResult = api.types.status.FAILURE

        # Always update the baseline so subsequent tests only see new flaps.
        linkmgr.ResetLastLinkStatusChangeCount(naples_host, totalLinkFlaps)
    return finalResult
def GetStaticFlowInfo(self):
    """Load the dataplane policy json and cache static UDP/TCP v4 flow
    ranges plus the L2/L3 NAT address-translation ranges."""
    with open(api.GetTestsuiteAttr("dp_policy_json_path")) as fd:
        plcy_obj = json.load(fd)

    flow_keys = ('sip_lo', 'sip_hi', 'dip_lo', 'dip_hi',
                 'sport_lo', 'sport_hi', 'dport_lo', 'dport_hi')
    for v4_flow in plcy_obj.get('v4_flows') or []:
        proto = v4_flow['proto']
        if proto == '17':  # UDP
            self.udp_flow_set.static_udp_flow_info.append(
                {key: v4_flow[key] for key in flow_keys})
        elif proto == '6':  # TCP
            self.tcp_flow_set.static_tcp_flow_info.append(
                {key: v4_flow[key] for key in flow_keys})
        # TODO: icmp static info not available

    # Get nat txlate info for both vnic types.
    nat_keys = ('local_ip_lo', 'local_ip_hi', 'nat_ip_lo', 'nat_ip_hi')
    for vnic_type in ('L3', 'L2'):
        vnic = GetVnic(vnic_type, 'yes', plcy_obj)
        nat_info = self.nat_flow_set.nat_txlate[vnic_type]
        for key in nat_keys:
            nat_info[key] = vnic['nat'][key]
def QosGetTcForDscp(dscp):
    """Map a DSCP value to its traffic class using the testsuite's cached
    'dscp_<lo>_<hi>_to_tc' attribute (one attribute per block of 8 values)."""
    dscp_val = int(dscp)
    low = (dscp_val // 8) * 8
    high = low + 7
    dscp_to_tc = api.GetTestsuiteAttr('dscp_%d_%d_to_tc' % (low, high))
    # Entries within the attribute are spaced at stride 2; index directly.
    return dscp_to_tc[2 * (dscp_val % 8)]
def Setup(tc):
    """QoS traffic test setup: one workload pair from the first entry and
    one from the vlan entry (4 workloads total) plus RDMA device info."""
    tc.desc = '''
    Test : QoS Traffic test
    Opcode : Config, Verify
    '''
    tc.nodes = api.GetNaplesHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])
    pairs = api.GetRemoteWorkloadPairs()
    tc.cmd_cookies = []
    tc.server_idx = 0
    tc.client_idx = 0
    tc.iota_path = api.GetTestsuiteAttr("driver_path")
    tc.vlan_idx = api.GetTestsuiteAttr("vlan_idx")

    # Workloads 0/1 come from the first pair, 2/3 from the vlan pair.
    tc.w = [pairs[0][0], pairs[0][1],
            pairs[tc.vlan_idx][0], pairs[tc.vlan_idx][1]]

    tc.devices = [api.GetTestsuiteAttr(wl.ip_address + '_device') for wl in tc.w]
    tc.gid = [api.GetTestsuiteAttr(wl.ip_address + '_gid') for wl in tc.w]
    prefix = 'cd ' + tc.iota_path + ' && ./run_rdma.sh '
    tc.ib_prefix = [prefix if wl.IsNaples() else '' for wl in tc.w]
    return api.types.status.SUCCESS
def Verify(tc):
    """Verify show_gid results and publish per-workload device/GID, QoS
    sysctl values and the lif list as testsuite attributes.

    Relies on command order: tc.resp.commands[i + 1] is assumed to be the
    show_gid output for workload w[i] — TODO confirm against Trigger().
    Returns api.types.status.SUCCESS or FAILURE.
    """
    if tc.resp is None:
        return api.types.status.FAILURE
    api.Logger.info("show_gid results")
    # Any failed foreground command fails the whole verification.
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            return api.types.status.FAILURE
    #set the path for testcases in this testsuite to use
    w = [tc.w1, tc.w2, tc.vlan_w1, tc.vlan_w2]
    for i in range(len(w)):
        # Dry runs publish placeholder '0' values instead of parsing output.
        if api.IsDryrun():
            api.SetTestsuiteAttr(w[i].ip_address + "_device", '0')
        else:
            api.SetTestsuiteAttr(
                w[i].ip_address + "_device",
                rdma.GetWorkloadDevice(tc.resp.commands[i + 1].stdout))
        if api.IsDryrun():
            api.SetTestsuiteAttr(w[i].ip_address + "_gid", '0')
        else:
            api.SetTestsuiteAttr(
                w[i].ip_address + "_gid",
                rdma.GetWorkloadGID(tc.resp.commands[i + 1].stdout))
    # Second pass: dispatch on the cookie recorded for each command.
    cookie_idx = 0
    for cmd in tc.resp.commands:
        if "show drops cmd" in tc.cmd_cookies[cookie_idx]:
            # Cookie format: "show drops cmd for node <name> ip_address <ip>"
            # (positions 5 and -1 after split).
            cookie_attrs = tc.cmd_cookies[cookie_idx].split()
            ip_address = cookie_attrs[-1]
            node_name = cookie_attrs[5]
            dev = api.GetTestsuiteAttr(ip_address + "_device")[-1]
            curr_drops = qos.QosGetDropsForDevFromOutput(cmd.stdout, dev)
            # Record the current drop count as the baseline for later tests.
            qos.QosSetDropsForDev(cmd.stdout, dev, node_name)
        if "QoS sysctl get" in tc.cmd_cookies[cookie_idx]:
            qos.QosSetTestsuiteAttrs(cmd.stdout)
        if "show lif" in tc.cmd_cookies[cookie_idx]:
            # First whitespace-separated token of each non-empty line is the
            # lif identifier; publish the full list for later testcases.
            lif_list = []
            lines = cmd.stdout.split('\n')
            for line in lines:
                api.Logger.info("{}".format(line))
                if len(line) == 0:
                    continue
                lif = line.split(' ')[0]
                lif_list.append(lif)
            api.SetTestsuiteAttr("lifs", lif_list)
        cookie_idx += 1
    return api.types.status.SUCCESS
def Setup(tc):
    """krping setup: requires Linux/FreeBSD nodes, and Naples on both
    workloads (the mlx side would hit a local protection error)."""
    tc.desc = '''
    Test : krping
    Opcode: REG_MR, LOCAL_INV, Send, Read, Write
    Num QP: 1
    Pad : No
    Inline: No
    modes : inherently bidirectional
    '''
    tc.iota_path = api.GetTestsuiteAttr("driver_path")
    tc.nodes = api.GetNaplesHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])

    supported = (host.OS_TYPE_LINUX, host.OS_TYPE_BSD)
    if any(api.GetNodeOs(node) not in supported for node in tc.nodes):
        return api.types.status.IGNORED

    # get workloads from each node
    tc.w = list(api.GetRemoteWorkloadPairs()[0])

    tc.devices = []
    tc.gid = []
    tc.ib_prefix = []
    for wl in tc.w:
        tc.devices.append(api.GetTestsuiteAttr(wl.ip_address + '_device'))
        tc.gid.append(api.GetTestsuiteAttr(wl.ip_address + '_gid'))
        if not wl.IsNaples():
            api.Logger.info("IGNORED: mlx side will hit local prot err")
            return api.types.status.IGNORED
    tc.stats_results = []
    return api.types.status.SUCCESS
def Setup(tc):
    """ib_*_bw setup: select the vlan workload pair, optionally flipping
    client/server roles, and record device/GID/prefix info."""
    tc.desc = '''
    Test : ib_*_bw
    test : send, write, read
    Opcode: Only
    Num QP: 1, 2
    Pad : No
    Inline: No
    modes : bidirectional
    rdma_cm: yes, no
    flip : yes, no
    '''
    tc.iota_path = api.GetTestsuiteAttr("driver_path")
    tc.vlan_idx = api.GetTestsuiteAttr("vlan_idx")

    first, second = api.GetRemoteWorkloadPairs()[tc.vlan_idx]
    # 'flip' swaps which workload acts as server vs client.
    tc.w = [first, second] if getattr(tc.iterators, 'flip', None) == 'no' \
        else [second, first]

    tc.devices = [api.GetTestsuiteAttr(wl.ip_address + '_device') for wl in tc.w]
    tc.gid = [api.GetTestsuiteAttr(wl.ip_address + '_gid') for wl in tc.w]
    prefix = 'cd ' + tc.iota_path + ' && ./run_rdma.sh '
    tc.ib_prefix = [prefix if wl.IsNaples() else '' for wl in tc.w]
    return api.types.status.SUCCESS
def Setup(tc):
    """ibv_ud_pingpong setup; skipped on older uek kernels where UD in user
    space is broken with ib_uverbs."""
    tc.desc = '''
    Test: ibv_ud_pingpong
    Opcode: N/A
    Num QP: 1
    modes: workload1 as server, workload2 as client
           workload2 as server, workload1 as client
    '''
    for name in api.GetTestsuiteAttr("unames"):
        m = re.match(r'^4\.14\.35-(\d+)\..*\.el7uek', name)
        if m and int(m.group(1)) < 1844:
            api.Logger.info("Skip ibv_ud_pingpong test with uname %s" % (name, ))
            return api.types.status.IGNORED

    tc.iota_path = api.GetTestsuiteAttr("driver_path")
    # get workloads from each node
    tc.w = list(api.GetRemoteWorkloadPairs()[0])

    tc.devices = [api.GetTestsuiteAttr(wl.ip_address + '_device') for wl in tc.w]
    tc.gid = [api.GetTestsuiteAttr(wl.ip_address + '_gid') for wl in tc.w]
    prefix = 'cd ' + tc.iota_path + ' && ./run_rdma.sh '
    tc.ib_prefix = [prefix if wl.IsNaples() else '' for wl in tc.w]
    return api.types.status.SUCCESS
def Setup(tc):
    """Compute the driver load options and publish them for later tests.

    On Linux the options are appended to insmod; on FreeBSD (non-Linux)
    they are kenv settings applied before loading.
    """
    tc.iota_path = api.GetTestsuiteAttr("driver_path")
    tc.nodes = api.GetNaplesHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])

    tc.insmod_opts = ''
    if hasattr(tc.args, 'spec'):
        key = "spec=" if tc.os == host.OS_TYPE_LINUX else "hw.ionic_rdma.spec="
        tc.insmod_opts += " " + key + tc.args.spec

    api.SetTestsuiteAttr("insmod_opts", tc.insmod_opts)
    return api.types.status.SUCCESS
def Setup(tc):
    """Parse testcase args and pick up KNI workloads plus node/IP
    attributes; mfg_mode defaults to 'no' when not published."""
    parse_args(tc)

    for field, attr in (('wl0', 'kni_wl'),
                        ('sub_wl', 'kni_sub_wl'),
                        ('bitw_node_name', 'bitw_node_name'),
                        ('wl_node_name', 'wl_node_name'),
                        ('mnic_p2p_ip', 'mnic_p2p_ip'),
                        ('mnic_p2p_sub_ip', 'mnic_p2p_sub_ip')):
        setattr(tc, field, api.GetTestsuiteAttr(attr))

    mfg_mode = api.GetTestsuiteAttr("mfg_mode")
    tc.mfg_mode = 'no' if mfg_mode is None else mfg_mode
    return api.types.status.SUCCESS
def Setup(tc):
    """RDMA qstate dump setup: one workload pair plus the lif list
    published by an earlier testcase."""
    tc.desc = '''
    Test : Dump RDMA qstate
    Opcode : N/A
    '''
    tc.nodes = api.GetNaplesHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])
    # get workloads from each node
    tc.w = list(api.GetRemoteWorkloadPairs()[0])
    tc.lifs = api.GetTestsuiteAttr("lifs")
    return api.types.status.SUCCESS
def Setup(tc):
    """Publish the driver path, then distribute the RDMA driver package to
    Naples nodes and the show_gid tool to the remaining workload nodes."""
    api.SetTestsuiteAttr("driver_path", api.GetHostToolsDir() + '/')
    tc.iota_path = api.GetTestsuiteAttr("driver_path")
    tc.nodes = api.GetNaplesHostnames()
    tc.other_nodes = api.GetWorkloadNodeHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])
    platform_gendir = api.GetTopDir()+'/platform/gen/'
    if tc.os == host.OS_TYPE_LINUX:
        tc.pkgname, tc.showgid = 'drivers-linux.tar.xz', 'drivers-linux/show_gid'
    else:
        tc.pkgname, tc.showgid = 'drivers-freebsd.tar.xz', 'drivers-freebsd/show_gid'

    # Copy RDMA driver to naples nodes
    for node in tc.nodes:
        api.Logger.info("Copying {pkg} to {node}"
                        .format(pkg=tc.pkgname, node=node))
        resp = api.CopyToHost(node, [platform_gendir + tc.pkgname])
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to copy {pkg} to {node}: {resp}"
                             .format(pkg=tc.pkgname, node=node, resp=resp))
            return api.types.status.FAILURE

    # Copy show_gid to the non-Naples nodes only.
    for node in tc.other_nodes:
        if node in tc.nodes:
            continue
        api.Logger.info("Copying show_gid to {node}".format(node=node))
        resp = api.CopyToHost(node, [platform_gendir + tc.showgid])
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to copy show_gid to {node}: {resp}"
                             .format(node=node, resp=resp))
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Topology-dependent setup: dual-nic e2e topology vs single-nic local
    topology. Generates policy config and installs scapy on workload nodes."""
    tc.dualnic = getattr(tc.args, 'dualnic', False)

    # Set absolute path for json files.
    api.SetTestsuiteAttr("template_policy_json_path",
                         api.GetTopDir() + TEMPLATE_PLCY_JSON_PATH)
    tc.template_policy_json_path = api.GetTestsuiteAttr("template_policy_json_path")

    tc.athena_node_nic_pairs = athena_app_utils.get_athena_node_nic_names()
    tc.wl_node_nic_pairs = utils.get_classic_node_nic_pairs()

    tc.host_ifs = {}
    if tc.dualnic:
        # Record host + workload interfaces per (node, nic) pair.
        for node, nic in tc.wl_node_nic_pairs:
            interfaces = api.GetNaplesHostInterfaces(node, nic)
            interfaces.extend(wl.interface for wl in api.GetWorkloads(node))
            tc.host_ifs[(node, nic)] = interfaces
        gen_plcy_cfg_e2e_wl_topo(tc)
        # Install python scapy packages
        install.InstallScapyPackge(tc, [n for n, _ in tc.wl_node_nic_pairs])
    else:
        # Assuming only one bitw node and one workload node
        tc.bitw_node_name, tc.bitw_nic = tc.athena_node_nic_pairs[0]
        tc.wl_node_name, tc.classic_nic = tc.wl_node_nic_pairs[0]
        gen_plcy_cfg_local_wl_topo(tc)
        # Install python scapy packages
        install.InstallScapyPackge(tc, [tc.wl_node_name])
    return api.types.status.SUCCESS
def __setup_default_params(tc):
    """Fill in defaults for any test parameter not already supplied via
    args or iterators; cpumask is derived from the clamped cpu count."""
    def __set(tc, key, value):
        # Only set the default when neither args nor iterators provide it.
        if getattr(tc.args, key, None) is None and \
           getattr(tc.iterators, key, None) is None:
            setattr(tc.args, key, value)
        return

    batch_depth = __get_param(tc, 'batch_depth',
                              pnsodefs.PNSO_TEST_DEFAULT_BATCH_DEPTH)
    defaults = (
        ('repeat', pnsodefs.PNSO_TEST_DEFAULT_REPEAT * batch_depth),
        ('key1', pnsodefs.PNSO_TEST_DEFAULT_KEY1),
        ('key2', pnsodefs.PNSO_TEST_DEFAULT_KEY2),
        ('wait', str(pnsodefs.PNSO_TEST_DEFAULT_WAIT)),
        ('pcqdepth', pnsodefs.PNSO_TEST_DEFAULT_PCQDEPTH),
        ('batch_depth', pnsodefs.PNSO_TEST_DEFAULT_BATCH_DEPTH),
        ('mode', pnsodefs.PNSO_TEST_DEFAULT_MODE),
    )
    for key, value in defaults:
        __set(tc, key, value)

    # Clamp the requested cpu count to the suite-wide maximum.
    num_cpus = min(__get_param(tc, 'cpus', pnsodefs.PNSO_TEST_DEFAULT_NUM_CPUS),
                   api.GetTestsuiteAttr(pnsodefs.PNSO_TEST_MAXCPUS_ATTR))
    __set(tc, 'cpumask', (1 << num_cpus) - 1)
    return
def GetFlows(flows_req):
    """Return up to flow_count flows matching the request.

    Validates proto/vnic_type/flow_type/nat, rejects unsupported
    combinations, then slices the requested flows out of the suite-wide
    FlowSet testsuite attribute. Raises Exception on invalid requests.
    """
    if flows_req.proto not in ['UDP', 'TCP', 'ICMP']:
        raise Exception('Flows requested for invalid protocol %s' % \
                        flows_req.proto)
    if flows_req.vnic_type not in ['L3', 'L2']:
        raise Exception('Flows requested for invalid vnic type %s' % \
                        flows_req.vnic_type)
    if flows_req.flow_type not in ['static', 'dynamic']:
        raise Exception('Flows requested for invalid flow type %s' % \
                        flows_req.flow_type)
    if flows_req.nat not in ['yes', 'no']:
        raise Exception('Flows requested for invalid nat choice %s' % \
                        flows_req.nat)
    # Unsupported combinations.
    if flows_req.nat == 'yes' and flows_req.flow_type == 'static':
        raise Exception("Invalid flow request: nat = 'yes', flow_type "
                        "= 'static'")
    if flows_req.proto == 'ICMP' and flows_req.flow_type == 'static':
        raise Exception("Invalid flow request: proto = 'ICMP', flow_type "
                        "= 'static'")

    flow_set = api.GetTestsuiteAttr("FlowSet")
    proto = flows_req.proto
    count = flows_req.flow_count

    if flows_req.nat == 'yes':
        # NAT flows live in per-vnic-type dicts keyed by protocol.
        nat_set = flow_set.nat_flow_set
        if flows_req.vnic_type == 'L3':
            flows, cnts = nat_set.l3_nat_flows, nat_set.l3_nat_flow_cnt
        else:
            flows, cnts = nat_set.l2_nat_flows, nat_set.l2_nat_flow_cnt
        FlowCntCheck(count, cnts[proto], proto)
        return flows[proto][:count]

    # Non-NAT flows: per-proto flow sets with static/dyn lists, addressed
    # by attribute name (e.g. udp_flow_set.static_udp_flows).
    proto_tag = {'UDP': 'udp', 'TCP': 'tcp', 'ICMP': 'icmp'}[proto]
    proto_flow_set = getattr(flow_set, proto_tag + '_flow_set')
    kind = 'static' if flows_req.flow_type == 'static' else 'dyn'
    FlowCntCheck(count,
                 getattr(proto_flow_set, '%s_%s_flow_cnt' % (kind, proto_tag)),
                 proto)
    return getattr(proto_flow_set, '%s_%s_flows' % (kind, proto_tag))[:count]
def Verify(tc):
    """Verify QoS traffic test output.

    Walks tc.resp.commands in lockstep with tc.cmd_cookies and, depending
    on the cookie: reports BW averages from run_rdma output, reports
    PFC/PAUSE counters from halctl port statistics, or compares buffer
    drop counts against the previously recorded baseline. Returns
    api.types.status.SUCCESS/FAILURE.
    """
    #==============================================================
    # verify the output
    #==============================================================
    if tc.resp is None:
        return api.types.status.FAILURE
    result = api.types.status.SUCCESS
    # When drops are expected, extra drops do not fail the test.
    expect_drops = api.GetTestsuiteAttr('qos_expect_drops')
    cookie_idx = 0
    for cmd in tc.resp.commands:
        api.Logger.info("{}".format(tc.cmd_cookies[cookie_idx]))
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            result = api.types.status.FAILURE
        if "run_rdma" in tc.cmd_cookies[cookie_idx]:
            get_bw_avg_flag = False
            # Cookie containing the server IP means this was the client cmd.
            if tc.w[tc.server_idx].ip_address in tc.cmd_cookies[cookie_idx]:
                # client cmd
                cs_str = 'CLIENT'
            else:
                # server cmd
                cs_str = 'SERVER'
            lines = cmd.stdout.split('\n')
            for line in lines:
                # The data line follows the "BW average" header line.
                if "BW average" in line:
                    get_bw_avg_flag = True
                    continue
                if get_bw_avg_flag == True:
                    line_attrs = line.split()
                    # Expect exactly 5 columns; BW average is column 4.
                    if len(line_attrs) == 5:
                        api.Logger.info("{} : BW average: {} Gbps".format(cs_str, line_attrs[3]))
                    else:
                        api.Logger.info("Invalid number of line attributes: {}; cannot get BW average".format(len(line_attrs)))
                    break
            '''
            # TODO: get the sqcb and check for retransmissions (if any)
            sqcb=0
            lines = cmd.stdout.split('\n')
            for line in lines:
                if "local address" in line:
                    line_attrs = line.split()
                    for i in range(0, len(line_attrs)):
                        if(line_attr[i] == "QPN"):
                            sqcb = int(line_attr[i+1], 0)
                    break
            if(sqcb != 0):
                cmd = 'cd ' + tc.iota_path + ' && ./rdmactl.py --DEVNAME '\
                      + tc.devices[tc.client_idx] + ' --sqcb0 ' + sqcb
                api.Trigger_AddCommand(req,
                                       tc.w[tc.client_idx].node_name,
                                       tc.w[tc.client_idx].workload_name,
                                       cmd)
                tc.cmd_cookies.append(cmd)
            else:
                api.Logger.error("Couldnt extract sqcb")
            #TODO: look for p_index1 and c_index1 to be 0
            '''
        # PFC verification cmd
        elif "halctl show port" in tc.cmd_cookies[cookie_idx]:
            # cmd output
            # halctl show port --port Eth1/1 statistics
            # FRAMES RX PAUSE      0
            # FRAMES RX PRIPAUSE   194074
            # FRAMES TX PAUSE      0
            # FRAMES TX PRIPAUSE   0
            # FRAMES RX PRI 3      194074
            # FRAMES TX PRI 3      0
            # The pcp/dscp value was appended last to the cookie string.
            pcp_or_dscp = tc.cmd_cookies[cookie_idx].split()[-1]
            rx_pause_counter = 'FRAMES RX PAUSE'
            tx_pause_counter = 'FRAMES TX PAUSE'
            rx_pfc_counter = 'FRAMES RX PRIPAUSE'
            tx_pfc_counter = 'FRAMES TX PRIPAUSE'
            # class_type 1 means PCP-based classification, else DSCP-based.
            if tc.class_type == 1:
                tclass = qos.QosGetTcForPcp(pcp_or_dscp)
            else:
                tclass = qos.QosGetTcForDscp(pcp_or_dscp)
            cos = qos.QosGetCosForTc(tclass)
            rx_pfc_class_counter = 'FRAMES RX PRI ' + str(cos)
            tx_pfc_class_counter = 'FRAMES TX PRI ' + str(cos)
            lines = cmd.stdout.split('\n')
            # Counter value is always the last whitespace-separated token.
            for line in lines:
                if rx_pfc_counter in line:
                    line_attrs = line.split()
                    rx_pripause = int(line_attrs[len(line_attrs)-1])
                    if rx_pripause == 0:
                        api.Logger.info("No PFC frames received")
                    else:
                        api.Logger.info("PFC Frames received: {}".format(rx_pripause))
                elif tx_pfc_counter in line:
                    line_attrs = line.split()
                    tx_pripause = int(line_attrs[len(line_attrs)-1])
                    if tx_pripause == 0:
                        api.Logger.info("No PFC frames transmitted")
                    else:
                        api.Logger.info("PFC Frames transmitted: {}".format(tx_pripause))
                elif rx_pfc_class_counter in line:
                    line_attrs = line.split()
                    rx_pri = int(line_attrs[len(line_attrs)-1])
                    if rx_pri == 0:
                        api.Logger.info("No Pri {} PFC frames received".format(cos))
                    else:
                        api.Logger.info("Pri {} PFC Frames received: {}".format(cos, rx_pri))
                elif tx_pfc_class_counter in line:
                    line_attrs = line.split()
                    tx_pri = int(line_attrs[len(line_attrs)-1])
                    if tx_pri == 0:
                        api.Logger.info("No Pri {} PFC frames transmitted".format(cos))
                    else:
                        api.Logger.info("Pri {} PFC Frames transmitted: {}".format(cos, tx_pri))
                elif rx_pause_counter in line:
                    line_attrs = line.split()
                    rx_pause = int(line_attrs[len(line_attrs)-1])
                    if rx_pause == 0:
                        api.Logger.info("No Link Level PAUSE frames received")
                    else:
                        api.Logger.info("Link Level PAUSE Frames received: {}".format(rx_pause))
                elif tx_pause_counter in line:
                    line_attrs = line.split()
                    tx_pause = int(line_attrs[len(line_attrs)-1])
                    if tx_pause == 0:
                        api.Logger.info("No Link Level PAUSE frames transmitted")
                    else:
                        api.Logger.info("Link Level PAUSE Frames transmitted: {}".format(tx_pause))
        #Check if there are drops
        elif "show drops cmd" in tc.cmd_cookies[cookie_idx]:
            # Cookie format: "show drops cmd for node <name> ip_address <ip>".
            cookie_attrs = tc.cmd_cookies[cookie_idx].split()
            ip_address = cookie_attrs[-1]
            node_name = cookie_attrs[5]
            dev = api.GetTestsuiteAttr(ip_address+"_device")[-1]
            curr_drops = qos.QosGetDropsForDevFromOutput(cmd.stdout, dev)
            prev_drops = qos.QosGetDropsForDevFromTestSuite(dev, node_name)
            # Record the new baseline before comparing.
            qos.QosSetDropsForDev(cmd.stdout, dev, node_name)
            if int(curr_drops) > int(prev_drops):
                api.Logger.error("Additional {} drops found on {}".format(int(curr_drops)-int(prev_drops), node_name))
                if expect_drops == False:
                    # Fail the test only if drops are not expected
                    return api.types.status.FAILURE
        cookie_idx += 1
    return result
def Setup(tc):
    """Gracefully unplug the Naples PCI device from the host.

    Copies helper files, captures the int_mnic0 IP, removes the pre-init
    script, unconfigures/downs the mnic interfaces, unloads the mnic
    drivers, runs plugctl on the host, and kills the athena app.
    Returns api.types.status.SUCCESS or FAILURE.
    """
    tc.bitw_node_name = api.GetTestsuiteAttr("bitw_node_name")
    tc.intfs = api.GetTestsuiteAttr("inb_mnic_intfs")
    tc.nodes = api.GetNaplesHostnames()

    # copy device.json to naples
    device_json_fname = api.GetTopDir() + '/nic/conf/athena/device.json'
    api.CopyToNaples(tc.bitw_node_name, [device_json_fname], "")

    # copy plugctl.sh to host
    plugctl_fname = api.GetTopDir(
    ) + '/iota/test/athena/testcases/networking/scripts/plugctl.sh'
    api.CopyToHost(tc.bitw_node_name, [plugctl_fname], "")

    # get the IP address of int_mnic and store it
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.int_mnic_ip = None
    cmd = "ifconfig int_mnic0 | grep inet | cut -d ':' -f 2 | cut -d ' ' -f 1"
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)
    resp = api.Trigger(req)
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)
    if cmd.exit_code != 0:
        api.Logger.error("Failed to get int_mnic0 IP on node %s" % \
                         tc.bitw_node_name)
        return api.types.status.FAILURE
    else:
        tc.int_mnic_ip = str(cmd.stdout)

    # delete pensando_pre_init.sh
    # (touch first so rm succeeds even when the file does not exist)
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "cd /sysconfig/config0 && touch pensando_pre_init.sh && rm pensando_pre_init.sh"
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)

    # bring down linux interfaces
    for intf in tc.intfs:
        # unconfigure inb_mnic0 and inb_mnic1
        # NOTE(review): the configured address is intf['ip'] + 1 — presumably
        # mirroring how the interfaces were configured earlier; confirm.
        ip_addr = str(ip_address(intf['ip']) + 1)
        utils.configureNaplesIntf(req, tc.bitw_node_name, intf['name'],
                                  ip_addr, '24',
                                  vlan=intf['vlan'],
                                  unconfig=True)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("Failed to bring down linux interfaces on node %s" % \
                             tc.bitw_node_name)
            return api.types.status.FAILURE

    # unconfigure int_mnic0
    cmd = "ifconfig int_mnic0 down && ip addr del " + tc.int_mnic_ip
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)

    # unload drivers
    cmd = "rmmod mnet && rmmod mnet_uio_pdrv_genirq && rmmod ionic_mnic"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)

    # run plugctl to gracefully bring down the PCI device on host
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "./plugctl.sh out"
    api.Trigger_AddHostCommand(req, tc.bitw_node_name, cmd)
    resp = api.Trigger(req)
    cmd = resp.commands[0]
    api.PrintCommandResults(cmd)
    if cmd.exit_code != 0:
        api.Logger.error("Failed to gracefully bring down the PCI device on host %s" % \
                         tc.bitw_node_name)
        return api.types.status.FAILURE

    # kill athena primary app
    cmd = "pkill athena_app"
    resp = api.RunNaplesConsoleCmd(tc.nodes[0], cmd)
    return api.types.status.SUCCESS
def Setup(tc):
    """Prepare a stateful-vnic packet test: load policy.json, resolve the
    vnic and its uplink vlan/mac attributes, fetch the test flows, and stage
    the send/recv packet scripts on the workload node.

    Populates many attributes on `tc` (plcy_obj, vnic, vnic_id, node names,
    up0/up1 intf/vlan/mac, flows, resp) for later Trigger/Verify stages.

    Returns api.types.status.SUCCESS, or FAILURE when interfaces or vnic
    attributes cannot be resolved.
    """
    # parse iterator args
    parse_args(tc)

    # setup policy.json obj
    tc.plcy_obj = None
    # read from policy.json
    with open(api.GetTestsuiteAttr("dp_policy_json_path")) as fd:
        tc.plcy_obj = json.load(fd)

    # get vnic (selected by vnic type / NAT mode; stateful flavor)
    tc.vnic = utils.get_vnic(tc.plcy_obj, tc.vnic_type, tc.nat,
                             _stateful=True)
    # get vnic id
    tc.vnic_id = utils.get_vnic_id(tc.plcy_obj, tc.vnic_type, tc.nat,
                                   _stateful=True)
    api.Logger.info('vnic id: {}'.format(tc.vnic_id))

    # get node info
    tc.bitw_node_name = api.GetTestsuiteAttr("bitw_node_name")
    tc.wl_node_name = api.GetTestsuiteAttr("wl_node_name")
    tc.wl_node = api.GetTestsuiteAttr("wl_node")
    tc.nodes = api.GetNodes()

    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)

    # Assuming single nic per host
    if len(host_intfs) != 2:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE

    tc.up0_intf = host_intfs[0]
    tc.up1_intf = host_intfs[1]

    # get uplink vlans
    # NOTE(review): field paths assume the policy.json vnic schema
    # ('rewrite_underlay', 'session.to_switch') — verify against the policy
    # file this suite generates.
    tc.up0_vlan = tc.vnic['rewrite_underlay']['vlan_id']
    tc.up1_vlan = tc.vnic['vlan_id']

    # get uplink mac
    tc.up0_mac = tc.vnic['rewrite_underlay']['dmac']
    tc.up1_mac = tc.vnic['session']['to_switch']['host_mac']

    if not tc.up0_mac or not tc.up1_mac:
        api.Logger.error('Failed to get workload sub-intf mac addresses')
        return api.types.status.FAILURE

    if not tc.up0_vlan or not tc.up1_vlan:
        api.Logger.error('Failed to get workload sub-intf vlan value')
        return api.types.status.FAILURE

    api.Logger.info('Workload0: up0_intf %s up0_vlan %s up0_mac %s' % (
                    tc.up0_intf, tc.up0_vlan, tc.up0_mac))
    api.Logger.info('Workload1: up1_intf %s up1_vlan %s up1_mac %s' % (
                    tc.up1_intf, tc.up1_vlan, tc.up1_mac))

    # fetch flows needed for the test
    tc.flows = get_flows(tc)

    # copy send/recv scripts to node
    send_pkt_script_fname = api.GetTestsuiteAttr("send_pkt_path")
    recv_pkt_script_fname = api.GetTestsuiteAttr("recv_pkt_path")
    for node in tc.nodes:
        # NOTE(review): identity comparison — assumes GetNodes() returns the
        # very same object stored in the "wl_node" testsuite attr; confirm,
        # otherwise an equality check on node names would be safer.
        if node is tc.wl_node:
            api.CopyToHost(node.Name(), [send_pkt_script_fname], "")
            api.CopyToHost(node.Name(), [recv_pkt_script_fname], "")

    # init response list
    tc.resp = []

    return api.types.status.SUCCESS
def Setup(tc):
    """Select the native and tagged workloads for KNI / manufacturing-mode
    tests and verify the mnic_p2p interface exists on Naples.

    Picks the workloads whose parent interface matches the uplink under test
    (tc.test_intf, default 'up1'): the vlan-0 workload becomes tc.wl0 and the
    tagged ones tc.sub_wl (1 for kni tests, 3 for mfg-mode tests). Publishes
    both as testsuite attrs "kni_wl" / "kni_sub_wl".

    Returns api.types.status.SUCCESS, or FAILURE when interfaces or
    workloads cannot be resolved.
    """
    tc.mfg_mode = api.GetTestsuiteAttr("mfg_mode")
    if tc.mfg_mode is None:
        tc.mfg_mode = 'no'
    tc.test_intf = api.GetTestsuiteAttr("mfg_test_intf")
    if tc.test_intf is None:
        tc.test_intf = 'up1'  # default up1 for kni tests

    # get node info
    tc.bitw_node_name = api.GetTestsuiteAttr("bitw_node_name")
    tc.wl_node_name = api.GetTestsuiteAttr("wl_node_name")

    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)

    # Assuming single nic per host
    if len(host_intfs) != 2:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE

    up0_intf = host_intfs[0]
    up1_intf = host_intfs[1]

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    # Fix: initialize tc.wl0 so a missing native workload is reported as a
    # clean FAILURE below instead of an AttributeError at first use.
    tc.wl0 = None
    tc.sub_wl = []
    for wl in workloads:
        if (wl.parent_interface == up0_intf and tc.test_intf == 'up0') or \
           (wl.parent_interface == up1_intf and tc.test_intf == 'up1'):
            if wl.uplink_vlan == 0:  # Native workload
                tc.wl0 = wl
            else:  # Tagged workload
                tc.sub_wl.append(wl)

    if tc.wl0 is None:
        api.Logger.error('No native workload found on test intf %s' %
                         tc.test_intf)
        return api.types.status.FAILURE

    # 1 subintf is used by default for kni tests
    # 3 subintf are used by default for mfg mode tests (2 for positive test and
    # 1 for negative test)
    if tc.mfg_mode == 'yes':
        tc.sub_wl = tc.sub_wl[:3]
    else:
        tc.sub_wl = tc.sub_wl[:1]

    api.SetTestsuiteAttr("kni_wl", tc.wl0)
    api.SetTestsuiteAttr("kni_sub_wl", tc.sub_wl)

    api.Logger.info("wl0: vlan: {}, mac: {}, ip: {}".format(
        tc.wl0.uplink_vlan, tc.wl0.mac_address, tc.wl0.ip_address))
    for idx, sub_wl in enumerate(tc.sub_wl):
        api.Logger.info("sub_wl[{}]: vlan: {}, mac: {}, ip: {}".format(
            idx, sub_wl.uplink_vlan, sub_wl.mac_address, sub_wl.ip_address))

    # check if mnic_p2p interface is present
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = "ifconfig mnic_p2p"
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)

    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("mnic_p2p intf not found on naples %s" % \
                             tc.bitw_node_name)
            return api.types.status.FAILURE

    return api.types.status.SUCCESS
def GetTunnelManager():
    """Return the MPLS-over-UDP tunnel manager stored on the testsuite."""
    tunnel_mgr = api.GetTestsuiteAttr("MPLSUDP_TUNNEL_MANAGER")
    return tunnel_mgr
def GetLastLinkStatusChangeCount(naples_host):
    """Return the recorded link-flap count for the given Naples host.

    Reads the per-host testsuite attribute written by the link-status
    validation step.
    """
    attr_name = "LinkTestValidationNumberOfFlaps" + naples_host
    return api.GetTestsuiteAttr(attr_name)
def ClearFlowSet():
    """Release the testsuite-level "FlowSet" attribute, if one is set.

    Returns api.types.status.SUCCESS unconditionally.
    """
    flow_set = api.GetTestsuiteAttr("FlowSet")
    if flow_set is not None:
        # Overwriting the stored attribute is what actually releases the
        # object; the original's `del flow_set` only unbound the local name
        # and had no effect, so it is removed.
        api.SetTestsuiteAttr("FlowSet", None)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Validate the flow-log file on each (node, nic) pair.

    Waits out the current 65-second logging window, then reads the active
    /data/flow_log_{0,1}.txt file and checks that the "pkts_from_host"
    counter in any "Data" line equals tc.num_pkts. When tc.num_pkts is 0,
    an empty log file is also accepted.

    Side effects: advances the "flow_log_curr_window_start" and
    "flow_log_curr_file_num" testsuite attrs for the next invocation.

    Returns api.types.status.SUCCESS, or FAILURE on read error, missing
    packets, or a count mismatch.
    """
    expect_pkts = tc.num_pkts != 0

    curr_time = time.time()
    flow_log_curr_window_start = api.GetTestsuiteAttr(
        "flow_log_curr_window_start")

    # Wait for at least 65 secs before checking the log file
    time_window_diff = 65.0 - (curr_time - flow_log_curr_window_start)
    if time_window_diff > 0:
        api.Logger.info(
            'Sleeping for {} secs before reading the flow log file'.format(
                time_window_diff))
        time.sleep(time_window_diff)

    # Update the time window
    api.SetTestsuiteAttr("flow_log_curr_window_start",
                         flow_log_curr_window_start + 60)

    flow_log_curr_file_num = api.GetTestsuiteAttr("flow_log_curr_file_num")
    flow_log_curr_file = "/data/flow_log_{}.txt".format(flow_log_curr_file_num)

    # Shift to the other file for the next read (double-buffered logs)
    flow_log_curr_file_num = (flow_log_curr_file_num + 1) % 2
    api.SetTestsuiteAttr("flow_log_curr_file_num", flow_log_curr_file_num)

    for node, nic in tc.node_nic_pairs:
        cmd = "cat {}".format(flow_log_curr_file)
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, node, cmd, nic)
        resp = api.Trigger(req)

        for cmd in resp.commands:
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                api.Logger.error("cat {} failed on "
                                 "{}".format(flow_log_curr_file, node))
                return api.types.status.FAILURE

            if not cmd.stdout:
                # Empty log is only acceptable when no packets were expected.
                if expect_pkts:
                    api.Logger.error("Error: Packet should be logged "
                                     "but flow_log is empty. Node: "
                                     "{}".format((node)))
                    return api.types.status.FAILURE
            else:
                pkt_match = False
                lines = cmd.stdout.split("\n")
                for line in lines:
                    if "Data" in line:
                        # Format: "<prefix> ==> k1: v1, k2: v2, ..."
                        # (the original also kept the unused prefix token;
                        # dropped here)
                        data_vals = line.split("==>")[1].split(',')
                        for data_val in data_vals:
                            if "pkts_from_host" in data_val:
                                curr_num_pkts = int(data_val.split(': ')[1])
                                if curr_num_pkts == tc.num_pkts:
                                    pkt_match = True
                                    break

                if not pkt_match:
                    api.Logger.error(
                        "Error: Packet count mismatch for node {}. "
                        "expected: {}".format(node, tc.num_pkts))
                    return api.types.status.FAILURE

    return api.types.status.SUCCESS