def Setup(tc):
    """Prepare the manufacturing-mode (mfg) uplink test.

    Publishes mfg-mode attributes for later testcases, resolves the bitw
    (Athena) and classic workload nodes, collects the tagged workload
    vlans on the uplink under test, generates the start-agent script and
    pushes the preinit/start-agent scripts to the Naples card.
    """
    parse_args(tc)
    api.SetTestsuiteAttr("mfg_test_intf", tc.test_intf)
    api.SetTestsuiteAttr("mfg_mode", "yes")
    api.SetTestsuiteAttr("preinit_script_path", NAPLES_PREINIT_SCRIPT_PATH)
    api.SetTestsuiteAttr("start_agent_script_path",
                         NAPLES_START_AGENT_SCRIPT_PATH)

    # Only one node per role for the single-nic topology.
    tc.bitw_node_name = athena_app_utils.get_athena_node_nic_names()[0][0]
    tc.wl_node_name = utils.get_classic_node_nic_pairs()[0][0]

    intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)
    # Assuming a single nic per host, exactly two uplinks are expected.
    if len(intfs) != 2:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE
    up0_intf, up1_intf = intfs[0], intfs[1]

    workloads = api.GetWorkloads()
    if not workloads:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    # Collect vlans of tagged workloads sitting on the uplink under test.
    parent_of = {'up0': up0_intf, 'up1': up1_intf}
    test_parent = parent_of.get(tc.test_intf)
    wl_vlans = [wl.uplink_vlan for wl in workloads
                if wl.parent_interface == test_parent and wl.uplink_vlan != 0]

    if len(wl_vlans) < NUM_MFG_TEST_VLANS:
        api.Logger.error('Failed to fetch %d tagged workloads for mfg test'
                         ' on uplink %s' % (NUM_MFG_TEST_VLANS, tc.test_intf))
        return api.types.status.FAILURE

    # Generate the start-agent script with the testbed vlans baked in.
    if gen_start_agent_script(wl_vlans) != api.types.status.SUCCESS:
        return api.types.status.FAILURE

    # Push the preinit and generated start-agent scripts to Naples, then
    # remove the generated file from the workspace.
    preinit_filename = api.GetTopDir() + '/' + WS_PREINIT_SCRIPT_PATH
    start_agent_filename = api.GetTopDir(
    ) + '/' + WS_IOTA_START_AGENT_SCRIPT_PATH
    api.CopyToNaples(tc.bitw_node_name,
                     [preinit_filename, start_agent_filename], "")
    os.remove(start_agent_filename)

    return api.types.status.SUCCESS
def Trigger(tc):
    """Configure mnic_p2p addressing and launch testpmd on the bitw node."""
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    # The p2p interface takes the address right after the native workload.
    tc.mnic_p2p_ip = str(ip_address(tc.wl0.ip_address) + 1)
    utils.configureNaplesIntf(req, tc.bitw_node_name, 'mnic_p2p',
                              tc.mnic_p2p_ip, '255.255.255.0')

    # One vlan sub-interface address per tagged sub-workload.
    tc.mnic_p2p_sub_ip = {}
    for sub_wl in tc.sub_wl:
        sub_ip = str(ip_address(sub_wl.ip_address) + 1)
        tc.mnic_p2p_sub_ip[sub_wl.workload_name] = sub_ip
        utils.configureNaplesIntf(req, tc.bitw_node_name, 'mnic_p2p',
                                  sub_ip, '255.255.255.0',
                                  vlan=str(sub_wl.uplink_vlan))

    api.SetTestsuiteAttr("mnic_p2p_ip", tc.mnic_p2p_ip)
    api.SetTestsuiteAttr("mnic_p2p_sub_ip", tc.mnic_p2p_sub_ip)

    # Hugepage setup required before testpmd can start.
    for cmd in (
            "echo 256 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages",
            "mkdir -p /dev/hugepages",
            "mount -t hugetlbfs nodev /dev/hugepages"):
        api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)

    # Launch testpmd bound to the p2p vdev plus the uplink under test.
    common_args = [{'vdev': 'net_ionic2'}]
    common_args.append(
        {'vdev': 'net_ionic0' if tc.test_intf == 'up0' else 'net_ionic1'})
    args = [
        {'coremask': '0x6'},
        {'portmask': '0x3'},
        {'stats-period': '3'},
        {'max-pkt-len': '9208'},
        {'mbuf-size': '10240'},
        {'total-num-mbufs': '10240'},
    ]
    testpmd.StartTestpmd(req, tc.bitw_node_name, common_args, args)

    # Give testpmd a moment to come up, then verify it is running.
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, "sleep 5")
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name,
                                 'ps -ef | grep testpmd | grep -v grep')

    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Verify(tc):
    """Validate show_gid command results and cache per-workload device/GID,
    QoS drop counters and lif ids as testsuite attributes.

    Returns FAILURE when any foreground command failed, SUCCESS otherwise.
    """
    if tc.resp is None:
        return api.types.status.FAILURE

    api.Logger.info("show_gid results")
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            return api.types.status.FAILURE

    # Record device/GID per workload for later testcases; commands[0] is the
    # leading 'sleep', so workload i's show_gid output is at commands[i + 1].
    w = [tc.w1, tc.w2, tc.vlan_w1, tc.vlan_w2]
    for i in range(len(w)):
        if api.IsDryrun():
            api.SetTestsuiteAttr(w[i].ip_address + "_device", '0')
            api.SetTestsuiteAttr(w[i].ip_address + "_gid", '0')
        else:
            stdout = tc.resp.commands[i + 1].stdout
            api.SetTestsuiteAttr(w[i].ip_address + "_device",
                                 rdma.GetWorkloadDevice(stdout))
            api.SetTestsuiteAttr(w[i].ip_address + "_gid",
                                 rdma.GetWorkloadGID(stdout))

    cookie_idx = 0
    for cmd in tc.resp.commands:
        cookie = tc.cmd_cookies[cookie_idx]
        if "show drops cmd" in cookie:
            # Cookie format: "show drops cmd for node <name> ip_address <ip>".
            cookie_attrs = cookie.split()
            # BUG FIX: renamed local 'ip_address' -> 'wl_ip'; the old name
            # shadowed the ipaddress.ip_address callable used in this file.
            wl_ip = cookie_attrs[-1]
            node_name = cookie_attrs[5]
            dev = api.GetTestsuiteAttr(wl_ip + "_device")[-1]
            # BUG FIX: dropped the unused 'curr_drops' binding — the value
            # returned by QosGetDropsForDevFromOutput was never consumed.
            qos.QosSetDropsForDev(cmd.stdout, dev, node_name)
        if "QoS sysctl get" in cookie:
            qos.QosSetTestsuiteAttrs(cmd.stdout)
        if "show lif" in cookie:
            # Cache the lif ids printed by 'halctl show lif'.
            lif_list = []
            for line in cmd.stdout.split('\n'):
                api.Logger.info("{}".format(line))
                if len(line) == 0:
                    continue
                lif_list.append(line.split(' ')[0])
            api.SetTestsuiteAttr("lifs", lif_list)
        cookie_idx += 1
    return api.types.status.SUCCESS
def Main(tc):
    """Record the current link-flap count on every Naples host as the
    baseline for the later link-verification testcases.

    Returns FAILURE when the flap count cannot be read (outside dryrun).
    """
    api.Logger.info("Config Link init.")
    api.SetTestsuiteAttr("LinkTestValidationSkip", 0)
    api.SetTestsuiteAttr("LinkTestValidationDisable", 0)
    for naples_host in api.GetNaplesHostnames():
        totalNumberLinkFlapped = linkmgr.GetLinkStatusChangeCount(naples_host)
        # BUG FIX: identity comparison for None (was '== None').
        if totalNumberLinkFlapped is None:
            if GlobalOptions.dryrun:
                return api.types.status.SUCCESS
            # BUG FIX: corrected typos in the error message
            # ("happend" -> "happened", "lik" -> "link") and added the
            # missing separator before the host name.
            api.Logger.error(
                "Failure happened while trying to read number of link flaps"
                " on Naples: " + naples_host)
            return api.types.status.FAILURE
        # Store the baseline so Verify can detect new flaps later.
        linkmgr.ResetLastLinkStatusChangeCount(naples_host,
                                               totalNumberLinkFlapped)
    return api.types.status.SUCCESS
def QosSetTestsuiteAttrs(output):
    """Parse 'sysctl dev.ionic.0.qos' output and publish each qos key/value
    pair as a testsuite attribute.

    Each line looks like 'dev.ionic.0.qos.<key>: <v0> <v1> ...'; the value
    list is stored comma-separated under attribute <key>.
    """
    lines = output.split('\n')
    for line in lines:
        # BUG FIX: identity comparison for None (was 'line == None').
        if line is None or line == "":
            continue
        attrs = line.split('.')
        # Dot-field 4 holds '<key>: <values>' (after dev/ionic/0/qos).
        attr = attrs[4]
        [key, value] = attr.split(": ")
        # Values are space-separated per traffic class; store as CSV.
        api.SetTestsuiteAttr(key, value.replace(" ", ","))
def QosSetDropsForDev(output, dev, node_name):
    """Record the drop counter for device *dev* on *node_name* under the
    testsuite attribute '<node_name>_dev<dev>_drops'.

    Defaults to '0' when the output is empty or no row matches the device.
    """
    # BUG FIX: initialize 'drops' up front; previously a non-empty output
    # with no matching device row left it unbound (UnboundLocalError).
    drops = '0'
    if output:
        for line in output.split('\n'):
            line_attrs = line.split()
            # BUG FIX: skip blank lines instead of indexing into them.
            if not line_attrs:
                continue
            if line_attrs[0] == dev:
                # Column 4 of the matching row is the drop count.
                drops = line_attrs[4]
                break
    attr = node_name + "_dev" + dev + "_drops"
    api.SetTestsuiteAttr(attr, drops)
def Setup(tc):
    """Record Naples node and OS info and publish the pnso max-cpus attr."""
    tc.dmesg_commands = []
    tc.output_commands = []
    naples_hosts = api.GetNaplesHostnames()
    # Could be restricted to the first Naples only; currently runs on all.
    tc.nodes = naples_hosts
    tc.os = api.GetNodeOs(naples_hosts[0])
    api.SetTestsuiteAttr(pnsodefs.PNSO_TEST_MAXCPUS_ATTR, tc.args.maxcpus)
    return api.types.status.SUCCESS
def Setup(tc):
    """Discover the bitw (Athena) node and the workload node, then publish
    node info and packet-script paths for the rest of the testsuite."""
    tc.bitw_node_name = None
    tc.wl_node_name = None
    tc.wl_node = None

    # Assuming only one bitw node and one workload node.
    for nic in store.GetTopology().GetNicsByPipeline("athena"):
        tc.bitw_node_name = nic.GetNodeName()
        break

    workloads = api.GetWorkloads()
    if not workloads:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE
    tc.wl_node_name = workloads[0].node_name

    tc.nodes = api.GetNodes()
    for node in tc.nodes:
        if node.Name() == tc.wl_node_name:
            tc.wl_node = node

    api.SetTestsuiteAttr("bitw_node_name", tc.bitw_node_name)
    api.SetTestsuiteAttr("wl_node_name", tc.wl_node_name)
    api.SetTestsuiteAttr("wl_node", tc.wl_node)

    # Absolute paths of the helper packet scripts used by later testcases.
    scripts_dir = api.GetTopDir() + \
        '/iota/test/athena/testcases/networking/scripts'
    api.SetTestsuiteAttr("send_pkt_path", scripts_dir + '/send_pkt.py')
    api.SetTestsuiteAttr("recv_pkt_path", scripts_dir + '/recv_pkt.py')
    return api.types.status.SUCCESS
def Trigger(tc):
    """Apply QoS service-policy and pause (LLFC/PFC) port configuration per
    the testcase args, and publish whether drops are to be expected."""
    result = api.types.status.SUCCESS

    # Enable or disable the QoS service policy on the interfaces.
    if getattr(tc.args, 'intf_qos', False) == True:
        result = api.EnableQosPorts(tc.nodes, "pmap-iota")
        if result != api.types.status.SUCCESS:
            api.Logger.error("QoS Service policy configure failed. Ignoring.")
    else:
        result = api.DisableQosPorts(tc.nodes, "pmap-iota")
        if result != api.types.status.SUCCESS:
            api.Logger.error("QoS Service policy unconfigure failed. Ignoring.")

    pause_enable = getattr(tc.args, 'pause_enable', False)
    pause_type = getattr(tc.args, 'pause_type', 0)
    api.Logger.info("pause_enable {}, pause_type {}".format(pause_enable,
                                                           pause_type))
    if pause_type == 0:
        # Link-level flow control (LLFC).
        if pause_enable:
            result = api.EnablePausePorts(tc.nodes)
            if result != api.types.status.SUCCESS:
                api.Logger.error("Port pause type LLFC configure failed. Ignoring.")
        else:
            result = api.DisablePausePorts(tc.nodes)
            if result != api.types.status.SUCCESS:
                api.Logger.error("Port pause type LLFC unconfigure failed. Ignoring.")
    else:
        # Priority flow control (PFC).
        if pause_enable:
            result = api.EnablePfcPorts(tc.nodes)
            if result != api.types.status.SUCCESS:
                api.Logger.error("Port pause type PFC configure failed. Ignoring.")
        else:
            result = api.DisablePfcPorts(tc.nodes)
            if result != api.types.status.SUCCESS:
                api.Logger.error("Port pause type PFC unconfigure failed. Ignoring.")

    # Configuring pause on the switch failed. Expect drops.
    #if result != api.types.status.SUCCESS:
    #    expect_drops = True
    # Ignoring drops for till the infra return issue is fixed
    expect_drops = True
    api.SetTestsuiteAttr('qos_expect_drops', expect_drops)
    return api.types.status.SUCCESS
def Setup(tc):
    """Build the driver load options from testcase args and publish them."""
    tc.iota_path = api.GetTestsuiteAttr("driver_path")
    tc.nodes = api.GetNaplesHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])
    # On linux these options are appended to insmod; on freebsd (when not
    # empty) they are passed to kenv before loading.
    tc.insmod_opts = ''
    if hasattr(tc.args, 'spec'):
        if tc.os == host.OS_TYPE_LINUX:
            prefix = " spec="
        else:
            prefix = " hw.ionic_rdma.spec="
        tc.insmod_opts += prefix + tc.args.spec
    api.SetTestsuiteAttr("insmod_opts", tc.insmod_opts)
    return api.types.status.SUCCESS
def Setup(tc):
    """Build the complete flow set for the suite and publish it, honoring
    the max_dyn_flow_count and use_cfg testcase args."""
    global max_dyn_flow_cnt
    max_dyn_flow_cnt = getattr(tc.args, "max_dyn_flow_count", 1)
    tc.use_cfg = getattr(tc.args, 'use_cfg', 'yes')
    # Create a flowset consisting of all types of flows.
    flow_set = FlowSet(tc.use_cfg)
    flow_set.CreateAllFlowSets()
    api.SetTestsuiteAttr("FlowSet", flow_set)
    return api.types.status.SUCCESS
def Main(tc):
    """Compare current vs recorded link-flap counts on every Naples host.

    Honors the LinkTestValidationDisable/Skip testsuite attributes and
    refreshes the recorded baseline after each check.  Returns FAILURE
    when new link flaps are detected or counters look inconsistent.
    """
    api.Logger.info("Verify Link .")
    isDisabled = api.GetTestsuiteAttr("LinkTestValidationDisable")
    if (isDisabled == 1):
        # BUG FIX: corrected typos in log messages throughout
        # ("gloabal"/"globale" -> "global", "dues" -> "due"); the skip
        # message now names the variable it actually checks.
        api.Logger.info(
            "Verify Links is disabled through LinkTestValidationDisable"
            " global var")
        return api.types.status.SUCCESS
    isSkip = api.GetTestsuiteAttr("LinkTestValidationSkip")
    if (isSkip == 1):
        api.Logger.info(
            "Verify Link test will be skipped due to LinkTestValidationSkip"
            " global var")
        api.SetTestsuiteAttr("LinkTestValidationSkip", 0)
    finalResult = api.types.status.SUCCESS
    for naples_host in api.GetNaplesHostnames():
        totalNumberLinkFlapped = linkmgr.GetLinkStatusChangeCount(naples_host)
        # BUG FIX: identity comparison for None (was '== None').
        if totalNumberLinkFlapped is None:
            if GlobalOptions.dryrun:
                return api.types.status.SUCCESS
            api.Logger.error(
                "Failure happened while trying to read number of link flaps"
                " on Naples: " + naples_host)
            return api.types.status.FAILURE
        lastNumberLinkFlapped = linkmgr.GetLastLinkStatusChangeCount(
            naples_host)
        if (isSkip == 1):
            # Skipping validation: just refresh the baseline.
            linkmgr.ResetLastLinkStatusChangeCount(naples_host,
                                                   totalNumberLinkFlapped)
        elif (totalNumberLinkFlapped == lastNumberLinkFlapped):
            api.Logger.info(
                "Verify Link test passed. No new link flap were detected on"
                " Naples " + naples_host)
        elif (lastNumberLinkFlapped < totalNumberLinkFlapped):
            api.Logger.error(
                "Verify Link test failed on Naples [%s]. [%d] new link flaps"
                " were detected" %
                (naples_host,
                 (totalNumberLinkFlapped - lastNumberLinkFlapped)))
            finalResult = api.types.status.FAILURE
        else:
            # Counter went backwards — inconsistent state.
            api.Logger.error(
                "Verify Link test failed on Naples [%s] with UNKNOWN ERROR."
                " Number of total link flap [%d] is less than number of link"
                " flap detected before this test [%d]" %
                (naples_host, totalNumberLinkFlapped, lastNumberLinkFlapped))
            finalResult = api.types.status.FAILURE
        linkmgr.ResetLastLinkStatusChangeCount(naples_host,
                                               totalNumberLinkFlapped)
    return finalResult
def Setup(tc):
    """Distribute the sonic package to all Naples nodes and publish the
    pnso max-cpus attribute."""
    tc.dmesg_commands = []
    tc.output_commands = []
    naples_hosts = api.GetNaplesHostnames()
    # Could be restricted to the first Naples only; currently runs on all.
    tc.nodes = naples_hosts
    tc.os = api.GetNodeOs(naples_hosts[0])
    tc.sonicpkg = api.GetTopDir() + '/' + tc.args.package
    ret = __copy_sonic_to_all_naples(tc)
    if ret != api.types.status.SUCCESS:
        return ret
    api.SetTestsuiteAttr(pnsodefs.PNSO_TEST_MAXCPUS_ATTR, tc.args.maxcpus)
    return api.types.status.SUCCESS
def Setup(tc):
    """Publish the driver path and distribute the RDMA driver package plus
    the show_gid helper to the relevant nodes."""
    api.SetTestsuiteAttr("driver_path", api.GetHostToolsDir() + '/')
    tc.iota_path = api.GetTestsuiteAttr("driver_path")
    tc.nodes = api.GetNaplesHostnames()
    tc.other_nodes = api.GetWorkloadNodeHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])
    platform_gendir = api.GetTopDir() + '/platform/gen/'
    # Pick OS-specific package and helper paths.
    if tc.os == host.OS_TYPE_LINUX:
        tc.pkgname = 'drivers-linux.tar.xz'
        tc.showgid = 'drivers-linux/show_gid'
    else:
        tc.pkgname = 'drivers-freebsd.tar.xz'
        tc.showgid = 'drivers-freebsd/show_gid'

    # Push the RDMA driver package to every Naples node.
    for node in tc.nodes:
        api.Logger.info("Copying {pkg} to {node}"
                        .format(pkg=tc.pkgname, node=node))
        resp = api.CopyToHost(node, [platform_gendir + tc.pkgname])
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to copy {pkg} to {node}: {resp}"
                             .format(pkg=tc.pkgname, node=node, resp=resp))
            return api.types.status.FAILURE

    # Push only the show_gid helper to the remaining workload nodes.
    for node in tc.other_nodes:
        if node in tc.nodes:
            continue
        api.Logger.info("Copying show_gid to {node}".format(node=node))
        resp = api.CopyToHost(node, [platform_gendir + tc.showgid])
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to copy show_gid to {node}: {resp}"
                             .format(node=node, resp=resp))
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Generate policy config for either the dual-nic e2e topology or the
    single-nic local topology, and install scapy on the workload nodes."""
    tc.dualnic = getattr(tc.args, 'dualnic', False)

    # Set absolute path for json files.
    api.SetTestsuiteAttr("template_policy_json_path", api.GetTopDir() + \
                         TEMPLATE_PLCY_JSON_PATH)
    tc.template_policy_json_path = api.GetTestsuiteAttr(
        "template_policy_json_path")

    tc.athena_node_nic_pairs = athena_app_utils.get_athena_node_nic_names()
    tc.wl_node_nic_pairs = utils.get_classic_node_nic_pairs()
    tc.host_ifs = {}

    if tc.dualnic:
        # Record host uplinks plus workload sub-interfaces per (node, nic).
        for node, nic in tc.wl_node_nic_pairs:
            tc.host_ifs[(node, nic)] = api.GetNaplesHostInterfaces(node, nic)
            for wl in api.GetWorkloads(node):
                tc.host_ifs[(node, nic)].append(wl.interface)
        gen_plcy_cfg_e2e_wl_topo(tc)
        wl_nodes = [nname for nname, nic in tc.wl_node_nic_pairs]
        # Install python scapy packages
        install.InstallScapyPackge(tc, wl_nodes)
    else:
        # Assuming only one bitw node and one workload node
        tc.bitw_node_name, tc.bitw_nic = tc.athena_node_nic_pairs[0]
        tc.wl_node_name, tc.classic_nic = tc.wl_node_nic_pairs[0]
        gen_plcy_cfg_local_wl_topo(tc)
        # Install python scapy packages
        install.InstallScapyPackge(tc, [tc.wl_node_name])
    return api.types.status.SUCCESS
def Verify(tc):
    """Check build_rdma command results and publish the collected unames."""
    if tc.resp_uname is None or tc.resp is None:
        return api.types.status.FAILURE
    result = api.types.status.SUCCESS
    api.Logger.info("build_rdma results for the following nodes: {0}".format(
        tc.nodes))

    # Collect 'uname' outputs; a failed foreground command fails the test.
    unames = []
    for uname_cmd in tc.resp_uname.commands:
        api.PrintCommandResults(uname_cmd)
        failed = (uname_cmd.exit_code != 0
                  and not api.Trigger_IsBackgroundCommand(uname_cmd))
        if failed:
            result = api.types.status.FAILURE
        else:
            unames.append(uname_cmd.stdout)
    api.SetTestsuiteAttr("unames", unames)

    # Validate the build commands themselves.
    for build_cmd in tc.resp.commands:
        api.PrintCommandResults(build_cmd)
        if (build_cmd.exit_code != 0
                and not api.Trigger_IsBackgroundCommand(build_cmd)):
            result = api.types.status.FAILURE
    return result
def Trigger(tc):
    """(Re)start the athena flow logger on each (node, nic) pair.

    Kills any already-running logger, truncates the flow-log files, starts
    the logger in the background, records the log-window start time, and
    finally verifies the logger is running everywhere.
    """
    for node, nic in tc.node_nic_pairs:
        # Look for an already-running flow logger.
        req = api.Trigger_CreateExecuteCommandsRequest()
        cmd = "ps -aef | grep athena_flow_logger | grep -v 'grep'"
        api.Trigger_AddNaplesCommand(req, node, cmd, nic)
        resp = api.Trigger(req)
        cmd_resp = resp.commands[0]
        api.PrintCommandResults(cmd_resp)
        if cmd_resp.exit_code != 0:
            api.Logger.error("ps failed on {}".format((node, nic)))
            #return api.types.status.FAILURE
        if "athena_flow_logger" in cmd_resp.stdout:
            # Stop the existing logger before restarting it.
            req = api.Trigger_CreateExecuteCommandsRequest()
            athena_flow_logger_pid = cmd_resp.stdout.strip().split()[1]
            api.Logger.info("athena_flow_logger already running on {}. "
                            "Killing pid {}".format(node,
                                                    athena_flow_logger_pid))
            cmd = "kill -SIGUSR1 {}".format(athena_flow_logger_pid)
            api.Trigger_AddNaplesCommand(req, node, cmd, nic)
            resp = api.Trigger(req)

        # Reset the flow-log files so this run starts from empty logs.
        req = api.Trigger_CreateExecuteCommandsRequest()
        cmd = "truncate /data/flow_log_* -s 0"
        api.Trigger_AddNaplesCommand(req, node, cmd, nic)
        resp = api.Trigger(req)
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                api.Logger.error("truncate flow_log files failed on "
                                 "{}".format((node, nic)))

        # Start the flow logger in the background.
        # BUG FIX: the original triggered an EMPTY request here (an extra
        # api.Trigger call before the start command was added); the spurious
        # round-trip was removed.
        req = api.Trigger_CreateExecuteCommandsRequest()
        cmd = "/nic/tools/start-flow-logger-iota.sh"
        api.Trigger_AddNaplesCommand(req, node, cmd, nic, background = True)
        resp = api.Trigger(req)
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                api.Logger.error("start-flow-logger.sh failed on "
                                 "{}".format((node, nic)))
                return api.types.status.FAILURE

    # Set the current time and use it to read the correct log file later.
    curr_time = time.time()
    api.SetTestsuiteAttr("flow_log_curr_window_start", curr_time)
    api.SetTestsuiteAttr("flow_log_curr_file_num", 0)

    # Confirm the logger is now running on every (node, nic) pair.
    for node, nic in tc.node_nic_pairs:
        req = api.Trigger_CreateExecuteCommandsRequest()
        cmd = "ps -aef | grep athena_flow_logger | grep -v 'grep'"
        api.Trigger_AddNaplesCommand(req, node, cmd, nic)
        resp = api.Trigger(req)
        cmd_resp = resp.commands[0]
        api.PrintCommandResults(cmd_resp)
        if cmd_resp.exit_code != 0:
            api.Logger.error("ps failed on {}".format((node, nic)))
            #return api.types.status.FAILURE
        if "athena_flow_logger" not in cmd_resp.stdout:
            api.Logger.error("athena_flow_logger not running on "
                             "{}".format((node, nic)))
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Pick one untagged and one vlan-tagged remote workload pair, then
    issue the show_gid/lif/drop-stat commands whose output Verify parses."""
    pairs = api.GetRemoteWorkloadPairs()

    # Locate the first row of pairs that carries a vlan encap.
    tc.vlan_idx = -1
    for row in range(len(pairs)):
        if tc.vlan_idx > -1:
            break
        # NOTE(review): width taken from pairs[0] — assumes all rows have
        # the same length; confirm against GetRemoteWorkloadPairs.
        for col in range(len(pairs[0])):
            if pairs[row][col].encap_vlan != 0:
                tc.vlan_idx = row
                break
    # Row 0 is reserved for the untagged pair, so a tagged row must be >= 1.
    if tc.vlan_idx < 1:
        return api.types.status.FAILURE

    tc.w1, tc.w2 = pairs[0][0], pairs[0][1]
    tc.vlan_w1, tc.vlan_w2 = pairs[tc.vlan_idx][0], pairs[tc.vlan_idx][1]
    tc.cmd_cookies = []
    api.SetTestsuiteAttr("vlan_idx", tc.vlan_idx)

    #==============================================================
    # get the device and GID
    #==============================================================
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Logger.info("Extracting device and GID using show_gid")
    api.Logger.info("Interfaces are {0} {1}".format(tc.w1.interface,
                                                    tc.w2.interface))

    # sleep for 10 secs to ensure that show_gid is returning gids on naples
    cmd = 'sleep 10'
    api.Trigger_AddCommand(req, tc.w1.node_name, tc.w1.workload_name, cmd)
    tc.cmd_cookies.append(cmd)

    # One show_gid per workload, in a fixed order Verify relies on.
    for wl in (tc.w1, tc.w2, tc.vlan_w1, tc.vlan_w2):
        cmd = "show_gid | grep %s | grep v2" % wl.ip_address
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                               tc.iota_path + cmd, timeout=60)
        tc.cmd_cookies.append(cmd)

    # dump rdma qstate
    for wl in (tc.w1, tc.w2):
        if not wl.IsNaples():
            continue
        cmd = '/nic/bin/halctl show lif | grep host | grep -v host-management'
        api.Trigger_AddNaplesCommand(req, wl.node_name, cmd)
        tc.cmd_cookies.append(cmd)

    # Show drops command for QOS testing
    cmd = '/nic/bin/halctl show system statistics drop | grep -i "occupancy"'
    for wl in (tc.w1, tc.w2):
        if wl.IsNaples():
            api.Logger.info("Running show drops command {} on node_name {}"
                            .format(cmd, wl.node_name))
            api.Trigger_AddNaplesCommand(req, wl.node_name, cmd)
            tc.cmd_cookies.append(
                "show drops cmd for node {} ip_address {}".format(
                    wl.node_name, wl.ip_address))

    # On BSD also fetch the ionic qos sysctl state (first Naples only).
    if tc.os == host.OS_TYPE_BSD:
        cmd = 'sysctl dev.ionic.0.qos'
        if tc.w1.IsNaples():
            api.Logger.info("Running show drops command {} on node_name {}"
                            .format(cmd, tc.w1.node_name))
            api.Trigger_AddCommand(req, tc.w1.node_name, tc.w1.workload_name,
                                   cmd)
            tc.cmd_cookies.append("QoS sysctl get")
        elif tc.w2.IsNaples():
            api.Logger.info("Running show drops command {} on node_name {}"
                            .format(cmd, tc.w2.node_name))
            api.Trigger_AddCommand(req, tc.w2.node_name, tc.w2.workload_name,
                                   cmd)
            tc.cmd_cookies.append("QoS sysctl get")

    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Init():
    """Create the MPLS-over-UDP tunnel manager and publish it."""
    api.SetTestsuiteAttr("MPLSUDP_TUNNEL_MANAGER", MplsOverUdpTunnelManager())
    return api.types.status.SUCCESS
def ResetLastLinkStatusChangeCount(naples_host, newValue):
    """Store *newValue* as the link-flap baseline for *naples_host*."""
    attr_name = "LinkTestValidationNumberOfFlaps" + naples_host
    api.SetTestsuiteAttr(attr_name, newValue)
def ClearFlowSet():
    """Drop the cached FlowSet testsuite attribute, if any."""
    flow_set = api.GetTestsuiteAttr("FlowSet")
    if flow_set is not None:
        api.SetTestsuiteAttr("FlowSet", None)
        del flow_set
    return api.types.status.SUCCESS
def Trigger(tc):
    """Read the current flow-log file on each (node, nic) pair and verify
    the logged pkts_from_host count matches tc.num_pkts."""
    expect_pkts = tc.num_pkts != 0

    # Wait out the remainder of the 65-second logging window.
    window_start = api.GetTestsuiteAttr("flow_log_curr_window_start")
    remaining = 65.0 - (time.time() - window_start)
    if remaining > 0:
        api.Logger.info(
            'Sleeping for {} secs before reading the flow log file'.format(
                remaining))
        time.sleep(remaining)
    # Advance the window for the next read.
    api.SetTestsuiteAttr("flow_log_curr_window_start", window_start + 60)

    file_num = api.GetTestsuiteAttr("flow_log_curr_file_num")
    flow_log_curr_file = "/data/flow_log_{}.txt".format(file_num)
    # The logger alternates between two files; flip for the next read.
    api.SetTestsuiteAttr("flow_log_curr_file_num", (file_num + 1) % 2)

    for node, nic in tc.node_nic_pairs:
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, node,
                                     "cat {}".format(flow_log_curr_file), nic)
        resp = api.Trigger(req)
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                api.Logger.error("cat {} failed on "
                                 "{}".format(flow_log_curr_file, node))
                return api.types.status.FAILURE
            if not cmd.stdout:
                if expect_pkts == True:
                    api.Logger.error("Error: Packet should be logged "
                                     "but flow_log is empty. Node: "
                                     "{}".format((node)))
                    return api.types.status.FAILURE
            else:
                # Scan 'Data' lines for a matching pkts_from_host counter.
                pkt_match = False
                for line in cmd.stdout.split("\n"):
                    if "Data" not in line:
                        continue
                    data_tokens = line.split("==>")
                    for data_val in data_tokens[1].split(','):
                        if "pkts_from_host" in data_val:
                            if int(data_val.split(': ')[1]) == tc.num_pkts:
                                pkt_match = True
                                break
                if pkt_match == False:
                    api.Logger.error(
                        "Error: Packet count mismatch for node {}. "
                        "expected: {}".format(node, tc.num_pkts))
                    return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Setup(tc):
    """Stage a bootstrap device.json on the Athena node and reboot it.

    Discovers the bitw/workload nodes, records per-uplink interface info,
    copies device_bootstrap.json and a pensando_pre_init.sh hook to the
    Naples card, moves them into place, and restarts the node.
    """
    tc.bitw_node_name = None
    tc.wl_node_name = None

    # Assuming only one bitw node and one workload node.
    nics = store.GetTopology().GetNicsByPipeline("athena")
    for nic in nics:
        tc.bitw_node_name = nic.GetNodeName()
        break
    api.SetTestsuiteAttr("bitw_node_name", tc.bitw_node_name)

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE
    tc.wl_node_name = workloads[0].node_name
    api.SetTestsuiteAttr("wl_node_name", tc.wl_node_name)

    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)
    # Assuming single nic per host
    if len(host_intfs) != 2:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE

    tc.wl = []
    for wl in workloads:
        tc.wl.append(wl)
        api.Logger.info("wl: vlan: {}, mac: {}, ip: {}".format(
            wl.uplink_vlan, wl.mac_address, wl.ip_address))

    # Interface layout: native wl ip plus tagged wl sub-ip/vlan per inb_mnic.
    # NOTE(review): assumes workloads 0/1 are the native ones and 2/3 the
    # tagged ones — confirm against the topology definition.
    tc.intfs = []
    tc.intfs.append({
        'name': 'inb_mnic0',
        'ip': str(tc.wl[0].ip_address),
        'sub_ip': str(tc.wl[2].ip_address),
        'vlan': str(tc.wl[2].uplink_vlan)
    })
    tc.intfs.append({
        'name': 'inb_mnic1',
        'ip': str(tc.wl[1].ip_address),
        'sub_ip': str(tc.wl[3].ip_address),
        'vlan': str(tc.wl[3].uplink_vlan)
    })
    api.SetTestsuiteAttr("inb_mnic_intfs", tc.intfs)

    # copy device_bootstrap.json to naples
    bootstrap_json_fname = api.GetTopDir(
    ) + '/nic/conf/athena/device_bootstrap.json'
    api.CopyToNaples(tc.bitw_node_name, [bootstrap_json_fname], "")

    # Write and copy pensando_pre_init.sh to naples.
    # BUG FIX: use a context manager so the file is closed even on error.
    with open('pensando_pre_init.sh', "w") as f:
        f.write('echo "copying device.json"\n')
        f.write('cp /data/device_bootstrap.json /nic/conf/device.json\n')
    api.CopyToNaples(tc.bitw_node_name, ['pensando_pre_init.sh'], "")
    os.remove('pensando_pre_init.sh')

    # Move the hook to /sysconfig/config0/ and stage the bootstrap json.
    req = api.Trigger_CreateExecuteCommandsRequest()
    cmd = "mv /pensando_pre_init.sh /sysconfig/config0/"
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)
    cmd = "mv /device_bootstrap.json /data/"
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, cmd)
    resp = api.Trigger(req)
    # BUG FIX: check every staged command, not only commands[0]; previously
    # a failure of the second 'mv' went undetected.
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("Bootstrap setup failed on node %s" % \
                             tc.bitw_node_name)
            return api.types.status.FAILURE

    # reboot the node
    api.Logger.info("Rebooting {}".format(tc.bitw_node_name))
    return api.RestartNodes([tc.bitw_node_name], 'reboot')
def gen_plcy_cfg_e2e_wl_topo(tc):
    """Generate per-node datapath policy.json files for the dual-nic
    end-to-end topology and push them to the two Athena nodes.

    Starts from the template policy, fills each vnic with vlan/mac/slot-id
    values derived from the workloads on node1 (up0) and node2 (up1),
    writes one policy file per node and copies each to its Naples card.
    """
    # Publish absolute paths for the generated per-node policy files.
    api.SetTestsuiteAttr("node1_dp_policy_json_path", api.GetTopDir() + \
                         E2E_NODE1_DP_PLCY_JSON_PATH)
    api.SetTestsuiteAttr("node2_dp_policy_json_path", api.GetTopDir() + \
                         E2E_NODE2_DP_PLCY_JSON_PATH)
    node1_dp_plcy_json_path = api.GetTestsuiteAttr("node1_dp_policy_json_path")
    node2_dp_plcy_json_path = api.GetTestsuiteAttr("node2_dp_policy_json_path")

    # Get list of workloads for nodes
    nodes = [pair[0] for pair in tc.wl_node_nic_pairs]
    workloads_node1 = api.GetWorkloads(nodes[0])
    workloads_node2 = api.GetWorkloads(nodes[1])

    # Read template policy.json file
    t_plcy_obj = None
    with open(tc.template_policy_json_path) as fd:
        t_plcy_obj = json.load(fd)

    t_vnics = t_plcy_obj['vnic']
    # One deep copy per node so each gets independent per-vnic edits.
    n1_plcy_obj = deepcopy(t_plcy_obj)
    n2_plcy_obj = deepcopy(t_plcy_obj)

    for idx, t_vnic in enumerate(t_vnics):
        # Use workloads on up0 for node1 and use workloads
        # on up1 for node2 since they match switch vlan config
        node1_wl = workloads_node1[utils.get_wl_idx(0, idx+1)]
        node2_wl = workloads_node2[utils.get_wl_idx(1, idx+1)]
        # TODO: tmp fix. Need infra query api
        # total vlans = 36, so add 12 for vlan in 2nd grp
        tc.encap_vlan_id = node1_wl.uplink_vlan + 12
        api.Logger.info("idx %s vnic: encap vlan %s" % (
            idx, tc.encap_vlan_id))
        node1_up0_mac = node1_wl.mac_address
        node2_up1_mac = node2_wl.mac_address

        for node in nodes:
            # Edit the vnic entry belonging to this node's policy copy.
            if node == 'node1':
                vnic = n1_plcy_obj['vnic'][idx]
            else:
                vnic = n2_plcy_obj['vnic'][idx]
            vnic_id = vnic['vnic_id']
            api.Logger.info('Setup policy.json file for No.%s vnic '
                            'on node %s' % (vnic_id, node))

            vlan_id, host_mac = None, None
            src_slot_id, dst_slot_id = None, None
            # Per-node view: local workload vlan/mac, local/peer slot ids.
            if node == 'node1':
                vlan_id = node1_wl.uplink_vlan
                src_slot_id = _get_slot_id('node1', int(vnic_id))
                dst_slot_id = _get_slot_id('node2', int(vnic_id))
                host_mac = node1_up0_mac
            else:
                vlan_id = node2_wl.uplink_vlan
                src_slot_id = _get_slot_id('node2', int(vnic_id))
                dst_slot_id = _get_slot_id('node1', int(vnic_id))
                host_mac = node2_up1_mac
            api.Logger.info("%s workload for vnic %s: vlan %s, "
                            "host mac %s" % (node, vnic_id, vlan_id,
                                             host_mac))

            # these keys need to be changed for both L2 and L3 with or
            # without NAT.
            vnic['vlan_id'] = str(vlan_id)
            vnic['slot_id'] = str(src_slot_id)
            vnic['session']['to_switch']['host_mac'] = str(host_mac)
            vnic['rewrite_underlay']['vlan_id'] = str(tc.encap_vlan_id)
            # The encap type decides which field carries the peer slot id.
            if vnic['rewrite_underlay']['type'] == 'mplsoudp':
                vnic['rewrite_underlay']['mpls_label2'] = str(dst_slot_id)
            elif vnic['rewrite_underlay']['type'] == 'geneve':
                vnic['rewrite_underlay']['dst_slot_id'] = str(dst_slot_id)

            # only applicable to L3 vnics
            if not utils.is_L2_vnic(vnic):
                if node == 'node1':
                    vnic['rewrite_host']['smac'] = str(node2_up1_mac)
                    vnic['rewrite_host']['dmac'] = str(node1_up0_mac)
                else:
                    vnic['rewrite_host']['smac'] = str(node1_up0_mac)
                    vnic['rewrite_host']['dmac'] = str(node2_up1_mac)

            # only applicable to L2 vnics
            if utils.is_L2_vnic(vnic):
                if node == 'node1':
                    vnic['l2_flows_range']['h2s_mac_lo'] = str(node2_up1_mac)
                    vnic['l2_flows_range']['h2s_mac_hi'] = str(node2_up1_mac)
                    vnic['l2_flows_range']['s2h_mac_lo'] = str(node1_up0_mac)
                    vnic['l2_flows_range']['s2h_mac_hi'] = str(node1_up0_mac)
                else:
                    vnic['l2_flows_range']['h2s_mac_lo'] = str(node1_up0_mac)
                    vnic['l2_flows_range']['h2s_mac_hi'] = str(node1_up0_mac)
                    vnic['l2_flows_range']['s2h_mac_lo'] = str(node2_up1_mac)
                    vnic['l2_flows_range']['s2h_mac_hi'] = str(node2_up1_mac)

    # write modified plcy objects to file
    with open(node1_dp_plcy_json_path, 'w+') as fd:
        json.dump(n1_plcy_obj, fd, indent=4)
    with open(node2_dp_plcy_json_path, 'w+') as fd:
        json.dump(n2_plcy_obj, fd, indent=4)

    # copy both policy.json files to respective nodes
    # (staged under a common temp name, then removed from the workspace)
    tmp_plcy_json_path = api.GetTopDir() + DP_PLCY_JSON_PATH
    node, nic = tc.athena_node_nic_pairs[0]
    copyfile(node1_dp_plcy_json_path, tmp_plcy_json_path)
    api.CopyToNaples(node, [tmp_plcy_json_path], "", nic)
    node, nic = tc.athena_node_nic_pairs[1]
    copyfile(node2_dp_plcy_json_path, tmp_plcy_json_path)
    api.CopyToNaples(node, [tmp_plcy_json_path], "", nic)
    os.remove(tmp_plcy_json_path)
def Setup(tc):
    """Discover the kni/mfg workloads on the uplink under test and verify
    the mnic_p2p interface exists on the Naples card."""
    tc.mfg_mode = api.GetTestsuiteAttr("mfg_mode")
    if tc.mfg_mode is None:
        tc.mfg_mode = 'no'
    tc.test_intf = api.GetTestsuiteAttr("mfg_test_intf")
    if tc.test_intf is None:
        tc.test_intf = 'up1'  # default up1 for kni tests

    # get node info
    tc.bitw_node_name = api.GetTestsuiteAttr("bitw_node_name")
    tc.wl_node_name = api.GetTestsuiteAttr("wl_node_name")

    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)
    # Assuming single nic per host
    if len(host_intfs) != 2:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE
    up0_intf, up1_intf = host_intfs[0], host_intfs[1]

    workloads = api.GetWorkloads()
    if not workloads:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    # Split workloads on the uplink under test into the native (untagged)
    # workload and the tagged sub-interface workloads.
    tc.sub_wl = []
    for wl in workloads:
        on_up0 = wl.parent_interface == up0_intf and tc.test_intf == 'up0'
        on_up1 = wl.parent_interface == up1_intf and tc.test_intf == 'up1'
        if not (on_up0 or on_up1):
            continue
        if wl.uplink_vlan == 0:
            tc.wl0 = wl               # Native workload
        else:
            tc.sub_wl.append(wl)      # Tagged workload

    # 1 subintf is used by default for kni tests; 3 for mfg mode tests
    # (2 for positive test and 1 for negative test).
    tc.sub_wl = tc.sub_wl[:3] if tc.mfg_mode == 'yes' else tc.sub_wl[:1]

    api.SetTestsuiteAttr("kni_wl", tc.wl0)
    api.SetTestsuiteAttr("kni_sub_wl", tc.sub_wl)
    api.Logger.info("wl0: vlan: {}, mac: {}, ip: {}".format(
        tc.wl0.uplink_vlan, tc.wl0.mac_address, tc.wl0.ip_address))
    for idx, sub_wl in enumerate(tc.sub_wl):
        api.Logger.info("sub_wl[{}]: vlan: {}, mac: {}, ip: {}".format(
            idx, sub_wl.uplink_vlan, sub_wl.mac_address, sub_wl.ip_address))

    # check if mnic_p2p interface is present
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, "ifconfig mnic_p2p")
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("mnic_p2p intf not found on naples %s" % \
                             tc.bitw_node_name)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def gen_plcy_cfg_local_wl_topo(tc):
    """Generate the datapath policy.json for the single-nic local topology
    and copy it to the bitw (Athena) node.

    Fills each template vnic with the vlan/mac values of the workload
    sub-interfaces found on the two host uplinks of the workload node.
    """
    # Publish the absolute path of the generated policy file.
    api.SetTestsuiteAttr("dp_policy_json_path", api.GetTopDir() + \
                         DP_PLCY_JSON_PATH)
    tc.dp_policy_json_path = api.GetTestsuiteAttr("dp_policy_json_path")
    # Vnic ids whose flow logging should be disabled (from testcase args).
    tc.skip_flow_log_vnics = getattr(tc.args, "skip_flow_log_vnics", [])

    # Read template policy.json file
    plcy_obj = None
    with open(tc.template_policy_json_path) as fd:
        plcy_obj = json.load(fd)

    workloads = api.GetWorkloads()
    if len(workloads) == 0:
        api.Logger.error('No workloads available')
        return api.types.status.FAILURE

    host_intfs = api.GetNaplesHostInterfaces(tc.wl_node_name)
    # Assuming single nic per host
    if len(host_intfs) != 2:
        api.Logger.error('Failed to get host interfaces')
        return api.types.status.FAILURE
    tc.host_ifs[(tc.wl_node_name, tc.classic_nic)] = host_intfs
    up0_intf = host_intfs[0]
    up1_intf = host_intfs[1]

    vnics = plcy_obj['vnic']
    for idx, vnic in enumerate(vnics):
        vnic_id = vnic['vnic_id']
        # vnic_type has 2 options: L2 or L3
        tc.vnic_type = 'L2' if "vnic_type" in vnic and vnic['vnic_type'] == 'L2' else 'L3'
        tc.nat = 'yes' if "nat" in vnic else 'no'
        api.Logger.info('Setup policy.json file for No.%s vnic' % (vnic_id))

        up0_vlan, up1_vlan = None, None
        up0_mac, up1_mac = None, None
        # Extreme initial values so min()/max() below pick real macs.
        mac_lo = 'ff:ff:ff:ff:ff:ff'
        mac_hi = '00:00:00:00:00:00'

        # Map this vnic to one workload per uplink.
        wl_up0_idx = utils.get_wl_idx(0, idx+1)
        wl_up1_idx = utils.get_wl_idx(1, idx+1)
        wl_up0 = workloads[wl_up0_idx]
        wl_up1 = workloads[wl_up1_idx]
        if wl_up0.parent_interface == up0_intf:
            up0_vlan = wl_up0.uplink_vlan
            up0_mac = wl_up0.mac_address
        else:
            api.Logger.error('The interface order prediction is wrong')
        if wl_up1.parent_interface == up1_intf:
            up1_vlan = wl_up1.uplink_vlan
            up1_mac = wl_up1.mac_address
        else:
            api.Logger.error('The interface order prediction is wrong')

        if not up0_mac or not up1_mac:
            api.Logger.error('Failed to get workload sub-intf mac addresses')
            return api.types.status.FAILURE
        if not up0_vlan or not up1_vlan:
            api.Logger.error('Failed to get workload sub-intf vlan value')
            return api.types.status.FAILURE

        # NOTE(review): lexicographic min/max on mac strings — assumes
        # uniformly formatted lowercase macs; confirm workload mac format.
        mac_lo = min(mac_lo, up0_mac, up1_mac)
        mac_hi = max(mac_hi, up0_mac, up1_mac)

        api.Logger.info('Workload0: up0_intf %s up0_vlan %s up0_mac %s' % (
            up0_intf, up0_vlan, up0_mac))
        api.Logger.info('Workload1: up1_intf %s up1_vlan %s up1_mac %s' % (
            up1_intf, up1_vlan, up1_mac))
        api.Logger.info('mac_lo %s mac_hi %s' % (mac_lo, mac_hi))

        # these keys need to be changed for both L2 and L3 with or without
        # NAT.
        vnic['vlan_id'] = str(up1_vlan)
        vnic['rewrite_underlay']['vlan_id'] = str(up0_vlan)
        vnic['session']['to_switch']['host_mac'] = str(up1_mac)
        vnic['rewrite_underlay']['dmac'] = str(up0_mac)

        # these fields need to be changed only for L3
        if tc.vnic_type == 'L3':
            vnic['rewrite_host']['dmac'] = str(up1_mac)

        # only applicable to L2 vnics
        if tc.vnic_type == 'L2':
            vnic['l2_flows_range']['h2s_mac_lo'] = str(mac_lo)
            vnic['l2_flows_range']['h2s_mac_hi'] = str(mac_hi)
            vnic['l2_flows_range']['s2h_mac_lo'] = str(mac_lo)
            vnic['l2_flows_range']['s2h_mac_hi'] = str(mac_hi)

        # Set skip_flow_log if vnic is part of the skip_flow_log_vnics
        if int(vnic_id) in tc.skip_flow_log_vnics:
            api.Logger.info('Setting skip_flow_log for vnic %d' % (
                int(vnic_id)))
            vnic['session']['skip_flow_log'] = "true"

    # write vlan/mac addr and flow info to actual file
    with open(tc.dp_policy_json_path, 'w+') as fd:
        json.dump(plcy_obj, fd, indent=4)

    # copy policy.json file to node
    api.CopyToNaples(tc.bitw_node_name, [tc.dp_policy_json_path], "")