def dump_trace(tc, num):
    """Dump capture trace *num* on the testcase's Naples node.

    Queues, on the module-level g_req request, a captrace dump into
    /tmp/captrace<num>.bin followed by an `ls -l` of the resulting file.
    """
    dump_cmd = utils.g_path + 'captrace dump /tmp/captrace%s.bin' % num
    api.Trigger_AddNaplesCommand(g_req, tc.naples_node, dump_cmd)
    api.Logger.info("cmd - %s" % dump_cmd)

    # List the dump so its presence/size shows up in the command results.
    list_cmd = 'ls -l /tmp/captrace%s.bin' % num
    api.Trigger_AddNaplesCommand(g_req, tc.naples_node, list_cmd)
    api.Logger.info("cmd - %s" % list_cmd)
def TriggerUnderlayConnectivityTest(ping_count=20, connectivity='bgp_peer', interval=0.01):
    """Ping-test underlay connectivity across all Naples nodes.

    For connectivity == 'bgp_peer', pings every BGP peer address from its
    local node; otherwise pings every other node's device loopback IP from
    every node. All commands run in parallel.

    Args:
        ping_count: number of pings per target.
        connectivity: 'bgp_peer' or anything else for loopback-to-loopback.
        interval: inter-packet interval in seconds.

    Returns:
        (resp, cmd_cookies) where cmd_cookies[i] describes resp command i.
    """
    # Removed redundant `req = None` dead assignment.
    req = api.Trigger_CreateAllParallelCommandsRequest()
    cmd_cookies = []
    naplesHosts = api.GetNaplesHostnames()
    if connectivity == 'bgp_peer':
        for node in naplesHosts:
            for bgppeer in bgp_peer.client.Objects(node):
                cmd_cookie = "%s --> %s" % \
                    (str(bgppeer.LocalAddr), str(bgppeer.PeerAddr))
                api.Trigger_AddNaplesCommand(req, node,
                    f"ping -i {interval} -c {ping_count} -s 64 {str(bgppeer.PeerAddr)}")
                api.Logger.verbose(f"Ping test from {cmd_cookie}")
                cmd_cookies.append(cmd_cookie)
    else:
        # Loopback connectivity: full mesh between distinct nodes.
        for node1 in naplesHosts:
            for node2 in naplesHosts:
                if node1 == node2:
                    continue
                device1 = next(iter(device.client.Objects(node1)))
                device2 = next(iter(device.client.Objects(node2)))
                cmd_cookie = "%s --> %s" % (device1.IP, device2.IP)
                api.Trigger_AddNaplesCommand(req, node1,
                    f"ping -i {interval} -c {ping_count} -s 64 {device2.IP}")
                api.Logger.verbose(f"Loopback ping test from {cmd_cookie}")
                cmd_cookies.append(cmd_cookie)
    resp = api.Trigger(req)
    return resp, cmd_cookies
def Trigger(tc):
    """Start the second athena_app on every Naples node and verify it runs.

    Returns api.types.status.SUCCESS only when the start script succeeds and
    an athena_app process is visible in `ps` output on every node.
    """
    nodes = api.GetNaplesHostnames()

    # Phase 1: kick off the secondary agent on each node.
    for node in nodes:
        api.Logger.info("Start second athena_app to pick up policy.json")
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, node, "/nic/tools/start-sec-agent.sh")
        api.Trigger_AddNaplesCommand(req, node, "\r\n")
        resp = api.Trigger(req)
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("Start second athena_app failed on node {}".format(node))
            return api.types.status.FAILURE

    # Give the agent time to initialize before probing.
    time.sleep(10)

    # Phase 2: confirm the process is up on each node.
    for node in nodes:
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, node, "ps -aef | grep athena_app")
        resp = api.Trigger(req)
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("ps failed on Node {}".format(node))
            return api.types.status.FAILURE
        if "athena_app" not in cmd.stdout:
            # TODO: If athena_app is not running, run start_agent.sh manually
            api.Logger.error("no athena_app running on Node {}, need to start athena_app first".format(node))
            return api.types.status.FAILURE
        # NOTE(review): pid comes from the first token row of the grep output,
        # which may be the grep process itself (no `grep -v grep`) -- confirm.
        athena_sec_app_pid = cmd.stdout.strip().split()[1]
        api.Logger.info("athena_app up and running on Node {} with PID {}".format(node, athena_sec_app_pid))
    return api.types.status.SUCCESS
def Verify(tc):
    """Verify no mirror or flow-monitor sessions remain on any Naples node.

    Dumps mirror and flow-monitor state via halctl (in parallel) on every
    node; fails when any command errors out or produces non-empty output.

    Returns:
        api.types.status.SUCCESS when all dumps are empty, else FAILURE.
    """
    showMirrorCmd = "/nic/bin/halctl show mirror"
    showFlowMonitorCmd = "/nic/bin/halctl show flow-monitor"
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    for node_name in api.GetNaplesHostnames():
        api.Trigger_AddNaplesCommand(req, node_name, showMirrorCmd)
        api.Trigger_AddNaplesCommand(req, node_name, showFlowMonitorCmd)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
    if not api.Trigger_IsSuccess(resp):
        api.Logger.error(
            "Failed to execute HAL command to dump mirror and flow monitor.")
        return api.types.status.FAILURE
    for cmd in resp.commands:
        # Any output means stale mirror/flow-monitor state is still present.
        if cmd.stdout:
            # Fixed typo in error message ("Commad" -> "Command").
            api.Logger.error("Command: %s validation failed. Expecting empty"
                             % cmd.command)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def VerifyDumpCount(tc):
    """Verify the occurrence count of tc.args.dump_sym in tc.args.dump_file.

    Skips (returns SUCCESS) unless both dump_file and dump_sym are set.
    The expected count defaults to 0 and may be overridden with
    tc.args.dump_expected_count. Optionally cats tc.args.stdout_file first
    so it appears in the command results.
    """
    if not hasattr(tc.args, "dump_file") or not hasattr(tc.args, "dump_sym"):
        return api.types.status.SUCCESS
    expected = int(getattr(tc.args, "dump_expected_count", 0))
    for node, nic in tc.node_nic_pairs:
        req = api.Trigger_CreateExecuteCommandsRequest()
        if hasattr(tc.args, "stdout_file"):
            api.Trigger_AddNaplesCommand(req, node,
                                         "cat %s" % (tc.args.stdout_file), nic)
        api.Trigger_AddNaplesCommand(
            req, node,
            "grep -c %s %s" % (tc.args.dump_sym, tc.args.dump_file), nic)
        resp = api.Trigger(req)
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)
            if "grep -c" not in cmd.command:
                continue
            actual = cmd.stdout.strip().split()[0]
            if int(actual) != expected:
                api.Logger.error(
                    "%s actual_count %s != expected_count %d in %s" %
                    (tc.args.dump_sym, actual, expected, tc.args.dump_file))
                return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Configure mnic_p2p addresses and launch testpmd on the bitw node."""
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    # Base p2p interface address: first workload IP + 1.
    tc.mnic_p2p_ip = str(ip_address(tc.wl0.ip_address) + 1)
    utils.configureNaplesIntf(req, tc.bitw_node_name, 'mnic_p2p',
                              tc.mnic_p2p_ip, '255.255.255.0')

    # One VLAN sub-interface address per sub-workload (sub IP + 1).
    tc.mnic_p2p_sub_ip = {}
    for sub_wl in tc.sub_wl:
        tc.mnic_p2p_sub_ip[sub_wl.workload_name] = \
            str(ip_address(sub_wl.ip_address) + 1)
        utils.configureNaplesIntf(req, tc.bitw_node_name, 'mnic_p2p',
                                  tc.mnic_p2p_sub_ip[sub_wl.workload_name],
                                  '255.255.255.0',
                                  vlan=str(sub_wl.uplink_vlan))
    api.SetTestsuiteAttr("mnic_p2p_ip", tc.mnic_p2p_ip)
    api.SetTestsuiteAttr("mnic_p2p_sub_ip", tc.mnic_p2p_sub_ip)

    # Hugepage setup required before starting testpmd.
    for setup_cmd in (
            "echo 256 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages",
            "mkdir -p /dev/hugepages",
            "mount -t hugetlbfs nodev /dev/hugepages"):
        api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, setup_cmd)

    # Start testpmd: always net_ionic2 plus the uplink under test.
    common_args = [{'vdev': 'net_ionic2'}]
    if tc.test_intf == 'up0':
        common_args.append({'vdev': 'net_ionic0'})
    else:
        common_args.append({'vdev': 'net_ionic1'})
    args = [{'coremask': '0x6'},
            {'portmask': '0x3'},
            {'stats-period': '3'},
            {'max-pkt-len': '9208'},
            {'mbuf-size': '10240'},
            {'total-num-mbufs': '10240'}]
    testpmd.StartTestpmd(req, tc.bitw_node_name, common_args, args)

    # Wait till testpmd is ready, then verify that it has started.
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, "sleep 5")
    api.Trigger_AddNaplesCommand(req, tc.bitw_node_name,
                                 'ps -ef | grep testpmd | grep -v grep')

    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def PostTrafficTestCommands(req, tc, w, pcp_or_dscp):
    """Queue post-traffic PFC/PAUSE and drop-counter checks for workload w.

    A cookie is appended to tc.cmd_cookies for each queued command.
    Non-Naples workloads are skipped (PFC stats unavailable there).
    """
    if not w.IsNaples():
        api.Logger.info("node {} is not Naples; cannot check for PFC frames".format(w.node_name))
        return

    # Map the device number recorded for this workload's IP to a front port.
    dev = api.GetTestsuiteAttr(w.ip_address + '_device')[-1]
    if dev == '0':
        port = 'Eth1/1'
    elif dev == '1':
        port = 'Eth1/2'
    else:
        api.Logger.info("invalid dev number {}; defaulting to port 1".format(str(dev)))
        port = 'Eth1/1'

    # Per-port statistics carry the PFC/PAUSE frame counters.
    stats_cmd = '/nic/bin/halctl show port --port ' + port + ' statistics'
    api.Logger.info("Running command {} on node_name {} workload_name {}"
                    .format(stats_cmd, w.node_name, w.workload_name))
    api.Trigger_AddNaplesCommand(req, w.node_name, stats_cmd)
    mode = "pcp" if tc.class_type == 1 else "dscp"
    tc.cmd_cookies.append(stats_cmd + " for " + mode + " " + str(pcp_or_dscp))

    # Occupancy drops indicate buffer pressure during the traffic run.
    drops_cmd = '/nic/bin/halctl show system statistics drop | grep -i "occupancy"'
    api.Logger.info("Running show drops command {} on node_name {}"
                    .format(drops_cmd, w.node_name))
    api.Trigger_AddNaplesCommand(req, w.node_name, drops_cmd)
    tc.cmd_cookies.append("show drops cmd for node {} ip_address {}".format(w.node_name, w.ip_address))
def Trigger(tc):
    """Stress session clear/show while iperf runs between a local pair.

    Repeats tc.args.count times: start an iperf server (background), run a
    100-stream client against it, then show and clear sessions on the Naples
    endpoint. Responses (including terminated background commands) end up in
    tc.resp.
    """
    pairs = api.GetLocalWorkloadPairs()
    tc.cmd_cookies = []
    server, client = pairs[0]
    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            # Neither endpoint is Naples; nothing to stress.
            return api.types.status.SUCCESS
        # Swap roles so the Naples endpoint acts as the server.
        client, server = pairs[0]
    cmd_cookie = "%s(%s) --> %s(%s)" % \
        (server.workload_name, server.ip_address,
         client.workload_name, client.ip_address)
    api.Logger.info("Starting clear & show stress test from %s" % (cmd_cookie))

    basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
    proto = 6       # retained for parity; currently unused
    timeout = 250   # retained for parity; currently unused
    if tc.iterators.proto == 'udp':
        basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()
        proto = 17
        timeout = 150

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    for _ in range(tc.args.count):
        api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                               "%s-s -t 300" % basecmd, background=True)
        tc.cmd_cookies.append("iperf -s")

        api.Trigger_AddCommand(
            req, client.node_name, client.workload_name,
            "%s -c %s -P 100" % (basecmd, server.ip_address))
        tc.cmd_cookies.append("iperf -c ")

        api.Trigger_AddNaplesCommand(req, naples.node_name,
                                     "/nic/bin/halctl show session")
        tc.cmd_cookies.append("Show session")

        api.Trigger_AddNaplesCommand(req, naples.node_name,
                                     "/nic/bin/halctl clear session")
        tc.cmd_cookies.append("Clear session")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def Teardown(tc):
    """Kill the athena sec app and clean flow logs/policy on each (node, nic).

    Returns FAILURE (or the kill status) on the first error, else SUCCESS.
    """
    for node, nic in tc.node_nic_pairs:
        status = athena_app_utils.athena_sec_app_kill(node, nic)
        if status != api.types.status.SUCCESS:
            return status
        req = api.Trigger_CreateExecuteCommandsRequest()
        if tc.policy_type == "default":
            # Default policy: just truncate the flow log.
            api.Trigger_AddNaplesCommand(req, node, "> /data/flows_sec.log", nic)
        else:
            # Custom policy: drop the locally generated file (best effort),
            # remove the pushed policy and truncate the flow log on the nic.
            try:
                os.remove(tc.gen_custom_plcy_fname)
            except OSError:
                pass
            api.Trigger_AddNaplesCommand(
                req, node, "rm -f /data/policy.json && > /data/flows_sec.log",
                nic)
        resp = api.Trigger(req)
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("clean flow log failed on {}".format((node, nic)))
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Bring up bond0 on two remote Naples nodes and ping across them.

    tc.resp holds the trigger response, or None when either endpoint of the
    first remote pair is not a Naples.
    """
    pairs = api.GetRemoteWorkloadPairs()
    w1, w2 = pairs[0][0], pairs[0][1]
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" % \
        (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
    api.Logger.info("Starting Ping test from %s" % (tc.cmd_descr))
    if w1.IsNaples() and w2.IsNaples():
        # Assign static addresses to bond0 on both ends, then ping w1 -> w2.
        api.Trigger_AddNaplesCommand(
            req, w1.node_name,
            "ifconfig bond0 20.20.20.20 netmask 255.255.255.0 up")
        api.SetBondIp(w1.node_name, "20.20.20.20")
        api.Trigger_AddNaplesCommand(
            req, w2.node_name,
            "ifconfig bond0 20.20.20.21 netmask 255.255.255.0 up")
        api.SetBondIp(w2.node_name, "20.20.20.21")
        api.Trigger_AddNaplesCommand(req, w1.node_name,
                                     "ping -I bond0 -c3 20.20.20.21")
        tc.resp = api.Trigger(req)
    else:
        tc.resp = None
    return api.types.status.SUCCESS
def Trigger(tc):
    """Read each interrupt counter, bump its field, and record the baseline.

    Uses the module-level intr_list; stores the pre-bump count on each intr
    object via set_count() so Verify() can compare later.
    """
    api.Logger.info("Starting interrupt test")
    for node, intr in intr_list:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddNaplesCommand(req, node, intr.get_count_cmd())
        api.Trigger_AddNaplesCommand(req, node, intr.set_field_cmd())
        resp = api.Trigger(req)
        for cmd in resp.commands:
            if cmd.exit_code != 0:
                api.Logger.error(
                    "Command failed: {}, Node {}, Interrupt {}, Field {}".
                    format(cmd.command, node, intr.name(), intr.field()))
                return api.types.status.FAILURE
        # First command's stdout carries the current counter value.
        count = intr.parse_count_cmd_output(resp.commands[0].stdout)
        if count == -1:
            api.Logger.error(
                "Invalid count: Node {}, Interrupt {}, Field {}".format(
                    node, intr.name(), intr.field()))
            return api.types.status.FAILURE
        api.Logger.info(
            "Set: Node {}, Interrupt {}, Field {}, {} -> {}".format(
                node, intr.name(), intr.field(), count, count + 1))
        intr.set_count(count)
    return api.types.status.SUCCESS
def Verify(tc):
    """Check that every interrupt counter advanced past its recorded baseline.

    Sleeps briefly (on the first node) to let counters settle, then expects
    each counter to be at least baseline + 1. Continues through all
    interrupts, returning FAILURE if any fell short.
    """
    rc = api.types.status.SUCCESS

    # Settle delay before re-reading the counters.
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddNaplesCommand(req, tc.Nodes[0], 'sleep 3')
    resp = api.Trigger(req)

    for node, intr in intr_list:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        api.Trigger_AddNaplesCommand(req, node, intr.get_count_cmd())
        resp = api.Trigger(req)
        cmd = resp.commands[0]
        if cmd.exit_code != 0:
            api.Logger.error(
                "Command failed: Node {}, Interrupt {}, Field {}".format(
                    node, intr.name(), intr.field()))
            return api.types.status.FAILURE
        expected = intr.count() + 1
        value = intr.parse_count_cmd_output(cmd.stdout)
        if value < expected:
            api.Logger.error(
                "Node {}, Interrupt {}, Field {}, Expected {}, Got {}".format(
                    node, intr.name(), intr.field(), expected, value))
            rc = api.types.status.FAILURE
    return rc
def CopyTechSupportFiles(naples_nodes, techsupport_files, log_dir):
    """Copy tech-support files off each Naples node into log_dir.

    Makes the files world-readable on the node first, copies them off, then
    renames each local copy with a "-<node>" suffix so files from different
    nodes do not collide.

    Args:
        naples_nodes: node names to collect from.
        techsupport_files: absolute file paths on the Naples nodes.
        log_dir: local destination directory.

    Returns:
        api.types.status.SUCCESS, or FAILURE when any copy fails.
    """
    req = api.Trigger_CreateExecuteCommandsRequest()
    for node in naples_nodes:
        api.Trigger_AddNaplesCommand(req, node, "ls /data/techsupport/")
        for file in techsupport_files:
            # chmod so the copy mechanism can read the archives.
            api.Trigger_AddNaplesCommand(req, node, "chmod 666 %s" % file)
    resp = api.Trigger(req)
    for cmd_resp in resp.commands:
        api.PrintCommandResults(cmd_resp)
    for node in naples_nodes:
        resp = api.CopyFromNaples(node, techsupport_files, log_dir)
        # Fixed `== None` -> `is None`.
        if resp is None or resp.api_response.api_status != types_pb2.API_STATUS_OK:
            api.Logger.info(
                "Failed to copy tech-support file(s) to logdir:%s" % (log_dir))
            return api.types.status.FAILURE
        for file in techsupport_files:
            # Rename local copy per node; os.rename replaces the previous
            # os.system("mv ...") shell-out (handles spaces, raises on error).
            src = os.path.join(log_dir, os.path.basename(file))
            dst = src + "-" + node
            api.Logger.info("renaming %s -> %s" % (src, dst))
            os.rename(src, dst)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run ping or netcat (TCP/UDP) traffic to each sub-workload's p2p IP.

    For 'ping', pings from the workload host. Otherwise stages a small
    payload file on the host, runs a netcat listener on Naples redirecting
    into a per-workload receive file, sends the payload from the host, and
    cats the received file. One aggregated response per sub-workload is
    appended to tc.resp.
    """
    tc.resp = []
    tc.nc_server_cmds = []
    tc.nc_client_cmds = []

    for sub_wl in tc.sub_wl:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        if tc.traffic_type == 'ping':
            # ping from host
            api.Trigger_AddHostCommand(
                req, tc.wl_node_name,
                "ping -c 5 " + tc.mnic_p2p_sub_ip[sub_wl.workload_name])
        else:
            # Stage the payload file on the workload host.
            tc.send_data = tc.traffic_type + " Hello"
            send_fname = CURR_DIR + '/' + SEND_FNAME
            with open(send_fname, "w") as payload:
                payload.write(tc.send_data)
            api.CopyToHost(tc.wl_node_name, [send_fname], "")
            os.remove(send_fname)

            # netcat listener on Naples; args common to TCP & UDP first,
            # then the UDP flag, then redirect into the receive file.
            s_args = ' -l -p 9999 '
            if tc.traffic_type == 'UDP':
                s_args += ' -u '
            s_args += '> ' + RECV_FNAME_PREFIX + sub_wl.workload_name
            tc.nc_server_cmds.append(s_args)
            netcat.StartNetcat(req, tc.bitw_node_name, 'naples', s_args)

            # netcat sender on the host, feeding the staged payload.
            c_args = tc.mnic_p2p_sub_ip[sub_wl.workload_name] + ' 9999 < ' \
                + SEND_FNAME
            if tc.traffic_type == 'UDP':
                c_args += ' -u '
            tc.nc_client_cmds.append(c_args)
            netcat.StartNetcat(req, tc.wl_node_name, 'host', c_args)

        # Wait for the traffic to finish.
        api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, "sleep 5")
        if tc.traffic_type == 'UDP' or tc.traffic_type == 'TCP':
            api.Trigger_AddNaplesCommand(
                req, tc.bitw_node_name,
                "cat " + RECV_FNAME_PREFIX + sub_wl.workload_name)

        trig_resp = api.Trigger(req)
        term_resp = api.Trigger_TerminateAllCommands(trig_resp)
        tc.resp.append(
            api.Trigger_AggregateCommandsResponse(trig_resp, term_resp))
    return api.types.status.SUCCESS
def DeleteNMDDb(n):
    """Delete the NMD database and trust roots on every device of node *n*."""
    api.Logger.info("Deleting NMD DB.")
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    for naples in api.GetDeviceNames(n):
        api.Trigger_AddNaplesCommand(req, n,
                                     "rm -rf /sysconfig/config0/nmd.db",
                                     naples=naples)
        api.Trigger_AddNaplesCommand(
            req, n, "rm -f /sysconfig/config0/clusterTrustRoots.pem",
            naples=naples)
    resp = api.Trigger(req)
def Setup(tc):
    """Prepare nodes and workloads for the same-firmware upgrade test.

    Picks one random node ('any') or all nodes ('all'), stages the upgrade
    package and clears stale upgrade/techsupport state, resets upgrade logs,
    chooses workloads, verifies connectivity and sets up the packet test.
    """
    tc.skip = False
    tc.sleep = getattr(tc.args, "sleep", 200)
    tc.allowed_down_time = getattr(tc.args, "allowed_down_time", 0)
    tc.pktloss_verify = getattr(tc.args, "pktloss_verify", False)
    tc.node_selection = tc.iterators.selection
    if tc.node_selection not in ["any", "all"]:
        api.Logger.error("Incorrect Node selection option {} specified. Use 'any' or 'all'".format(tc.node_selection))
        tc.skip = True
        return api.types.status.FAILURE

    tc.Nodes = api.GetNaplesHostnames()
    if tc.node_selection == "any":
        # Exercise a single, randomly chosen node.
        tc.Nodes = [random.choice(tc.Nodes)]

    # Stage the upgrade package and clear stale state on each node.
    req = api.Trigger_CreateExecuteCommandsRequest()
    for node in tc.Nodes:
        api.Trigger_AddNaplesCommand(req, node, "touch /data/upgrade_to_same_firmware_allowed")
        api.Trigger_AddNaplesCommand(req, node, "rm -rf /data/techsupport/DSC_TechSupport_*")
        api.Trigger_AddNaplesCommand(req, node, "rm -rf /update/pds_upg_status.txt")
        api.Trigger_AddNaplesCommand(req, node, "cp /update/{} /update/{}".format(UPGRADE_NAPLES_PKG, NAPLES_PKG))
    resp = api.Trigger(req)
    for cmd_resp in resp.commands:
        api.PrintCommandResults(cmd_resp)
        if cmd_resp.exit_code != 0:
            api.Logger.error("Setup failed %s", cmd_resp.command)
            tc.skip = True
            return api.types.status.FAILURE

    if upgrade_utils.ResetUpgLog(tc.Nodes) != api.types.status.SUCCESS:
        api.Logger.error("Failed in Reseting Upgrade Log files.")
        return api.types.status.FAILURE

    # Choose workloads for connectivity/traffic test.
    result = ChooseWorkLoads(tc)
    if result != api.types.status.SUCCESS or tc.skip:
        api.Logger.error("Failed to Choose Workloads.")
        return result

    # Send gratuitous ARP for learning.
    arping.SendGratArp(tc.wloads)

    # Verify connectivity before the upgrade begins.
    result = VerifyConnectivity(tc)
    if result != api.types.status.SUCCESS:
        api.Logger.error("Failed in Connectivity Check during Setup.")
        tc.skip = True
        return result

    # Set up packet test based on the pktloss_verify argument.
    result = PacketTestSetup(tc)
    if result != api.types.status.SUCCESS or tc.skip:
        api.Logger.error("Failed in Packet Test setup.")
        return result

    return api.types.status.SUCCESS
def Setup(tc):
    """Stage the policy.json (default or custom) on every athena (node, nic).

    For the default policy the stale /data/policy.json is removed so
    athena_app falls back to its built-in default; for a custom policy one is
    generated locally, copied to the Naples and moved into /data.
    """
    parse_args(tc)
    # Longer settle times are required when a custom policy must be loaded.
    if tc.policy_type == "default":
        tc.sec_app_restart_sleep = 120
        tc.flow_cache_read_sleep = 15
    else:
        tc.sec_app_restart_sleep = 180
        tc.flow_cache_read_sleep = 45
    tc.node_nic_pairs = athena_app_utils.get_athena_node_nic_names()
    tc.custom_policy_path = api.GetTopDir() + '/' + CUSTOM_PLCY_JSON_DIR + '/' \
        + CUSTOM_PLCY_JSON_FNAME
    tc.default_policy_path = api.GetTopDir() + '/' + DEFAULT_PLCY_JSON_FILEPATH
    tc.gen_custom_plcy_fname = ''

    # If the file is already there, it will be overwritten.
    for node, nic in tc.node_nic_pairs:
        # athena_app reads policy.json from /data, so the default-policy case
        # must make sure no stale copy is left there.
        if tc.policy_type == "default":
            api.Logger.info("Test default policy.json")
            api.Logger.info("Clean old policy.json file in /data")
            plcy_cmd = "rm -f /data/policy.json"
        else:
            api.Logger.info("Test Custom policy.json")
            if (gen_custom_policy_cfg(tc) != api.types.status.SUCCESS):
                return api.types.status.FAILURE
            api.Logger.info(
                "Copy policy.json file from IOTA dir to /data/ on Naples")
            api.CopyToNaples(node, [tc.gen_custom_plcy_fname], "")
            plcy_cmd = "mv /" + GEN_CUSTOM_PLCY_JSON_FNAME + " /data/policy.json"

        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, node, plcy_cmd, nic)
        api.Trigger_AddNaplesCommand(req, node, "sync", nic)
        resp = api.Trigger(req)
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                if 'rm' in cmd.command:
                    api.Logger.error("removing /data/policy.json failed "
                                     "on {}".format((node, nic)))
                    return api.types.status.FAILURE
                if 'mv' in cmd.command:
                    api.Logger.error("moving policy.json to /data/ failed "
                                     "on {}".format((node, nic)))
                    return api.types.status.FAILURE
                if 'sync' in cmd.command:
                    api.Logger.error("sync failed on {}".format((node, nic)))
                    return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Flap the selected port(s) and ping-test the remaining connectivity.

    Brings the iterator-selected uplink/switchport up or down, then pings
    either all BGP peers (skipping peers behind a downed switchport) or all
    remote device loopbacks. Responses land in tc.resp with matching
    tc.cmd_cookies.
    """
    req = api.Trigger_CreateExecuteCommandsRequest()
    tc.cmd_cookies = []
    ping_count = getattr(tc.args, "ping_count", 20)
    interval = getattr(tc.args, "ping_interval", 0.01)
    connectivity = tc.iterators.connectivity
    naplesHosts = api.GetNaplesHostnames()
    tc_intf = (tc.iterators.interface).capitalize()

    # Apply the requested port state to uplinks or switchports.
    if tc_intf in ['Uplink0', 'Uplink1', 'Uplinks']:
        setDataPortStatePerUplink(naplesHosts, tc.iterators.port_status,
                                  tc_intf)
    if tc_intf in ['Switchport0', 'Switchport1', 'Switchports']:
        switchPortOp(naplesHosts, tc.iterators.port_status, tc_intf)

    if connectivity == 'bgp_peer':
        for node in naplesHosts:
            for bgppeer in bgp_peer.client.Objects(node):
                # Don't try to ping on the downed interface.
                if tc.iterators.port_status == 'down':
                    if tc_intf == 'Switchport0' and bgppeer.Id == 1:
                        continue
                    elif tc_intf == 'Switchport1' and bgppeer.Id == 2:
                        continue
                cmd_cookie = "%s --> %s" % \
                    (str(bgppeer.LocalAddr), str(bgppeer.PeerAddr))
                api.Trigger_AddNaplesCommand(req, node,
                    "ping -i %f -c %d -s %d %s" %
                    (interval, ping_count, tc.iterators.pktsize,
                     str(bgppeer.PeerAddr)))
                api.Logger.info("Ping test from %s" % (cmd_cookie))
                tc.cmd_cookies.append(cmd_cookie)
        tc.resp = api.Trigger(req)
    else:
        # Loopback connectivity: full mesh between distinct nodes.
        for node1 in naplesHosts:
            for node2 in naplesHosts:
                if node1 == node2:
                    continue
                device1 = next(iter(device.client.Objects(node1)))
                device2 = next(iter(device.client.Objects(node2)))
                cmd_cookie = "%s --> %s" % (device1.IP, device2.IP)
                api.Trigger_AddNaplesCommand(req, node1,
                    "ping -i %f -c %d -s %d %s" %
                    (interval, ping_count, tc.iterators.pktsize, device2.IP))
                api.Logger.info("Loopback ping test from %s" % (cmd_cookie))
                tc.cmd_cookies.append(cmd_cookie)
        tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def pdsClearFlows(node_name=None):
    """Clear pdsctl flow state on one node, or on every Naples node.

    Args:
        node_name: optional single node; all Naples nodes when None.
    """
    clear_cmd = "/nic/bin/pdsctl clear flow"
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    targets = [node_name] if node_name else api.GetNaplesHostnames()
    for target in targets:
        api.Trigger_AddNaplesCommand(req, target, clear_cmd)
    api.Trigger(req)
    return api.types.status.SUCCESS
def DumpFlowmonSessions(nodes=None):
    """Dump flow-monitor and collector state via halctl on the given nodes.

    Args:
        nodes: optional list of Naples node names; defaults to all Naples
            nodes when None or empty. (Default changed from the shared
            mutable `[]` to None -- same behavior, avoids the
            mutable-default-argument pitfall.)
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    nodes = nodes if nodes else api.GetNaplesHostnames()
    for node in nodes:
        api.Trigger_AddNaplesCommand(req, node, "/nic/bin/halctl show flow-monitor")
        api.Trigger_AddNaplesCommand(req, node, "/nic/bin/halctl show collector")
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
def clearNaplesSessions(node_name=None):
    """Clear HAL sessions on one node, or on every Naples node.

    Args:
        node_name: optional single node; all Naples nodes when None.
    """
    clear_cmd = "/nic/bin/halctl clear session"
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    targets = [node_name] if node_name else api.GetNaplesHostnames()
    for target in targets:
        api.Trigger_AddNaplesCommand(req, target, clear_cmd)
    api.Trigger(req)
def Trigger(tc):
    """Run an RTSP ALG test: stream a media file and validate HAL state.

    Copies a sample VOB to the server, serves it with live555MediaServer,
    fetches it with openRTSP, and checks HAL session/flow-gate state on the
    Naples endpoint. Per-command cookies go to tc.cmd_cookies.
    """
    pairs = api.GetLocalWorkloadPairs()
    server, client = pairs[0][0], pairs[0][1]
    tc.cmd_cookies = []
    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" % \
        (server.workload_name, server.ip_address,
         client.workload_name, client.ip_address)
    api.Logger.info("Starting RTSP test from %s" % (tc.cmd_descr))

    # Stage the media file on the server workload.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    fullpath = dir_path + '/' + "small.vob"
    api.Logger.info("fullpath %s" % (fullpath))
    resp = api.CopyToWorkload(server.node_name, server.workload_name,
                              [fullpath], 'rtspdir')
    if resp is None:
        return api.types.status.FAILURE

    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "ls -al | grep video")
    tc.cmd_cookies.append("Before RTSP")

    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "cd rtspdir && live555MediaServer",
                           background=True)
    tc.cmd_cookies.append("Run RTSP server")

    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "openRTSP rtsp://%s/small.vob" % server.ip_address)
    tc.cmd_cookies.append("Run RTSP client")

    # Naples-side validation of the RTSP ALG state.
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show session --alg rtsp --yaml")
    tc.cmd_cookies.append("show session RTSP established")

    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show nwsec flow-gate | grep RTSP")
    tc.cmd_cookies.append("show flow-gate")

    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "ls -al | grep video")
    tc.cmd_cookies.append("After RTSP")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def DeleteTmpFiles():
    """Remove the emulation marker and device.conf from every Naples node."""
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cleanup_cmds = ("rm -f /data/iota-emulation",
                    "rm -f /sysconfig/config0/device.conf")
    for n in api.GetNaplesHostnames():
        for cleanup_cmd in cleanup_cmds:
            api.Trigger_AddNaplesCommand(req, n, cleanup_cmd)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
def Trigger(tc):
    """UDP single-flow aging test.

    For each workload pair with a Naples endpoint: fire 5 single-packet
    hpings to create the flow, show the session before aging, sleep, and
    show it again after aging. Cookies in tc.cmd_cookies track each command.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_cookies = []
    for server, client in tc.workload_pairs:
        naples = server
        if not server.IsNaples():
            naples = client
            if not client.IsNaples():
                continue
        cmd_cookie = "%s(%s) --> %s(%s)" % \
            (server.workload_name, server.ip_address,
             client.workload_name, client.ip_address)
        api.Logger.info("Starting UDP Single flow test from %s" % (cmd_cookie))
        timeout = get_timeout('udp-timeout')
        server_port = api.AllocateUdpPort()
        client_port = api.AllocateUdpPort()

        # Create/refresh the UDP flow with 5 single-packet hpings.
        for _ in range(0, 5):
            api.Trigger_AddCommand(
                req, client.node_name, client.workload_name,
                "sudo hping3 -c 1 -s %s -p %s --udp %s -d 10" %
                (client_port, server_port, server.ip_address))
            tc.cmd_cookies.append("Send data from server to client")

        if server.IsNaples():
            api.Trigger_AddNaplesCommand(
                req, server.node_name,
                "/nic/bin/halctl show session --dstport {} --dstip {} --srcip {} | grep UDP"
                .format(server_port, server.ip_address, client.ip_address))
            tc.cmd_cookies.append("Before aging show session")

            # Get it from the config.
            # NOTE(review): sleeping `timeout % 5` seconds looks suspicious --
            # `timeout + 5` (timeout plus slack) may have been intended;
            # confirm against the aging config.
            api.Trigger_AddNaplesCommand(req, server.node_name,
                                         "sleep %s" % (int(timeout) % 5),
                                         timeout=300)
            tc.cmd_cookies.append("sleep")

            api.Trigger_AddNaplesCommand(
                req, server.node_name,
                "/nic/bin/halctl show session --dstport {} --dstip {} --srcip {} | grep UDP"
                .format(server_port, server.ip_address, client.ip_address))
            tc.cmd_cookies.append("After aging show session")

    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run ping or netcat (TCP/UDP) traffic against each sub-interface IP.

    The target address is each interface's sub_ip + 1. For netcat traffic a
    payload file is staged on the host, a listener on Naples writes into
    RECV_FNAME, and the received file is cat'ed afterwards. The single
    aggregated response goes to tc.resp.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    for intf in tc.intfs:
        ip_addr = str(ip_address(intf['sub_ip']) + 1)
        if tc.traffic_type == 'ping':
            # ping from host
            api.Trigger_AddHostCommand(req, tc.wl_node_name,
                                       "ping -c 5 " + ip_addr)
        else:
            # Stage the payload file on the workload host.
            tc.send_data = tc.traffic_type + " Hello"
            send_fname = CURR_DIR + '/' + SEND_FNAME
            with open(send_fname, "w") as payload:
                payload.write(tc.send_data)
            api.CopyToHost(tc.wl_node_name, [send_fname], "")
            os.remove(send_fname)

            # netcat listener on Naples; common args, UDP flag, redirect.
            s_args = ' -l -p 9999 '
            if tc.traffic_type == 'UDP':
                s_args += ' -u '
            s_args += '> ' + RECV_FNAME
            netcat.StartNetcat(req, tc.bitw_node_name, 'naples', s_args)

            # netcat sender on the host, feeding the staged payload.
            c_args = ip_addr + ' 9999 < ' + SEND_FNAME
            if tc.traffic_type == 'UDP':
                c_args += ' -u '
            netcat.StartNetcat(req, tc.wl_node_name, 'host', c_args)

        # Wait for the traffic to finish.
        api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, "sleep 5")
        if tc.traffic_type == 'UDP' or tc.traffic_type == 'TCP':
            api.Trigger_AddNaplesCommand(req, tc.bitw_node_name,
                                         "cat " + RECV_FNAME)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def athena_sec_app_start(node_name=None, nic_name=None,
                         init_wait_time=INIT_WAIT_TIME_DEFAULT):
    """Start the athena security app and wait for it to come up.

    Either both node_name and nic_name are given (start on that one nic) or
    neither (start on every athena node/nic pair).

    Raises:
        Exception: when only one of node_name / nic_name is provided.

    Returns:
        api.types.status.SUCCESS or FAILURE.
    """
    if (not node_name and nic_name) or (node_name and not nic_name):
        raise Exception("specify both node_name and nic_name or neither")
    if node_name and nic_name:
        node_nic_names = [(node_name, nic_name)]
    else:
        node_nic_names = get_athena_node_nic_names()

    # Launch the agent (in the background) on each target.
    for nname, nicname in node_nic_names:
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(req, nname,
                                     "/nic/tools/start-sec-agent-iota.sh",
                                     nicname, background=True)
        resp = api.Trigger(req)
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("command to start athena sec app failed on "
                             "node %s nic %s" % (nname, nicname))
            return api.types.status.FAILURE

    # sleep for init to complete
    misc_utils.Sleep(init_wait_time)

    # Confirm the soft-init athena_app process is present on each target.
    for nname, nicname in node_nic_names:
        req = api.Trigger_CreateExecuteCommandsRequest()
        api.Trigger_AddNaplesCommand(
            req, nname,
            "ps -aef | grep athena_app | grep soft-init | grep -v grep",
            nicname)
        resp = api.Trigger(req)
        cmd = resp.commands[0]
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("ps failed or athena_app failed to start "
                             "on node %s nic %s" % (nname, nicname))
            return api.types.status.FAILURE
        if "athena_app" in cmd.stdout:
            athena_sec_app_pid = cmd.stdout.strip().split()[1]
            api.Logger.info("Athena sec app came up on node %s nic %s and "
                            "has pid %s" % (nname, nicname,
                                            athena_sec_app_pid))
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run offline diagnostics on each Naples endpoint of the first pair.

    Executes diag_test offline, then greps the generated report for FAIL
    (ignoring the known-bad Local/RTC i2c readings) and PASS lines, and cats
    the diag logs. tc.resp is None when neither endpoint is a Naples.
    Removed an unused `mgmt_ip` local and de-duplicated the per-workload
    command sequence.
    """
    pairs = api.GetRemoteWorkloadPairs()
    w1 = pairs[0][0]
    w2 = pairs[0][1]
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_descr = "Node1: %s(%s) <--> Node2: %s(%s)" % \
        (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
    api.Logger.info("Starting Offline Diags for %s" % (tc.cmd_descr))
    cmd1 = "LD_LIBRARY_PATH=/platform/lib:/nic/lib /nic/bin/diag_test offline &> /var/log/pensando/offline_diags_report.txt"
    # Ignore RTC and Local temperature as of now until DavidV fixes the
    # i2c bus issue.
    cmd2 = "! grep FAIL /var/log/pensando/offline_diags_report.txt | grep -v Local | grep -v RTC"
    cmd3 = "grep PASS /var/log/pensando/offline_diags_report.txt"
    cmd4 = "cat /var/log/pensando/offline-diags.*.log"
    naples_wls = [w for w in (w1, w2) if w.IsNaples()]
    if naples_wls:
        for w in naples_wls:
            for diag_cmd in (cmd1, cmd2, cmd3, cmd4):
                api.Trigger_AddNaplesCommand(req, w.node_name, diag_cmd)
        tc.resp = api.Trigger(req)
    else:
        tc.resp = None
    return api.types.status.SUCCESS
def ResetNMDState(n):
    """Reset NMD state on node *n*: remove the NMD log, DB and config files
    for every Naples device on the node."""
    api.Logger.info("Resetting NMD State.")
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    for naples in api.GetDeviceNames(n):
        # Fix: pass naples=naples here too -- every sibling removal in this
        # loop (and DeleteNMDDb) targets the specific device, while this one
        # previously hit only the default device on multi-NIC nodes.
        api.Trigger_AddNaplesCommand(
            req, n, "rm -rf /var/log/pensando/pen-nmd.log", naples=naples)
        api.Trigger_AddNaplesCommand(req, n,
                                     "rm -rf /sysconfig/config0/nmd.db",
                                     naples=naples)
        api.Trigger_AddNaplesCommand(
            req, n, "rm -rf /sysconfig/config0/app-start.conf", naples=naples)
        api.Trigger_AddNaplesCommand(
            req, n, "rm -rf /sysconfig/config0/device.conf", naples=naples)
        api.Trigger_AddNaplesCommand(
            req, n, "rm -f /sysconfig/config0/clusterTrustRoots.pem",
            naples=naples)
    resp = api.Trigger(req)
def Trigger(tc):
    """Remove the preinit and start-agent scripts from the bitw node."""
    req = api.Trigger_CreateExecuteCommandsRequest()
    for script_path in (tc.preinit_script_path, tc.start_agent_script_path):
        api.Trigger_AddNaplesCommand(req, tc.bitw_node_name,
                                     "rm " + script_path)
    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run a SUNRPC ALG test against a local NFS server.

    Clears sessions, queries the RPC portmapper, validates HAL session and
    flow-gate state for sun_rpc, clears again and re-checks the flow-gate.
    Per-command cookies go to tc.cmd_cookies.
    """
    pairs = api.GetLocalWorkloadPairs()
    server, client = pairs[0][0], pairs[0][1]
    tc.cmd_cookies = []
    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" % \
        (server.workload_name, server.ip_address,
         client.workload_name, client.ip_address)
    api.Logger.info("Starting SUNRPC test from %s" % (tc.cmd_descr))

    # Start from a clean session table.
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl clear session")
    tc.cmd_cookies.append("clear session")

    SetupNFSServer(server, client)

    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "rpcinfo -p %s" % (server.ip_address))
    tc.cmd_cookies.append("RPC Dump")

    # Naples-side validation of the SUNRPC ALG state.
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show session --alg sun_rpc")
    tc.cmd_cookies.append("show session")

    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show nwsec flow-gate | grep SUN_RPC")
    tc.cmd_cookies.append("show security flow-gate")

    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl clear session")
    tc.cmd_cookies.append("clear session")

    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show nwsec flow-gate | grep SUN_RPC")
    tc.cmd_cookies.append("show flow-gate after clear")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    CleanupNFSServer(server, client)
    return api.types.status.SUCCESS