def Cleanup(server, client):
    """Remove tftp artifacts from the server and client workloads.

    Deletes the tftpboot area on the server and the local tftpdir staging
    directory on both workloads.  Always returns SUCCESS.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    # (workload, shell command) pairs to run, in order.
    cleanup_cmds = (
        (server, "rm -rf /var/lib/tftpboot/*"),
        (server, "rm -rf tftpdir"),
        (client, "rm -rf tftpdir"),
    )
    for wl, shell_cmd in cleanup_cmds:
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, shell_cmd)
    trig_resp = api.Trigger(req)
    api.Trigger_TerminateAllCommands(trig_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Drive netcat traffic across tc.tunnels, alternating TCP and UDP.

    Even-numbered tunnels carry a TCP exchange, odd-numbered ones UDP; at
    most 16 tunnels are exercised.  The aggregated command results land in
    tc.resp and per-command labels in tc.cmd_cookies.  Returns SUCCESS.
    """
    #Run all commands in serial
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_cookies = []
    count = 0
    for tunnel in tc.tunnels:
        w1 = tunnel.ltep
        w2 = tunnel.rtep
        cmd_cookie = "%s(%s) --> %s(%s)" %\
            (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
        if count % 2 == 0:
            # TCP test: listener runs in the background on the remote
            # tunnel endpoint, then the local endpoint pushes a payload.
            api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                                   "nc -l %s 2000" % w2.ip_address,
                                   background=True)
            api.Logger.info("Listen on IP %s and TCP port 2000" % w2.ip_address)
            api.Trigger_AddCommand(
                req, w1.node_name, w1.workload_name,
                "echo \"Pensando Systems\" | nc %s 2000" % w2.ip_address)
            api.Logger.info("Send TCP traffic from %s" % (cmd_cookie))
            tc.cmd_cookies.append("TCP test " + cmd_cookie)
        if count % 2 == 1:
            # UDP test: same pattern, nc in -u mode.
            api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                                   "nc -l -u %s 2000" % w2.ip_address,
                                   background=True)
            api.Logger.info("Listen on IP %s and UDP port 2000" % w2.ip_address)
            api.Trigger_AddCommand(
                req, w1.node_name, w1.workload_name,
                "echo \"Pensando Systems\" | nc -u %s 2000" % w2.ip_address)
            api.Logger.info("Send UDP traffic from %s" % (cmd_cookie))
            tc.cmd_cookies.append("UDP test " + cmd_cookie)
        count += 1
        if count == 16:
            # Cap the run at 16 tunnels.
            break
    trig_resp = api.Trigger(req)
    # Give the background listeners and the senders time to complete.
    time.sleep(10)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def ForceReleasePort(port, node):
    """Kill whatever process is holding *port* on the given workload.

    Runs `fuser -k` on the workload, prints the command results, and
    always returns SUCCESS (the kill is best-effort).
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddCommand(req, node.node_name, node.workload_name,
                           "sudo fuser -k %s" % (port))
    trig_resp = api.Trigger(req)
    api.Trigger_TerminateAllCommands(trig_resp)
    # Cleanup: the aggregated response was previously computed into an
    # unused local; only the trigger results are printed.
    for cmd in trig_resp.commands:
        api.PrintCommandResults(cmd)
    return api.types.status.SUCCESS
def stop_single_pcap_capture(tc):
    """Stop a background pcap capture and collect the capture file.

    No-ops (SUCCESS) when no capture was started.  Otherwise terminates
    the capture commands and copies tc.pcap_filename from every workload
    node into the testcase log directory.  Returns FAILURE on any copy
    error.
    """
    if not getattr(tc, "pcap_trigger", None):
        return api.types.status.SUCCESS
    api.Trigger_TerminateAllCommands(tc.pcap_trigger)
    nodes = api.GetWorkloadNodeHostnames()
    tc_dir = tc.GetLogsDir()
    # Bug fix: 'n' was referenced without any loop over the node list,
    # which raised NameError.  Copy the capture from every node.
    for n in nodes:
        resp = api.CopyFromHost(n, [tc.pcap_filename], tc_dir)
        if resp == None or resp.api_response.api_status != types_pb2.API_STATUS_OK:
            api.Logger.error("Failed to copy from to Node: %s" % n)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Teardown(tc):
    """Restore TCP timeout configuration and stop all outstanding commands.

    Returns FAILURE immediately when the testcase was marked skipped,
    SUCCESS otherwise.
    """
    if tc.skip == True:
        return api.types.status.FAILURE
    #
    # Restore current Time-out configs
    #
    update_timeout('tcp-timeout', tc.tcp_timeout_val)
    update_timeout('tcp-close', tc.tcp_close_val)
    #
    # Terminate all commands
    #
    api.Trigger_TerminateAllCommands(tc.resp)
    api.Trigger_TerminateAllCommands(tc.resp3)
    # api.Trigger_TerminateAllCommands(tc.resp6)
    api.Trigger_TerminateAllCommands(tc.resp7)
    # Reset the iteration counter for the next run.
    tc.SetTestCount(1)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Start background iperf servers and then clients for every workload pair.

    Client responses are stored in tc.iperf_client_resp; per-pair command
    strings go to tc.serverCmds / tc.clientCmds and descriptions to
    tc.cmd_descr.  Returns SUCCESS (immediately when tc.skip is set).
    """
    if tc.skip:
        return api.types.status.SUCCESS
    tc.serverCmds = []
    tc.clientCmds = []
    tc.cmd_descr = []
    # Hardware runs fan commands out fully in parallel; simulation uses a
    # plain non-serial request.
    if not api.IsSimulation():
        serverReq = api.Trigger_CreateAllParallelCommandsRequest()
        clientReq = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        serverReq = api.Trigger_CreateExecuteCommandsRequest(serial = False)
        clientReq = api.Trigger_CreateExecuteCommandsRequest(serial = False)
    for idx, pairs in enumerate(tc.workload_pairs):
        client = pairs[0]
        server = pairs[1]
        cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                    (server.workload_name, server.ip_address,
                     client.workload_name, client.ip_address)
        tc.cmd_descr.append(cmd_descr)
        num_sessions = int(getattr(tc.args, "num_sessions", 1))
        api.Logger.info("Starting Iperf test from %s num-sessions %d"
                        % (cmd_descr, num_sessions))
        # Bug fix: the protocol/port allocation was swapped — UDP tests
        # were allocated a TCP port and TCP tests a UDP port.
        if tc.iterators.proto == 'udp':
            port = api.AllocateUdpPort()
            serverCmd = iperf.ServerCmd(port)
            clientCmd = iperf.ClientCmd(server.ip_address, port, proto='udp',
                                        jsonOut=True,
                                        num_of_streams = num_sessions)
        else:
            port = api.AllocateTcpPort()
            serverCmd = iperf.ServerCmd(port)
            clientCmd = iperf.ClientCmd(server.ip_address, port, jsonOut=True,
                                        num_of_streams = num_sessions)
        tc.serverCmds.append(serverCmd)
        tc.clientCmds.append(clientCmd)
        api.Trigger_AddCommand(serverReq, server.node_name,
                               server.workload_name, serverCmd,
                               background = True)
        api.Trigger_AddCommand(clientReq, client.node_name,
                               client.workload_name, clientCmd)
    server_resp = api.Trigger(serverReq)
    # Sleep for some time as the background servers may not have started.
    time.sleep(30)
    tc.iperf_client_resp = api.Trigger(clientReq)
    # Its faster to kill iperf servers; still call terminate on all.
    api.Trigger_TerminateAllCommands(server_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run the rping RDMA connectivity test between the workloads in tc.w.

    The 'server' iterator selects the direction: when 'yes', workload 0
    is the server (one pass at i=0); otherwise only the reverse direction
    runs (one pass at i=1).  Aggregated results go to tc.resp.
    """
    #==============================================================
    # trigger the commands
    #==============================================================
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    # i is the first server index, k the (exclusive) upper bound — the
    # while condition below runs exactly one iteration in either case.
    if getattr(tc.iterators, 'server', None) == 'yes':
        i = 0
        k = 1
    else:
        i = 1
        k = 2
    while ((i < 2) and (i < k)):
        j = (i + 1) % 2
        w1 = tc.w[i]
        w2 = tc.w[j]
        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
            (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
        api.Logger.info("Starting rping test from %s" % (tc.cmd_descr))
        # cmd for server
        cmd = "rping -s -a %s -C 10 -S %d " % (w1.ip_address, tc.iterators.pktsize)
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                               tc.ib_prefix[i] + cmd, background=True)
        # On Naples-Mellanox setups, with Mellanox as server, it takes a few seconds before the server
        # starts listening. So sleep for a few seconds before trying to start the client
        cmd = 'sleep 2'
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, cmd)
        # cmd for client
        cmd = "rping -c -a %s -C 10 -S %d " % (w1.ip_address, tc.iterators.pktsize)
        api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                               tc.ib_prefix[j] + cmd)
        i = i + 1
    #end while
    # trigger the request
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def wait_and_verify_fuz(tc):
    """Wait for fuz clients to finish, stop the servers, and verify exit codes.

    Returns FAILURE when any fuz client exited non-zero, SUCCESS otherwise.
    """
    tc.fuz_client_resp = api.Trigger_WaitForAllCommands(tc.fuz_client_resp)
    api.Trigger_TerminateAllCommands(tc.server_resp)
    for idx, cmd in enumerate(tc.fuz_client_resp.commands):
        if cmd.exit_code != 0:
            # Bug fix: the message used {} placeholders but the values
            # were passed as extra positional args and never substituted;
            # format the string explicitly.
            api.Logger.error(
                "Fuz commmand failed Workload : {}, command : {}, stdout : {} stderr : {}".format(
                    cmd.entity_name, cmd.command, cmd.stdout, cmd.stderr))
            return api.types.status.FAILURE
    api.Logger.info("Fuz test successfull")
    return api.types.status.SUCCESS
def send_pkt_h2s(tc, node, flow, pkt_gen):
    """Send and receive packets in the host-to-switch (H2S) direction.

    A sniffer is started in the background on the up0 uplink, then the
    generated packet is transmitted out of up1.  The aggregated command
    response is appended to tc.resp.  No return value.
    """
    # Send and Receive packets in H2S direction
    pkt_gen.set_dir_('h2s')
    pkt_gen.set_sip(flow.sip)
    pkt_gen.set_dip(flow.dip)
    if flow.proto == 'UDP' or flow.proto == 'TCP':
        # Only L4 protocols carry port numbers.
        pkt_gen.set_sport(flow.sport)
        pkt_gen.set_dport(flow.dport)
    h2s_req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    # ==========
    # Rx Packet
    # ==========
    # Expected packet arrives encapsulated on up0 with the up0 VLAN.
    pkt_gen.set_encap(True)
    pkt_gen.set_Rx(True)
    pkt_gen.set_vlan(tc.up0_vlan)
    pkt_gen.set_smac(tc.up1_mac)
    pkt_gen.set_dmac(tc.up0_mac)
    pkt_gen.setup_pkt()
    recv_cmd = "./recv_pkt.py --intf_name %s --pcap_fname %s "\
               "--timeout %s --pkt_cnt %d" % (tc.up0_intf,
                                              pktgen.DEFAULT_H2S_RECV_PKT_FILENAME,
                                              str(SNIFF_TIMEOUT), tc.pkt_cnt)
    api.Trigger_AddHostCommand(h2s_req, node.Name(), recv_cmd, background=True)
    # ==========
    # Tx Packet
    # ==========
    # Generated packet leaves un-encapsulated on up1 with the up1 VLAN.
    pkt_gen.set_encap(False)
    pkt_gen.set_Rx(False)
    pkt_gen.set_smac(tc.up1_mac)
    pkt_gen.set_dmac(tc.up0_mac)
    pkt_gen.set_vlan(tc.up1_vlan)
    pkt_gen.setup_pkt()
    send_cmd = "./send_pkt.py --intf_name %s --pcap_fname %s "\
               "--pkt_cnt %d" % (tc.up1_intf,
                                 pktgen.DEFAULT_H2S_GEN_PKT_FILENAME,
                                 tc.pkt_cnt)
    # Brief delay so the background sniffer is up before sending.
    api.Trigger_AddHostCommand(h2s_req, node.Name(), 'sleep 0.5')
    api.Trigger_AddHostCommand(h2s_req, node.Name(), send_cmd)
    trig_resp = api.Trigger(h2s_req)
    # Wait out the sniffer timeout before terminating it.
    time.sleep(SNIFF_TIMEOUT)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    h2s_resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    tc.resp.append(h2s_resp)
def Trigger(tc):
    """Send ping or netcat (TCP/UDP) traffic from the host toward each
    sub-interface IP in tc.intfs.

    For TCP/UDP a payload file is written locally, copied to the host,
    a netcat listener is started on naples and a client on the host, and
    the received data is catted for later verification.  Aggregated
    results land in tc.resp.  Returns SUCCESS.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    for intf in tc.intfs:
        # Target is the address one above the configured sub-interface IP.
        ip_addr = str(ip_address(intf['sub_ip']) + 1)
        if tc.traffic_type == 'ping':
            # ping from host
            cmd = "ping -c 5 " + ip_addr
            api.Trigger_AddHostCommand(req, tc.wl_node_name, cmd)
        else:
            # copy send_data to host
            tc.send_data = tc.traffic_type + " Hello"
            send_fname = CURR_DIR + '/' + SEND_FNAME
            # Bug fix: use a context manager so the file handle is
            # closed even if the write raises.
            with open(send_fname, "w") as f:
                f.write(tc.send_data)
            api.CopyToHost(tc.wl_node_name, [send_fname], "")
            os.remove(send_fname)
            # netcat on naples
            # args common to TCP & UDP
            s_args = ' -l -p 9999 '
            # arg specific to UDP
            if tc.traffic_type == 'UDP':
                s_args += ' -u '
            s_args += '> ' + RECV_FNAME
            netcat.StartNetcat(req, tc.bitw_node_name, 'naples', s_args)
            # netcat on host
            c_args = ip_addr + ' 9999 < ' + SEND_FNAME
            # arg specific to UDP
            if tc.traffic_type == 'UDP':
                c_args += ' -u '
            netcat.StartNetcat(req, tc.wl_node_name, 'host', c_args)
        # wait for ping to be done
        api.Trigger_AddNaplesCommand(req, tc.bitw_node_name, "sleep 5")
        if tc.traffic_type == 'UDP' or \
           tc.traffic_type == 'TCP':
            # Dump what the listener received so Verify can inspect it.
            api.Trigger_AddNaplesCommand(req, tc.bitw_node_name,
                                         "cat " + RECV_FNAME)
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run a multicast iperf between the first remote workload pair.

    Multicast MAC and route state is installed on both workloads, an
    iperf server is bound to the group address, the client transmits to
    the group, and the multicast state is removed again.  Aggregated
    results are stored in tc.resp.  Returns SUCCESS.
    """
    pairs = api.GetRemoteWorkloadPairs()
    w1 = pairs[0][1]
    w2 = pairs[0][0]
    group = "239.1.1.1"
    # Ethernet multicast MAC corresponding to the group address.
    maddr = "01:00:5e:01:01:01"
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" % \
        (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
    api.Logger.info("Starting Multicast outbound Iperf test from %s" % (tc.cmd_descr))
    # Install the multicast MAC on both interfaces.
    basecmd = "ip maddress add %s dev %s" % (maddr, w1.interface)
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, basecmd, background = True)
    basecmd = "ip maddress add %s dev %s" % (maddr, w2.interface)
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name, basecmd, background = True)
    # Route the group address out of the test interfaces.
    basecmd = "ip route add %s/32 dev %s" % (group, w1.interface)
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, basecmd, background = True)
    basecmd = "ip route add %s/32 dev %s" % (group, w2.interface)
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name, basecmd, background = True)
    basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
    if tc.iterators.proto == 'udp':
        basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()
    # Server binds to the multicast group; client sends with TTL 32 for 3s.
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                           "%s -s -t 300 -B %s -i 1" % (basecmd, group), background = True)
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                           "%s -c %s -T 32 -t 3 -i 1" % (basecmd, group))
    # Remove the multicast MAC and routes again.
    basecmd = "ip maddress del %s dev %s" % (maddr, w1.interface)
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, basecmd, background = True)
    basecmd = "ip maddress del %s dev %s" % (maddr, w2.interface)
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name, basecmd, background = True)
    basecmd = "ip route del %s/32 dev %s" % (group, w1.interface)
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, basecmd, background = True)
    basecmd = "ip route del %s/32 dev %s" % (group, w2.interface)
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name, basecmd, background = True)
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run an ib_<test>_bw bandwidth test between the two workloads in tc.w.

    Workload 0 is the server, workload 1 the client.  The test name,
    queue-pair count, and RDMA-CM usage come from tc.iterators; results
    are aggregated into tc.resp.  Returns SUCCESS.
    """
    #==============================================================
    # trigger the commands
    #==============================================================
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    # -R selects RDMA-CM connection establishment when requested.
    if tc.iterators.rdma_cm == 'yes':
        cm_opt = " -R "
    else:
        cm_opt = " "
    i = 0
    j = i + 1
    w1 = tc.w[i]
    w2 = tc.w[j]
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
        (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
    api.Logger.info("Starting ib_%s_bw test from %s" %
                    (tc.iterators.test, tc.cmd_descr))
    # cmd for server (runs in the background until the client connects)
    cmd = "ib_" + tc.iterators.test + "_bw -d " + tc.devices[
        i] + " -n 10 -F -x " + tc.gid[i] + " -s 1024 -b -q " + str(
        tc.iterators.num_qp) + cm_opt + " --report_gbits"
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                           tc.ib_prefix[i] + cmd, background=True)
    # On Naples-Mellanox setups, with Mellanox as server, it takes a few seconds before the server
    # starts listening. So sleep for a few seconds before trying to start the client
    cmd = 'sleep 2'
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, cmd)
    # cmd for client (same options plus the server address)
    cmd = "ib_" + tc.iterators.test + "_bw -d " + tc.devices[
        j] + " -n 10 -F -x " + tc.gid[j] + " -s 1024 -b -q " + str(
        tc.iterators.num_qp) + cm_opt + " --report_gbits " + w1.ip_address
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                           tc.ib_prefix[j] + cmd)
    # trigger the request
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Exercise the SUNRPC ALG on a local workload pair and inspect sessions.

    Starts rpcbind on the server, issues an rpcinfo getport from the
    client, then dumps the ALG session and flow-gate state before and
    after a 100s idle period.  Aggregated results land in tc.resp with
    labels in tc.cmd_cookies.  Returns SUCCESS (also when neither
    workload is a Naples).
    """
    pairs = api.GetLocalWorkloadPairs()
    server = pairs[0][0]
    client = pairs[0][1]
    tc.cmd_cookies = []
    # Pick whichever side of the pair is a Naples for halctl commands.
    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
        (server.workload_name, server.ip_address,
         client.workload_name, client.ip_address)
    api.Logger.info("Starting SUNRPC test from %s" % (tc.cmd_descr))
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl clear session")
    tc.cmd_cookies.append("clear session")
    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "sudo service rpcbind start")
    tc.cmd_cookies.append("Start RPC")
    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "rpcinfo -T udp %s 100000 4"%(server.ip_address))
    tc.cmd_cookies.append("RPC Getport")
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show session --alg sun_rpc")
    tc.cmd_cookies.append("show session")
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show nwsec flow-gate")
    tc.cmd_cookies.append("show security flow-gate")
    # Add Naples command validation
    # Idle long enough for the ALG session/flow-gate to age out.
    api.Trigger_AddNaplesCommand(req, naples.node_name, "sleep 100",
                                 timeout=120)
    tc.cmd_cookies.append("sleep")
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show session --alg sun_rpc")
    tc.cmd_cookies.append("After timeout session")
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show nwsec flow-gate")
    tc.cmd_cookies.append("After timeout flow-gate")
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run ibv_ud_pingpong in both directions between the workloads in tc.w.

    Each iteration uses w[i] as server and the other workload as client;
    the loop runs once per direction.  Aggregated results are stored in
    tc.resp.  Returns SUCCESS.
    """
    #==============================================================
    # trigger the commands
    #==============================================================
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    i = 0
    while (i < 2):
        j = (i + 1) % 2
        w1 = tc.w[i]
        w2 = tc.w[j]
        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
            (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
        api.Logger.info("Starting ibv_ud_pingpong test from %s" % (tc.cmd_descr))
        # cmd for server (background; waits for the client to connect)
        cmd = "ibv_ud_pingpong -d " + tc.devices[i] + " -g " + tc.gid[
            i] + " -s 1024 -r 10 -n 10"
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                               tc.ib_prefix[i] + cmd, background=True)
        # On Naples-Mellanox setups, with Mellanox as server, it takes a few seconds before the server
        # starts listening. So sleep for a few seconds before trying to start the client
        cmd = 'sleep 2'
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, cmd)
        # cmd for client (same options plus the server address)
        cmd = "ibv_ud_pingpong -d " + tc.devices[j] + " -g " + tc.gid[
            j] + " -s 1024 -r 10 -n 10 " + w1.ip_address
        api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                               tc.ib_prefix[j] + cmd)
        i = i + 1
    #end while
    # trigger the request
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def get_dbg_vmotion_stats(tc, node):
    """Run 'halctl show vmotion' on *node* and parse its output.

    Returns the result of vm_process_dbg_stats() on the command output,
    or None when the command produced no output.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    api.Trigger_AddNaplesCommand(req, node, "/nic/bin/halctl show vmotion")
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    # Only one command was queued, so this loop returns on the first
    # iteration either way.
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.stdout == '':
            api.Logger.info("halctl show vmotion returned no info")
            return None
        else:
            ret = vm_process_dbg_stats(cmd.stdout)
            return ret
def TriggerSWMQoSConfigVerfication(req, tc):
    """Queue and run the commands that verify the SWM QoS configuration.

    Dumps the NCSI channel state and reads the HBM parser-CAM enable
    register for uplink ports 0, 1 and 8.  Each issued command string is
    appended to tc.cmd_cookies; aggregated results land in tc.resp.
    Returns SUCCESS.
    """
    api.Logger.info("Running commands to verify SWM QoS Configuration")
    tc.cmd_descr = "QoS configuration commands"
    # channel that has RX mode set is the uplink that is configured to receive BMC traffic
    ncsi_cmd = "/nic/bin/halctl show ncsi channel"
    api.Trigger_AddNaplesCommand(req, tc.node_name, ncsi_cmd)
    tc.cmd_cookies.append(ncsi_cmd)
    # 0 - NONE; 1 - ETHERTYPE; 2 - DA
    # The same register is read on each relevant port; loop instead of
    # repeating the stanza three times (ports 0, 1 and the BMC port 8).
    for port in (0, 1, 8):
        reg_name = 'pb_pbc_hbm_hbm_port_%d_cfg_hbm_parser_cam_enable' % port
        reg_read_cmd = '/bin/echo \'read ' + reg_name + '\' | LD_LIBRARY_PATH=/nic/lib:/platform/lib:$LD_LIBRARY_PATH /platform/bin/capview'
        api.Trigger_AddNaplesCommand(req, tc.node_name, reg_read_cmd)
        tc.cmd_cookies.append(reg_read_cmd)
    #==============================================================
    # trigger the request
    #==============================================================
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Drive traffic toward the configured session limit and capture state.

    Clears existing sessions, sets the per-protocol session limit to 100,
    (for TCP) installs an iptables drop rule so half-open sessions pile
    up, runs the traffic generator, and dumps the HAL session table.
    Aggregated results are stored in tc.resp.  Returns SUCCESS.
    """
    tc.cmd_cookies = []
    tc.event_count_at_start = GetPenctlEventsCount(tc)
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "%s(%s) --> %s(%s)" %\
        (tc.wc_server.workload_name, tc.wc_server.ip_address,
         tc.wc_client.workload_name, tc.wc_client.ip_address)
    api.Logger.info("Starting %s Session test from %s" % (tc.proto, cmd_cookie))
    utils.clearNaplesSessions(node_name=tc.wc_server.node_name)
    metrics = utils.GetDelphiSessionSummaryMetrics(tc.wc_server.node_name)
    api.Logger.info("Before Session summary metrics for %s => %s" % \
                    (tc.wc_server.node_name, metrics))
    #Step 0: Update the session limit in the config object
    utils.SetSessionLimit(tc.proto, 100)
    if tc.proto == 'tcp':
        # Drop inbound port-80 traffic on the client so TCP handshakes
        # never complete and sessions accumulate.
        cmd_cookie = "iptable drop rule"
        api.Trigger_AddCommand(req, tc.wc_client.node_name, \
                               tc.wc_client.workload_name, \
                               "iptables -A INPUT -p tcp --destination-port 80 -j DROP", \
                               background = True)
        tc.cmd_cookies.append(cmd_cookie)
    SetTrafficGeneratorCommand(tc, req)
    cmd_cookie = "show sessions"
    api.Trigger_AddNaplesCommand(req, tc.wc_server.node_name,
                                 "/nic/bin/halctl show session")
    tc.cmd_cookies.append(cmd_cookie)
    trig_resp = api.Trigger(req)
    if tc.proto == 'tcp':
        # NOTE(review): this command is added to 'req' AFTER the request
        # has already been triggered, so it never executes and the cookie
        # list no longer lines up with the responses — confirm intent.
        cmd_cookie = "iptable states"
        api.Trigger_AddCommand(req, tc.wc_client.node_name, \
                               tc.wc_client.workload_name, \
                               "iptables -L -v", background = True)
        tc.cmd_cookies.append(cmd_cookie)
    #give some time for the traffic to pass
    time.sleep(5)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def SetupDNSServer(server, stop=False):
    """Configure (or tear down, when stop=True) a BIND DNS server workload.

    Setup copies the zone file and named.conf onto the workload, rewrites
    the listener address in named.conf to the server's IP, installs the
    files and starts/enables named.  Teardown stops/disables named and
    restores resolv.conf.  Returns None if a file copy fails, otherwise
    SUCCESS.
    """
    node = server.node_name
    workload = server.workload_name
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    if (stop == True):
        api.Trigger_AddCommand(req, node, workload,
                               "sudo systemctl stop named")
        api.Trigger_AddCommand(req, node, workload,
                               "sudo systemctl disable named")
        api.Trigger_AddCommand(
            req, node, workload,
            "sudo mv /etc/resolv.conf.back /etc/resolv.conf")
    else:
        zonefile = dir_path + '/' + "example.com.zone"
        api.Logger.info("fullpath %s" % (zonefile))
        resp = api.CopyToWorkload(node, workload, [zonefile], 'dnsdir')
        if resp is None:
            return None
        named_conf = dir_path + '/' + "named.conf"
        resp = api.CopyToWorkload(node, workload, [named_conf], 'dnsdir')
        if resp is None:
            return None
        # Keep the original resolver config so teardown can restore it.
        api.Trigger_AddCommand(
            req, node, workload,
            "sudo mv /etc/resolv.conf /etc/resolv.conf.back")
        api.Trigger_AddCommand(req, node, workload,
                               "yes | sudo cp dnsdir/named.conf /etc/")
        # The first %s stays a literal "%s" (ex's whole-file substitute
        # address); the second is replaced by the server's IP.
        api.Trigger_AddCommand(
            req, node, workload,
            "ex -s -c \'%s/192.168.100.102/%s/g|x\' /etc/named.conf" %
            ("%s", server.ip_address))
        api.Trigger_AddCommand(
            req, node, workload,
            "yes | sudo cp dnsdir/example.com.zone /var/named/")
        api.Trigger_AddCommand(req, node, workload,
                               "sudo systemctl start named")
        api.Trigger_AddCommand(req, node, workload,
                               "sudo systemctl enable named")
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    for cmd in trig_resp.commands:
        api.PrintCommandResults(cmd)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run N concurrent iperf streams between the first two workloads.

    Servers are queued serially in req1 (background), clients in parallel
    in req2; each thread gets its own allocated port.  The aggregated
    results (servers+termination, then clients) are stored in tc.resp.
    Returns SUCCESS (immediately on dry-run).
    """
    if api.IsDryrun():
        return api.types.status.SUCCESS
    req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    w1 = tc.workloads[0]
    w2 = tc.workloads[1]
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
        (w1.interface, w1.ip_address, w2.interface, w2.ip_address)
    api.Logger.info("Starting Iperf test from %s" % (tc.cmd_descr))
    # Test parameters, with defaults when the iterator doesn't set them.
    proto = getattr(tc.iterators, "proto", 'tcp')
    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)
    pktsize = getattr(tc.iterators, "pktsize", 512)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')
    for i in range(number_of_iperf_threads):
        if proto == 'tcp':
            port = api.AllocateTcpPort()
        else:
            port = api.AllocateUdpPort()
        iperf_server_cmd = cmd_builder.iperf_server_cmd(port=port)
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               iperf_server_cmd, background=True)
        iperf_client_cmd = cmd_builder.iperf_client_cmd(
            server_ip=w1.ip_address, port=port, proto=proto,
            pktsize=pktsize, ipproto=ipproto)
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               iperf_client_cmd)
    trig_resp1 = api.Trigger(req1)
    trig_resp2 = api.Trigger(req2)
    term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)
    response = api.Trigger_AggregateCommandsResponse(trig_resp1, term_resp1)
    tc.resp = api.Trigger_AggregateCommandsResponse(response, trig_resp2)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Probe a session with an in-window out-of-order ACK, then dump and
    clear the session table.

    Commands are recorded in tc.cmd_cookies (a dict); aggregated results
    land in tc.resp.  Background commands started at setup are also
    terminated.  Returns SUCCESS.
    """
    api.Logger.info("Trigger.")
    # Cleanup: a "next in-window sequence number" was computed here from
    # pre_ctrckinf but never used; the computation has been removed.
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    #within the window - outoforder
    cmd_cookie = "sleep 2 && hping3 -c 1 -s 52252 -p 1234 -M {} -L {} --ack --tcp-timestamp {} -d 10".format(
        tc.pre_ctrckinf.i_tcpseqnum + 100, tc.pre_ctrckinf.i_tcpacknum,
        tc.server.ip_address)
    api.Trigger_AddCommand(req, tc.client.node_name, tc.client.workload_name,
                           cmd_cookie)
    tc.cmd_cookies['fail ping'] = cmd_cookie
    cmd_cookie = "sleep 3 && /nic/bin/halctl show session --dstport 1234 --dstip {} --yaml".format(
        tc.server.ip_address)
    api.Trigger_AddNaplesCommand(req, tc.client.node_name, cmd_cookie)
    tc.cmd_cookies['show after'] = cmd_cookie
    cmd_cookie = "/nic/bin/halctl clear session"
    add_command(req, tc, 'clear', tc.client, cmd_cookie, naples=True)
    if tc.server.IsNaples():
        cmd_cookie = "/nic/bin/halctl clear session"
        add_command(req, tc, 'clear', tc.server, cmd_cookie, naples=True)
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    # Also stop the background commands started during setup (result of
    # this terminate is not aggregated).
    api.Trigger_TerminateAllCommands(tc.setup_cmd_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    if getattr(tc.args, 'vmotion_enable', False):
        vmotion_utils.PrepareWorkloadRestore(tc)
    return api.types.status.SUCCESS
def Cleanup(server, client):
    """Remove FTP test artifacts from both workloads and the local rc file.

    Deletes ftp_* staging files and /home/admin/ftp on server and client,
    and removes the local .lftprc.  Always returns SUCCESS.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "rm -rf ftp_*")
    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "rm -rf ftp_*")
    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "rm -rf /home/admin/ftp")
    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "rm -rf /home/admin/ftp")
    ftpfile = os.path.dirname(os.path.realpath(__file__)) + '/' + ".lftprc"
    # Robustness fix: cleanup must not blow up when the rc file is
    # already gone (e.g. a previous partial cleanup).
    try:
        os.remove(ftpfile)
    except FileNotFoundError:
        pass
    trig_resp = api.Trigger(req)
    api.Trigger_TerminateAllCommands(trig_resp)
    return api.types.status.SUCCESS
def cleanup_trigger(tc):
    """Move the sunrpc test file back from /home/sunrpcdir to sunrpcdir
    on the client.

    Aggregated results land in tc.resp.  Returns FAILURE when any command
    produced empty stdout, SUCCESS otherwise.
    """
    tc.cmd_cookies = []
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    # make sure a file change in client reflects on server
    api.Trigger_AddCommand(req, tc.client.node_name, tc.client.workload_name,
                           "mv /home/sunrpcdir/sunrpc_file.txt sunrpcdir/")
    tc.cmd_cookies.append("Move file back")
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        # NOTE(review): 'mv' prints nothing on success, so an empty stdout
        # here appears to report FAILURE even when the move worked —
        # confirm the intended success criterion.
        if cmd.stdout == '':
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def stop_pcap_capture(tc):
    """Stop all background pcap captures and collect every capture file.

    Terminates the capture commands, then copies each interface's pcap
    from its node into the testcase log directory.  Returns FAILURE when
    a node has no host interfaces or a copy fails, SUCCESS otherwise.
    """
    api.Trigger_TerminateAllCommands(tc.pcap_trigger)
    # NOTE: 'nodes' is currently unused — iteration is over tc.host_intfs.
    nodes = api.GetWorkloadNodeHostnames()
    tc_dir = tc.GetLogsDir()
    for n, host_intfs in tc.host_intfs.items():
        if len(host_intfs) == 0:
            api.Logger.error("No host interfaces for node :%s" % n)
            return api.types.status.FAILURE
        for intfObj in host_intfs:
            intf = intfObj.Name()
            resp = api.CopyFromHost(n, [pcap_file_name(intf)], tc_dir)
            if resp == None or resp.api_response.api_status != types_pb2.API_STATUS_OK:
                api.Logger.error("Failed to copy from to Node: %s" % n)
                return api.types.status.FAILURE
    return api.types.status.SUCCESS
def DNSCleanup(server, client):
    """Tear down the DNS server and remove the client's resolver config.

    Delegates server-side teardown to SetupDNSServer(stop=True), then
    deletes /etc/resolv.conf on the client.  Always returns SUCCESS.
    """
    # Cleanup DNS Server
    SetupDNSServer(server, True)
    # Cleanup DNS Client
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "sudo rm /etc/resolv.conf")
    trig_resp = api.Trigger(req)
    api.Trigger_TerminateAllCommands(trig_resp)
    for executed_cmd in trig_resp.commands:
        api.PrintCommandResults(executed_cmd)
    return api.types.status.SUCCESS
def get_sessions_info(tc, node):
    """Dump the HAL session table on *node* and print per-command results.

    Aggregated results are stored in tc.resp; tc.cmd_cookies labels each
    issued command.  Returns None.
    """
    tc.cmd_cookies = []
    # Cleanup: removed an unused local 'sessions' list.
    api.Logger.info("showing session info on node %s" % (node))
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    cmd_cookie = "hal show session"
    api.Trigger_AddNaplesCommand(req, node, "/nic/bin/halctl show session ")
    tc.cmd_cookies.append(cmd_cookie)
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    # Bug fix: the cookie index was never advanced, so every command was
    # labelled with the first cookie.  Advance it per command, clamping
    # in case the response has more entries than cookies.
    for cookie_idx, cmd in enumerate(tc.resp.commands):
        label = tc.cmd_cookies[min(cookie_idx, len(tc.cmd_cookies) - 1)]
        api.Logger.info("Results for %s" % (label))
        api.PrintCommandResults(cmd)
    return
def Teardown(tc):
    """Undo per-iteration state: packet-filter rules, policy changes, and
    background commands, then clear the flow table.

    Returns FAILURE when drop-rule removal fails; for 'longlived' returns
    None after terminating background commands; otherwise returns the
    result of flow_utils.clearFlowTable().
    """
    # Drop rules were only installed for UDP, and for TCP runs other than
    # 'rst' / 'longlived' — remove them in exactly those cases.
    if (tc.iterators.proto == 'udp' or (tc.iterators.proto == 'tcp' and
        tc.iterators.timeout != 'rst' and tc.iterators.timeout != 'longlived')):
        if not addPktFilterRuleOnEp(tc.workload_pairs, tc.iterators.proto, False):
            api.Logger.error("Failed to delete drop rules")
            return api.types.status.FAILURE
    if tc.iterators.timeout == 'drop':
        modifyPolicyRule(tc.workload_pairs, tc.iterators.proto, "allow")
    if tc.iterators.timeout == 'longlived':
        # Terminate background commands
        # NOTE(review): this branch returns None rather than a status and
        # skips the flow-table clear below — confirm that is intentional.
        api.Trigger_TerminateAllCommands(tc.resp)
        return
    return flow_utils.clearFlowTable(tc.workload_pairs)
def send_iperf_cmd(tc, client_port, server_port, test_packet_count):
    """Run one iperf exchange between tc.client and tc.server.

    Uses the specific client/server ports to ensure packets land in the
    desired queue.  The server runs in the background; command strings are
    appended to tc.server_cmds / tc.client_cmds, the description to
    tc.cmd_descr, and the client response is stored in
    tc.iperf_client_resp.  Returns SUCCESS.
    """
    server_req = None
    client_req = None
    server_req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    client_req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_descr = f"Server: [({tc.server.workload_name})({tc.server_ip})] <--> Client: [({tc.client.workload_name})({tc.client_ip})]"
    tc.cmd_descr.append(cmd_descr)
    server_cmd = None
    client_cmd = None
    duration = 10
    server_cmd = iperf.ServerCmd(server_port)
    # Client pins source ip/port and caps bandwidth so the packet count
    # lands deterministically in the target queue.
    client_cmd = iperf.ClientCmd(tc.server_ip, server_port, time=duration, proto=tc.proto, jsonOut=True,\
                                 ipproto=tc.tc_ip_proto, num_of_streams=tc.num_sessions,\
                                 client_ip=tc.client_ip, client_port=client_port, packet_count=test_packet_count,\
                                 bandwidth='10m')
    tc.server_cmds.append(server_cmd)
    tc.client_cmds.append(client_cmd)
    api.Logger.info(f"Iperf: Server [{tc.server.node_name}.{tc.server.interface}]")
    api.Logger.info(f"       Client [{tc.client.node_name}.{tc.client.interface}]")
    api.Trigger_AddCommand(server_req, tc.server.node_name, tc.server.workload_name,
                           server_cmd, background=True)
    api.Trigger_AddCommand(client_req, tc.client.node_name, tc.client.workload_name,
                           client_cmd, background=False, timeout=400)
    api.Logger.info("Start Server")
    server_resp = api.Trigger(server_req)
    #Sleep for some time as bg may not have been started.
    time.sleep(10)
    api.Logger.info("Start Client")
    tc.iperf_client_resp = api.Trigger(client_req)
    api.Logger.info("Client Returned. Terminate Server")
    api.Trigger_TerminateAllCommands(server_resp)
    return api.types.status.SUCCESS
def SetupFTPClient(node, workload):
    """Stage the FTP client test file on the workload under /home/admin/ftp.

    Copies ftp_client.txt into the workload's ftpdir, then installs it
    into /home/admin/ftp.  Returns None when the copy fails, otherwise
    SUCCESS.
    """
    ftpdata = dir_path + '/' + "ftp_client.txt"
    api.Logger.info("fullpath %s" % (ftpdata))
    resp = api.CopyToWorkload(node, workload, [ftpdata], 'ftpdir')
    if resp is None:
        return None
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    for shell_cmd in ("mkdir /home/admin/ftp",
                      "cp ftpdir/ftp_client.txt /home/admin/ftp/"):
        api.Trigger_AddCommand(req, node, workload, shell_cmd)
    trig_resp = api.Trigger(req)
    api.Trigger_TerminateAllCommands(trig_resp)
    for executed_cmd in trig_resp.commands:
        api.PrintCommandResults(executed_cmd)
    return api.types.status.SUCCESS
def flapMgmtConnectivity(tc, node):
    """Flap the oob_mnic0 management interface on *node* (down, 5s, up).

    Aggregated command results are stored in tc.resp.  No return value.
    """
    api.Logger.info("Running flapMgmtConnectivity...")
    cmd_cookies = []
    # Cleanup: removed an unused local 'sessions' list.
    api.Logger.info("flapping mgmt connectivity on node %s" % (node))
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "ifconfig oob_mnic0 down"
    api.Trigger_AddNaplesCommand(req, node, "ifconfig oob_mnic0 down")
    cmd_cookies.append(cmd_cookie)
    cmd_cookie = "sleep"
    api.Trigger_AddNaplesCommand(req, node, "sleep 5", timeout=30)
    # Bug fix: this append previously went to tc.cmd_cookies while the
    # rest of the function tracks cookies in the local list (and raised
    # AttributeError when tc had no cmd_cookies attribute).
    cmd_cookies.append(cmd_cookie)
    cmd_cookie = "ifconfig oob_mnic0 up"
    api.Trigger_AddNaplesCommand(req, node, "ifconfig oob_mnic0 up")
    cmd_cookies.append(cmd_cookie)
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
def SendTraffic(tc):
    """Run a single TCP iperf flow with tc.intf1 as server and tc.intf2
    as client, aggregating the results into tc.resp.  Returns SUCCESS.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" % (
        tc.intf1.Name(), tc.intf1.GetIP(), tc.intf2.Name(), tc.intf2.GetIP())
    api.Logger.info("Starting Iperf from %s" % (tc.cmd_descr))
    # Server side listens in the background on a freshly allocated port.
    port = api.AllocateTcpPort()
    tc.intf1.AddCommand(req, iperf.ServerCmd(port=port), background=True)
    # Client drives a v4 TCP stream of 512-byte packets at that port.
    tc.intf2.AddCommand(
        req,
        iperf.ClientCmd(server_ip=tc.intf1.GetIP(), port=port,
                        proto='tcp', pktsize=512, ipproto='v4'))
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS