def installRequiredPackages(tc):
    """Update yum and reinstall pyzmq on every Naples-backed workload.

    Queues all commands serially, triggers them, logs each result, and
    always returns SUCCESS (failures are logged, not fatal).
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_cookies = []
    tc.resp = None
    for workload in tc.workloads:
        if not workload.IsNaples():
            continue
        # Bring the workload's packages up to date first.
        cookie = "Installing yum packages in WL:%s" % workload.workload_name
        tc.cmd_cookies.append(cookie)
        api.Logger.info(cookie)
        api.Trigger_AddCommand(req, workload.node_name, workload.workload_name,
                               "yum -y -q update")
        # Cleanly reinstall pyzmq: auto-confirm the uninstall, then install.
        cookie = "Installing pyzmq packages in WL:%s" % workload.workload_name
        tc.cmd_cookies.append(cookie)
        api.Logger.info(cookie)
        api.Trigger_AddCommand(req, workload.node_name, workload.workload_name,
                               "/usr/bin/yes | pip3 uninstall pyzmq ; pip3 install pyzmq")
    tc.resp = api.Trigger(req)
    if tc.resp:
        # Commands and cookies were appended in lockstep, so index them together.
        for idx, cmd in enumerate(tc.resp.commands):
            api.Logger.info(tc.cmd_cookies[idx])
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                api.Logger.info("Failed for %s" % tc.cmd_cookies[idx])
    tc.cmd_cookies = []
    return api.types.status.SUCCESS
def Trigger(tc):
    """Apply the iterator's QoS classification type to both workloads.

    The sysctl is only issued on Naples-backed workloads, since only those
    expose the dev.ionic sysctl tree.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    sysctl_cmd = 'sysctl dev.ionic.0.qos.classification_type=' + str(tc.class_type)
    # Queue the command on each Naples workload, first endpoint first.
    for wload in (tc.w[0], tc.w[1]):
        if wload.IsNaples():
            api.Trigger_AddCommand(req, wload.node_name, wload.workload_name,
                                   sysctl_cmd)
    # Trigger, terminate, and aggregate the command results.
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run a 20-packet ping (v4 or v6 per iterator) across every workload pair."""
    if tc.skip:
        return api.types.status.SUCCESS
    interval = "0.2"
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        interval = "3"  # simulation is slower; space the probes further apart
    tc.cmd_cookies = []
    for src, dst in tc.workload_pairs:
        if tc.iterators.ipaf == 'ipv6':
            cmd_cookie = "%s(%s) --> %s(%s)" %\
                         (src.workload_name, src.ipv6_address,
                          dst.workload_name, dst.ipv6_address)
            api.Trigger_AddCommand(req, src.node_name, src.workload_name,
                                   "ping6 -i %s -c 20 -s %d %s" %
                                   (interval, tc.iterators.pktsize, dst.ipv6_address))
        else:
            cmd_cookie = "%s(%s) --> %s(%s)" %\
                         (src.workload_name, src.ip_address,
                          dst.workload_name, dst.ip_address)
            api.Trigger_AddCommand(req, src.node_name, src.workload_name,
                                   "ping -i %s -c 20 -s %d %s" %
                                   (interval, tc.iterators.pktsize, dst.ip_address))
        api.Logger.info("Ping test from %s" % (cmd_cookie))
        tc.cmd_cookies.append(cmd_cookie)
    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Stress session clear/show on Naples while iperf traffic runs locally.

    Repeats (server, 100-stream client, show session, clear session)
    tc.args.count times on the first local workload pair.
    """
    pairs = api.GetLocalWorkloadPairs()
    tc.cmd_cookies = []
    server, client = pairs[0]
    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            # Neither side is Naples-backed; nothing to stress.
            return api.types.status.SUCCESS
        else:
            # Swap roles so that the Naples workload acts as the server.
            client, server = pairs[0]
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                 (server.workload_name, server.ip_address,
                  client.workload_name, client.ip_address)
    api.Logger.info("Starting clear & show stress test from %s" % (cmd_cookie))
    basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
    proto = 6
    timeout = 250
    if tc.iterators.proto == 'udp':
        basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()
        proto = 17
        timeout = 150
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    for _ in range(tc.args.count):
        # Long-running background server, then a 100-stream client burst.
        api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                               "%s-s -t 300" % basecmd, background=True)
        tc.cmd_cookies.append("iperf -s")
        api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                               "%s -c %s -P 100" % (basecmd, server.ip_address))
        tc.cmd_cookies.append("iperf -c ")
        # Exercise show/clear on the Naples node while sessions exist.
        api.Trigger_AddNaplesCommand(req, naples.node_name,
                                     "/nic/bin/halctl show session")
        tc.cmd_cookies.append("Show session")
        api.Trigger_AddNaplesCommand(req, naples.node_name,
                                     "/nic/bin/halctl clear session")
        tc.cmd_cookies.append("Clear session")
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run one iperf server/client exchange per tunnel, all in parallel.

    The local TEP hosts the server; the remote TEP runs the client. The
    request context (commands, cookies, responses) is stored in tc.context.
    """
    tc.contexts = []
    ctxt = IperfTestContext()
    ctxt.req = api.Trigger_CreateAllParallelCommandsRequest()
    ctxt.cmd_cookies = []
    for tunnel in tc.tunnels:
        server = tunnel.ltep
        client = tunnel.rtep
        cmd_cookie = "Server: %s(%s) <--> Client: %s(%s)" %\
                     (server.workload_name, server.ip_address,
                      client.workload_name, client.ip_address)
        api.Logger.info("Starting Iperf test from %s" % (cmd_cookie))
        basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
        if tc.iterators.proto == 'udp':
            basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()
        api.Trigger_AddCommand(ctxt.req, server.node_name, server.workload_name,
                               "%s -s -t 300" % basecmd, background=True)
        api.Trigger_AddCommand(ctxt.req, client.node_name, client.workload_name,
                               "%s -c %s" % (basecmd, server.ip_address))
        # One cookie per queued command (server + client) so results line up.
        ctxt.cmd_cookies.extend([cmd_cookie, cmd_cookie])
    trig_resp = api.Trigger(ctxt.req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    ctxt.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    tc.context = ctxt
    return api.types.status.SUCCESS
def TriggerQoSTeardown(req, tc, w):
    """Queue commands restoring default QoS settings on workload `w`."""
    def _queue(cmd, log_msg=None):
        # Optionally log, queue the sysctl, and remember it as a cookie.
        if log_msg is not None:
            api.Logger.info("Running {} command {} on node_name {} workload_name {}"
                            .format(log_msg, cmd, w.node_name, w.workload_name))
        api.Trigger_AddCommand(req, w.node_name, w.workload_name, cmd)
        tc.cmd_cookies.append(cmd)

    # tc_ethernet must be taken down before the traffic classes themselves.
    QosAddTcEthernetConfig(req, tc, w, 0)
    # Disable every TC except the default TC0.
    _queue('sysctl dev.ionic.0.qos.tc_enable="1 0 0 0 0 0 0"', "TC disable")
    # Revert flow control to link-level flow control (LLFC).
    _queue('sysctl dev.ionic.0.flow_ctrl=1', "flow control")
    # Revert classification type to PCP (no log line, matching prior behavior).
    _queue('sysctl dev.ionic.0.qos.classification_type=1')
def SetupDNSServer(server):
    """Provision a BIND DNS server on the given workload.

    Copies the example.com zone file and named.conf over, patches the
    placeholder listen address with the server's IP, and starts/enables
    named. Returns None if a file copy fails, SUCCESS otherwise.
    """
    node = server.node_name
    workload = server.workload_name
    dir_path = os.path.dirname(os.path.realpath(__file__))

    zonefile = dir_path + '/' + "example.com.zone"
    api.Logger.info("fullpath %s" % (zonefile))
    if api.CopyToWorkload(node, workload, [zonefile], 'dnsdir') is None:
        return None
    named_conf = dir_path + '/' + "named.conf"
    if api.CopyToWorkload(node, workload, [named_conf], 'dnsdir') is None:
        return None

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    setup_cmds = [
        "yes | cp dnsdir/named.conf /etc/",
        # Replace the placeholder 192.168.100.102 with this server's address.
        "ex -s -c \'%s/192.168.100.102/%s/g|x\' /etc/named.conf" % ("%s", server.ip_address),
        "yes | cp dnsdir/example.com.zone /var/named/",
        "systemctl start named",
        "systemctl enable named",
    ]
    for setup_cmd in setup_cmds:
        api.Trigger_AddCommand(req, node, workload, setup_cmd)
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    for cmd in trig_resp.commands:
        api.PrintCommandResults(cmd)
    return api.types.status.SUCCESS
def Trigger(tc):
    """For each workload pair, capture ARP and GRE on the receiver while the
    sender flood-pings it, then aggregate results into tc.resp.

    Fix: removed the unused second request object (`catreq`) and the dead
    `iter_num` counter — neither was ever read.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_cookies = []
    for pair in tc.workload_pairs:
        w1 = pair[0]
        w2 = pair[1]
        cmd_cookie = "%s(%s) --> %s(%s)" %\
                     (w1.workload_name, w1.ip_address,
                      w2.workload_name, w2.ip_address)
        api.Logger.info("Starting Ping test from %s" % (cmd_cookie))
        tc.cmd_cookies.append(cmd_cookie)
        # Background captures on the receiver: ARP and GRE traffic.
        api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                               "tcpdump -nni %s ether proto arp" % (w2.interface),
                               background=True)
        api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                               "tcpdump -nni %s ip proto gre" % (w2.interface),
                               background=True)
        # Flood ping from the sender with the iterator-selected packet size.
        api.Trigger_AddCommand(
            req, w1.node_name, w1.workload_name,
            "ping -f -c 50 -s %d %s" % (tc.iterators.pktsize, w2.ip_address))
    trig_resp = api.Trigger(req)
    time.sleep(10)  # give the background tcpdumps time to collect traffic
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def __add_iptables_to_workloads(workloads=None):
    """Install INPUT DROP rules (tcp/udp, matching src and dst) on workloads.

    Fix: replaced the mutable default argument `workloads=[]` with None.
    Behavior is unchanged — a falsy/omitted argument still means "all
    workloads" — but the shared-default-object pitfall is gone.

    Returns SUCCESS, or FAILURE if the trigger returns no response.
    """
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    workloads = workloads if workloads else api.GetWorkloads()
    for wl in workloads:
        # One DROP rule per (protocol, direction): tcp/src, tcp/dst, udp/src, udp/dst.
        for proto in ("tcp", "udp"):
            for direction in ("src", "dst"):
                rule = "iptables -A INPUT -p %s -i %s --%s %s -j DROP" % (
                    proto, wl.interface, direction, wl.ip_prefix)
                api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, rule)
                api.Logger.info(rule)
    resp = api.Trigger(req)
    if resp is None:
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def copyTestCommand(tc):
    """Distribute test_reject.py to every workload and install it in /usr/local/bin.

    Returns True on success, False if any file copy fails.
    """
    test_reject_file = os.path.join(os.path.dirname(__file__), "test_reject.py")
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    path_to_copy = "test_reject_script"
    for wload in tc.workload_dict.keys():
        api.Logger.info("Copying file %s to %s:%s" %
                        (test_reject_file, wload.workload_name, path_to_copy))
        copy_resp = api.CopyToWorkload(wload.node_name, wload.workload_name,
                                       [test_reject_file], path_to_copy)
        if copy_resp is None:
            api.Logger.error("Failed to copy test reject command to %s" % wload.workload_name)
            return False
        # Move the script into PATH and make it executable.
        api.Trigger_AddCommand(req, wload.node_name, wload.workload_name,
                               "yes | mv %s/test_reject.py /usr/local/bin" % (path_to_copy))
        api.Trigger_AddCommand(req, wload.node_name, wload.workload_name,
                               "yes | chmod 755 /usr/local/bin/test_reject.py")
    trig_resp = api.Trigger(req)
    for cmd in trig_resp.commands:
        api.PrintCommandResults(cmd)
    return True
def __checkDebugStatsDefault(wl):
    """Verify sw-dbg-stats is off by default and no extended stats are exposed.

    Runs `ethtool --show-priv-flags` and `ethtool -S` on the workload's
    interface. Returns SUCCESS, FAILURE, or UNAVAIL when the driver does not
    expose the sw-dbg-stats private flag at all.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Logger.info("Check default setting: %s" % wl.interface)
    api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                           "ethtool --show-priv-flags %s" % wl.interface)
    api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                           "ethtool -S %s" % wl.interface)
    resp = api.Trigger(req)

    flags_cmd = resp.commands[0]
    if flags_cmd.exit_code != 0:
        api.Logger.error("Bad exit code %d on interface %s" %
                         (flags_cmd.exit_code, wl.interface))
        api.Logger.info(flags_cmd.stderr)
        return api.types.status.FAILURE
    if "sw-dbg-stats: on" in flags_cmd.stdout:
        api.Logger.error("sw-dbg-stats on by default interface %s" % wl.interface)
        return api.types.status.FAILURE
    if "sw-dbg-stats: off" not in flags_cmd.stdout:
        # Flag missing entirely: this driver build doesn't support it.
        api.Logger.info("sw-dbg-stats not available on interface %s" % wl.interface)
        return api.types.status.UNAVAIL

    stats_cmd = resp.commands[1]
    if stats_cmd.exit_code != 0:
        api.Logger.error("Bad exit code %d on interface %s" %
                         (stats_cmd.exit_code, wl.interface))
        api.Logger.info(stats_cmd.stderr)
        return api.types.status.FAILURE
    if "napi_work_done" in stats_cmd.stdout:
        api.Logger.error("extended dbg stats seen in default mode on interface %s" % wl.interface)
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def trigger_data_update(tc):
    """Move a file across the SUNRPC mount on the client and verify it shows
    up on the server, then check the ALG session on Naples.

    Results are aggregated into tc.resp; nothing is returned.
    """
    tc.cmd_cookies = []
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    # Client side: create the file on the mount and list the directory.
    for cmd, cookie in (
            ("mv sunrpcdir/sunrpc_file.txt /home/sunrpcdir/", "Create file"),
            ("ls -al /home/sunrpcdir", "verify file")):
        api.Trigger_AddCommand(req, tc.client.node_name, tc.client.workload_name, cmd)
        tc.cmd_cookies.append(cookie)
    # Server side: confirm the file arrived through the RPC mount.
    for cmd, cookie in (
            ("ls -al /home/sunrpcmntdir/", "After rpc"),
            ("sh -c 'cat /home/sunrpcmntdir/sunrpc_file.txt'", "After rpc")):
        api.Trigger_AddCommand(req, tc.server.node_name, tc.server.workload_name, cmd)
        tc.cmd_cookies.append(cookie)
    # Naples-side validation of the SUNRPC ALG session.
    api.Trigger_AddNaplesCommand(req, tc.vm_node.node_name,
                                 "/nic/bin/halctl show session --alg sun_rpc")
    tc.cmd_cookies.append("show session")
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
def Trigger(tc):
    """Run parallel iperf streams between tc.workloads[0] (server) and
    tc.workloads[1] (client), on the host or on Naples per test-type.

    Client responses land in tc.cli_resp for Verify() to inspect.
    """
    if api.IsDryrun():
        return api.types.status.SUCCESS

    srv = tc.workloads[0]
    cli = tc.workloads[1]

    # Commands run on the host unless the test type selects Naples.
    test_type = getattr(tc.args, "test-type", INTF_TEST_TYPE_HOST)
    is_naples_cmd = test_type != INTF_TEST_TYPE_HOST

    srv_req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    cli_req = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    proto = getattr(tc.iterators, "proto", 'tcp')
    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)
    pktsize = getattr(tc.iterators, "pktsize", None)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')
    if ipproto == 'v4':
        server_ip, client_ip = srv.ip_address, cli.ip_address
    else:
        server_ip, client_ip = srv.ipv6_address, cli.ipv6_address

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (srv.interface, server_ip, cli.interface, client_ip)
    api.Logger.info("Starting Iperf(%s/%s) test from %s" % (proto, ipproto, tc.cmd_descr))

    duration = 10
    for _ in range(number_of_iperf_threads):
        # Each thread gets its own freshly allocated port.
        port = api.AllocateTcpPort() if proto == 'tcp' else api.AllocateUdpPort()
        api.Trigger_AddCommand(srv_req, srv.node_name, srv.workload_name,
                               iperf.ServerCmd(port, naples=is_naples_cmd),
                               background=True)
        iperf_client_cmd = iperf.ClientCmd(server_ip, port, time=duration,
                                           proto=proto, jsonOut=True,
                                           ipproto=ipproto, pktsize=pktsize,
                                           client_ip=client_ip,
                                           naples=is_naples_cmd)
        api.Trigger_AddCommand(cli_req, cli.node_name, cli.workload_name,
                               iperf_client_cmd, timeout=60)

    srv_resp = api.Trigger(srv_req)
    # Give the background iperf servers time to start listening.
    time.sleep(10)
    tc.cli_resp = api.Trigger(cli_req)
    # Wait for the clients to run to completion.
    time.sleep(2 * duration)
    api.Trigger_TerminateAllCommands(srv_resp)
    return api.types.status.SUCCESS
def ConnectivityVRIPTest(proto='icmp', af='ipv4', pktsize=64, scope=config_api.WORKLOAD_PAIR_SCOPE_INTRA_SUBNET, args=None):
    """Ping subnet VR IPs from workloads and return probe bookkeeping.

    For intra-subnet scope each vnic's workload pings the VR IP of its own
    subnet. Otherwise each workload pings the VR IPs of same-node subnets
    (excluding its own subnet when scope is inter-subnet).

    Returns:
        (cmd_cookies, trigger response, {node_name: probes queued}).
    """
    cmd_cookies = []
    cmd = None
    # default probe count is 3
    probe_count = 3
    # Maps node_name -> total number of probes queued from that node.
    sent_probes = dict()
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial = False)
    naplesHosts = api.GetNaplesHostnames()
    vnics = []
    subnets = []
    # Collect every vnic and subnet object across all Naples nodes.
    for node in naplesHosts:
        vnics.extend(vnic.client.Objects(node))
        subnets.extend(subnet.client.Objects(node))
    if scope == config_api.WORKLOAD_PAIR_SCOPE_INTRA_SUBNET:
        for vnic1 in vnics:
            # Skip control-VPC vnics; they carry no test traffic.
            if vnic1.SUBNET.VPC.Type == vpc_pb2.VPC_TYPE_CONTROL:
                continue
            wl = config_api.FindWorkloadByVnic(vnic1)
            assert(wl)
            # Target is the VR IP of the vnic's own subnet.
            dest_ip = vnic1.SUBNET.GetIPv4VRIP()
            cmd = traffic_utils.PingCmdBuilder(wl, dest_ip, proto, af, pktsize, args, probe_count)
            api.Logger.info(f" VR_IP on {wl.node_name}: {cmd}")
            api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, cmd)
            cmd_cookies.append(cmd)
            cur_cnt = sent_probes.get(wl.node_name, 0)
            sent_probes.update({wl.node_name: cur_cnt + probe_count})
    else:
        for vnic1 in vnics:
            if vnic1.SUBNET.VPC.Type == vpc_pb2.VPC_TYPE_CONTROL:
                continue
            wl = config_api.FindWorkloadByVnic(vnic1)
            assert(wl)
            for subnet1 in subnets:
                if subnet1.VPC.Type == vpc_pb2.VPC_TYPE_CONTROL:
                    continue
                # Only target subnets on the same node as the vnic.
                if subnet1.Node != vnic1.Node:
                    continue
                # For inter-subnet scope, skip the vnic's own subnet.
                if scope == config_api.WORKLOAD_PAIR_SCOPE_INTER_SUBNET and (vnic1.SUBNET.GID() == subnet1.GID()):
                    continue
                dest_ip = subnet1.GetIPv4VRIP()
                cmd = traffic_utils.PingCmdBuilder(wl, dest_ip, proto, af, pktsize, args, probe_count)
                api.Logger.info(f" VRIP on {wl.node_name}: {cmd} ")
                api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, cmd)
                cmd_cookies.append(cmd)
                cur_cnt = sent_probes.get(wl.node_name, 0)
                sent_probes.update({wl.node_name: cur_cnt + probe_count})
    resp = api.Trigger(req)
    return cmd_cookies, resp, sent_probes
def ArPing(tc):
    """Send gratuitous ARPs (arping -U) from both ends of each workload pair.

    Pair selection is driven by tc.args.type: 'local_only', 'both', or
    remote-only (default). Returns FAILURE if triggering fails or any arping
    exits non-zero, SUCCESS otherwise.
    """
    if tc.args.type == 'local_only':
        api.Logger.info("local_only test")
        tc.workload_pairs = api.GetLocalWorkloadPairs()
    elif tc.args.type == 'both':
        api.Logger.info(" both local and remote test")
        tc.workload_pairs = api.GetLocalWorkloadPairs()
        tc.workload_pairs.extend(api.GetRemoteWorkloadPairs())
    else:
        api.Logger.info("remote_only test")
        tc.workload_pairs = api.GetRemoteWorkloadPairs()
    if len(tc.workload_pairs) == 0:
        # NOTE(review): tc.skip is set but execution continues with an empty
        # pair list (the loop below simply runs zero times) — confirm intent.
        api.Logger.info("Skipping Testcase due to no workload pairs.")
        tc.skip = True
    req = None
    # NOTE(review): `interval` is set here but never used by the arping
    # commands below — possibly leftover from a ping-based variant.
    interval = "0.2"
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        interval = "3"
    tc.cmd_cookies = []
    for pair in tc.workload_pairs:
        w1 = pair[0]
        w2 = pair[1]
        cmd_cookie = "%s %s %s %s" % (w1.node_name, w1.workload_name, w1.interface, w1.ip_address)
        # NOTE(review): the interface is hard-coded to eth1 rather than using
        # w1.interface / w2.interface — verify this matches the topology.
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                               "arping -c 5 -U %s -I eth1" % (w1.ip_address))
        api.Logger.info("ArPing from %s" % (cmd_cookie))
        tc.cmd_cookies.append(cmd_cookie)
        cmd_cookie = "%s %s %s %s" % (w2.node_name, w2.workload_name, w2.interface, w2.ip_address)
        api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                               "arping -c 5 -U %s -I eth1" % (w2.ip_address))
        api.Logger.info("ArPing from %s" % (cmd_cookie))
        tc.cmd_cookies.append(cmd_cookie)
    tc.resp = api.Trigger(req)
    if tc.resp is None:
        return api.types.status.FAILURE
    result = api.types.status.SUCCESS
    cookie_idx = 0
    for cmd in tc.resp.commands:
        api.Logger.info("ArPing Results for %s" % (tc.cmd_cookies[cookie_idx]))
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            result = api.types.status.FAILURE
        cookie_idx += 1
    return result
def Trigger(tc):
    """Stream a VOB file over RTSP through Naples and validate ALG state.

    Copies the media file to the server, runs live555MediaServer in the
    background, fetches it with openRTSP, and checks the RTSP session and
    flow-gate state on the Naples node.
    """
    pairs = api.GetLocalWorkloadPairs()
    server = pairs[0][0]
    client = pairs[0][1]
    tc.cmd_cookies = []
    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            # Neither endpoint is Naples-backed; nothing to validate.
            return api.types.status.SUCCESS
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (server.workload_name, server.ip_address,
                    client.workload_name, client.ip_address)
    api.Logger.info("Starting RTSP test from %s" % (tc.cmd_descr))

    # Ship the media file to the server workload.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    fullpath = dir_path + '/' + "small.vob"
    api.Logger.info("fullpath %s" % (fullpath))
    if api.CopyToWorkload(server.node_name, server.workload_name,
                          [fullpath], 'rtspdir') is None:
        return api.types.status.FAILURE

    def _add(workload, command, cookie, **kwargs):
        # Queue one workload command together with its result cookie.
        api.Trigger_AddCommand(req, workload.node_name, workload.workload_name,
                               command, **kwargs)
        tc.cmd_cookies.append(cookie)

    _add(client, "ls -al | grep video", "Before RTSP")
    _add(server, "cd rtspdir && live555MediaServer", "Run RTSP server",
         background=True)
    _add(client, "openRTSP rtsp://%s/small.vob" % server.ip_address,
         "Run RTSP client")
    # Naples-side validation of the RTSP ALG session and flow gates.
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show session --alg rtsp --yaml")
    tc.cmd_cookies.append("show session RTSP established")
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show nwsec flow-gate | grep RTSP")
    tc.cmd_cookies.append("show flow-gate")
    _add(client, "ls -al | grep video", "After RTSP")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def Trigger(tc): req = api.Trigger_CreateExecuteCommandsRequest(serial=True) # set interrupt coalescing value for wl in tc.workloads: # TODO: Maybe revisit this. Ignore 802.1q vlan workloads for now. if wl.interface_type == topo_svc.INTERFACE_TYPE_VSS: api.Logger.info("Set Interrupt Coalescing: Skipping vlan workload") continue api.Logger.info("Set Interrupt Coalescing on %s:%s:%s to %d" % \ (wl.node_name, wl.workload_name, wl.interface, \ tc.iterators.coales_interval)) if tc.os == 'linux': api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, \ "ethtool -C %s rx-usecs %d" % \ (wl.interface, tc.iterators.coales_interval)) elif tc.os == 'freebsd': api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, \ "sysctl dev.%s.intr_coal=%d" % \ (host.GetNaplesSysctl(wl.interface), \ tc.iterators.coales_interval)) tc.resp = api.Trigger(req) if tc.resp is None: api.Logger.error("Command failed to respond") return api.types.status.FAILURE # validate the command response # for > than max, expect an error and a specific message for cmd in tc.resp.commands: if tc.iterators.coales_interval < tc.args.max_coales_interval: if cmd.exit_code != 0: #linux ethtool will not set the value if same as current if cmd.stderr.find("unmodified, ignoring") == -1: api.Logger.error("Failed to set interrupt coalescing") api.Logger.info(cmd.stderr) return api.types.status.FAILURE else: if tc.os == 'linux': if cmd.stderr.find("out of range") == -1: api.Logger.error("ionic did not error when coales value set (%d) > than supported %d)" \ % (tc.iterators.coales_interval, tc.args.max_coales_interval)) api.Logger.info(cmd.stderr) return api.types.status.FAILURE elif tc.os == 'freebsd': if cmd.stderr.find("large") == -1: api.Logger.error( "ionic did not error when coales value set > than supported" ) api.Logger.info(cmd.stderr) return api.types.status.FAILURE return api.types.status.SUCCESS
def Cleanup(server, client):
    """Remove TFTP artifacts from both the server and the client workloads."""
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    # Server loses both the tftpboot content and its staging dir; the
    # client only has the staging dir.
    cleanup_targets = (
        (server, "rm -rf /var/lib/tftpboot/*"),
        (server, "rm -rf tftpdir"),
        (client, "rm -rf tftpdir"),
    )
    for wload, cmd in cleanup_targets:
        api.Trigger_AddCommand(req, wload.node_name, wload.workload_name, cmd)
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Alternate TCP/UDP netcat exchanges across up to 16 tunnels, serially.

    Even-indexed tunnels exercise TCP on port 2000; odd-indexed ones UDP.
    The listener runs in the background on the remote TEP while the local
    TEP pipes a short message to it.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_cookies = []
    for idx, tunnel in enumerate(tc.tunnels):
        if idx == 16:
            break  # cap the test at 16 tunnels
        w1 = tunnel.ltep
        w2 = tunnel.rtep
        cmd_cookie = "%s(%s) --> %s(%s)" %\
                     (w1.workload_name, w1.ip_address,
                      w2.workload_name, w2.ip_address)
        if idx % 2 == 0:
            # Even tunnels: TCP.
            api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                                   "nc -l %s 2000" % w2.ip_address,
                                   background=True)
            api.Logger.info("Listen on IP %s and TCP port 2000" % w2.ip_address)
            api.Trigger_AddCommand(
                req, w1.node_name, w1.workload_name,
                "echo \"Pensando Systems\" | nc %s 2000" % w2.ip_address)
            api.Logger.info("Send TCP traffic from %s" % (cmd_cookie))
            tc.cmd_cookies.append("TCP test " + cmd_cookie)
        else:
            # Odd tunnels: UDP.
            api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                                   "nc -l -u %s 2000" % w2.ip_address,
                                   background=True)
            api.Logger.info("Listen on IP %s and UDP port 2000" % w2.ip_address)
            api.Trigger_AddCommand(
                req, w1.node_name, w1.workload_name,
                "echo \"Pensando Systems\" | nc -u %s 2000" % w2.ip_address)
            api.Logger.info("Send UDP traffic from %s" % (cmd_cookie))
            tc.cmd_cookies.append("UDP test " + cmd_cookie)
    trig_resp = api.Trigger(req)
    time.sleep(10)  # let the exchanges complete before terminating listeners
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run an rping (RDMA CM ping) exchange between the two workloads in tc.w.

    When tc.iterators.server == 'yes' a single pairing runs with tc.w[0] as
    the server; otherwise a single pairing runs with tc.w[1] as the server.
    The server starts in the background and the client connects after a
    2-second grace period.
    """
    #==============================================================
    # trigger the commands
    #==============================================================
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    # i selects the server index; k bounds the while loop to one iteration.
    if getattr(tc.iterators, 'server', None) == 'yes':
        i = 0
        k = 1
    else:
        i = 1
        k = 2
    while ((i < 2) and (i < k)):
        j = (i + 1) % 2
        w1 = tc.w[i]  # server workload
        w2 = tc.w[j]  # client workload
        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
            (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
        api.Logger.info("Starting rping test from %s" % (tc.cmd_descr))
        # cmd for server
        cmd = "rping -s -a %s -C 10 -S %d " % (w1.ip_address, tc.iterators.pktsize)
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                               tc.ib_prefix[i] + cmd, background=True)
        # On Naples-Mellanox setups, with Mellanox as server, it takes a few seconds before the server
        # starts listening. So sleep for a few seconds before trying to start the client
        cmd = 'sleep 2'
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, cmd)
        # cmd for client
        cmd = "rping -c -a %s -C 10 -S %d " % (w1.ip_address, tc.iterators.pktsize)
        api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                               tc.ib_prefix[j] + cmd)
        i = i + 1
    #end while
    # trigger the request
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run iperf in parallel between each workload pair and collect client
    responses in tc.iperf_client_resp.

    Bug fix: the port allocators were swapped — the UDP branch called
    api.AllocateTcpPort() and the TCP branch called api.AllocateUdpPort().
    UDP now allocates a UDP port and TCP a TCP port.
    """
    if tc.skip:
        return api.types.status.SUCCESS
    tc.serverCmds = []
    tc.clientCmds = []
    tc.cmd_descr = []
    if not api.IsSimulation():
        serverReq = api.Trigger_CreateAllParallelCommandsRequest()
        clientReq = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        serverReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        clientReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    for idx, pairs in enumerate(tc.workload_pairs):
        client = pairs[0]
        server = pairs[1]
        cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                    (server.workload_name, server.ip_address,
                     client.workload_name, client.ip_address)
        tc.cmd_descr.append(cmd_descr)
        num_sessions = int(getattr(tc.args, "num_sessions", 1))
        api.Logger.info("Starting Iperf test from %s num-sessions %d" %
                        (cmd_descr, num_sessions))
        if tc.iterators.proto == 'udp':
            # Fix: UDP tests must use a UDP port (was AllocateTcpPort()).
            port = api.AllocateUdpPort()
            serverCmd = iperf.ServerCmd(port)
            clientCmd = iperf.ClientCmd(server.ip_address, port, proto='udp',
                                        jsonOut=True,
                                        num_of_streams=num_sessions)
        else:
            # Fix: TCP tests must use a TCP port (was AllocateUdpPort()).
            port = api.AllocateTcpPort()
            serverCmd = iperf.ServerCmd(port)
            clientCmd = iperf.ClientCmd(server.ip_address, port, jsonOut=True,
                                        num_of_streams=num_sessions)
        tc.serverCmds.append(serverCmd)
        tc.clientCmds.append(clientCmd)
        api.Trigger_AddCommand(serverReq, server.node_name,
                               server.workload_name, serverCmd,
                               background=True)
        api.Trigger_AddCommand(clientReq, client.node_name,
                               client.workload_name, clientCmd)
    server_resp = api.Trigger(serverReq)
    # Sleep for some time as the background servers may not have started yet.
    time.sleep(30)
    tc.iperf_client_resp = api.Trigger(clientReq)
    # It's faster to kill the iperf servers; still terminate everything.
    api.Trigger_TerminateAllCommands(server_resp)
    return api.types.status.SUCCESS
def TriggerHping(workload_pairs, proto, af, pktsize, count=3, tcp_flags=None, rflow=False):
    """Fire hping3 probes across every workload pair in parallel.

    For tcp/udp, ports 1234->8000 are used (with -F added for TCP when
    tcp_flags contains "fin"); otherwise ports are zero. When rflow is set,
    a reverse-direction probe is queued as well.

    Returns (cmd_cookies, trigger response).
    """
    cmd_cookies = []
    options = ''
    req = api.Trigger_CreateAllParallelCommandsRequest()
    if proto in ['tcp', 'udp']:
        src_port, dest_port = 1234, 8000
        if proto == 'tcp' and tcp_flags is not None and "fin" in tcp_flags:
            options += ' -F'
    else:
        src_port = dest_port = 0
    # Long floods (>100 probes) run in the background.
    run_in_background = count > 100
    for src_wl, dst_wl in workload_pairs:
        fwd_cmd = GetHping3Cmd(proto, src_wl, dst_wl.ip_address, dest_port,
                               src_port, count, options)
        api.Trigger_AddCommand(req, src_wl.node_name, src_wl.workload_name,
                               fwd_cmd, background=run_in_background)
        cmd_cookies.append(fwd_cmd)
        if rflow:
            # Reverse direction: swap endpoints and the port roles.
            rev_cmd = GetHping3Cmd(proto, dst_wl, src_wl.ip_address, src_port,
                                   dest_port, count, options)
            api.Trigger_AddCommand(req, dst_wl.node_name, dst_wl.workload_name,
                                   rev_cmd)
            cmd_cookies.append(rev_cmd)
    resp = api.Trigger(req)
    return cmd_cookies, resp
def pingWorkloads(workload_pairs, af="ipv4", packet_size=64, count=3, interval=0.2, do_pmtu_disc=False, sec_ip_test_type='none'):
    """Ping every workload pair, optionally exercising secondary IPs.

    sec_ip_test_type:
        'all'    - full mesh over primary + every secondary address pair;
        'random' - primary ping plus one probe between random secondaries;
        other    - primary-to-primary ping only.

    Returns (cmd_cookies, trigger response).
    """
    cmd_cookies = []
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    def _queue(w, ping_cmd):
        # Log, queue one ping command, and record it as its own cookie.
        api.Logger.verbose(" Ping cmd %s " % (ping_cmd))
        api.Trigger_AddCommand(req, w.node_name, w.workload_name, ping_cmd)
        cmd_cookies.append(ping_cmd)

    for w1, w2 in workload_pairs:
        base_cmd = __get_ping_base_cmd(w1, af, packet_size, count, interval,
                                       do_pmtu_disc)
        if sec_ip_test_type == 'all':
            # Full mesh: every (source, destination) address combination.
            sources = [__get_workload_address(w1, af)] + w1.sec_ip_addresses
            targets = [__get_workload_address(w2, af)] + w2.sec_ip_addresses
            for src_ip in sources:
                for dst_ip in targets:
                    with_src = __add_source_ip_to_ping_cmd(base_cmd, src_ip)
                    _queue(w1, __ping_addr_substitution(with_src, dst_ip))
        else:
            _queue(w1, __ping_addr_substitution(
                base_cmd, __get_workload_address(w2, af)))
            if sec_ip_test_type == 'random' and w1.sec_ip_addresses and w2.sec_ip_addresses:
                # One extra probe between randomly chosen secondary IPs.
                with_src = __add_source_ip_to_ping_cmd(
                    base_cmd, random.choice(w1.sec_ip_addresses))
                _queue(w1, __ping_addr_substitution(
                    with_src, random.choice(w2.sec_ip_addresses)))
    resp = api.Trigger(req)
    return cmd_cookies, resp
def Trigger(tc):
    """Start background iperf across pairs, tear the workloads down under
    the running traffic, and verify command-termination semantics.

    The final Trigger_TerminateAllCommands response is stored in tc.resp and
    is expected to reflect commands already stopped by the teardown.
    """
    # All traffic commands run in parallel.
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    bounced = []
    for w1, w2 in tc.workload_pairs:
        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (w1.workload_name, w1.ip_address,
                        w2.workload_name, w2.ip_address)
        api.Logger.info("Starting Iperf test from %s" % (tc.cmd_descr))
        basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                               "%s -s -t 300" % basecmd, background=True)
        api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                               "%s -c %s" % (basecmd, w1.ip_address),
                               background=True)
        bounced.extend((w1, w2))
    # Workloads must be up before traffic is triggered.
    if api.BringUpWorkloads(bounced) != api.types.status.SUCCESS:
        return api.types.status.FAILURE
    trig_resp = api.Trigger(req)
    time.sleep(10)  # let the traffic stabilize
    # Tear the workloads down underneath the running commands.
    if api.TeardownWorkloads(bounced) != api.types.status.SUCCESS:
        api.Trigger_TerminateAllCommands(trig_resp)
        return api.types.status.FAILURE
    # Expected to fail: the workloads are gone and all commands are stopped.
    tc.resp = api.Trigger_TerminateAllCommands(trig_resp)
    # Restore the same workloads for subsequent test cases.
    if api.BringUpWorkloads(bounced) != api.types.status.SUCCESS:
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run an ib_<test>_bw bandwidth test between tc.w[0] (server) and tc.w[1].

    Iterators used: test (e.g. read/write/send), num_qp, and rdma_cm (adds
    -R to use the RDMA connection manager when 'yes'). The server runs in
    the background; the client starts after a 2-second grace period.
    """
    #==============================================================
    # trigger the commands
    #==============================================================
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    # -R selects the RDMA connection manager for connection setup.
    if tc.iterators.rdma_cm == 'yes':
        cm_opt = " -R "
    else:
        cm_opt = " "
    i = 0
    j = i + 1
    w1 = tc.w[i]  # server workload
    w2 = tc.w[j]  # client workload
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
        (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
    api.Logger.info("Starting ib_%s_bw test from %s" %
                    (tc.iterators.test, tc.cmd_descr))
    # cmd for server: bidirectional (-b), 10 iterations, the server-side
    # device/GID, and the iterator's QP count, reporting in Gbits.
    cmd = "ib_" + tc.iterators.test + "_bw -d " + tc.devices[
        i] + " -n 10 -F -x " + tc.gid[i] + " -s 1024 -b -q " + str(
        tc.iterators.num_qp) + cm_opt + " --report_gbits"
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                           tc.ib_prefix[i] + cmd, background=True)
    # On Naples-Mellanox setups, with Mellanox as server, it takes a few seconds before the server
    # starts listening. So sleep for a few seconds before trying to start the client
    cmd = 'sleep 2'
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, cmd)
    # cmd for client: same options plus the server's IP address as target.
    cmd = "ib_" + tc.iterators.test + "_bw -d " + tc.devices[
        j] + " -n 10 -F -x " + tc.gid[j] + " -s 1024 -b -q " + str(
        tc.iterators.num_qp) + cm_opt + " --report_gbits " + w1.ip_address
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                           tc.ib_prefix[j] + cmd)
    # trigger the request
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Exercise multicast iperf between the first remote workload pair:
    install multicast MAC/route state on both ends, run iperf against the
    group address, then remove the state again."""
    pairs = api.GetRemoteWorkloadPairs()
    srv = pairs[0][1]
    cli = pairs[0][0]
    group = "239.1.1.1"
    maddr = "01:00:5e:01:01:01"

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" % \
                   (srv.workload_name, srv.ip_address,
                    cli.workload_name, cli.ip_address)
    api.Logger.info("Starting Multicast outbound Iperf test from %s" % (tc.cmd_descr))

    def _queue_bg(wl, cmdline):
        # Shorthand: queue a background command inside a workload.
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                               cmdline, background = True)

    # Program the multicast MAC address on both interfaces.
    for wl in (srv, cli):
        _queue_bg(wl, "ip maddress add %s dev %s" % (maddr, wl.interface))
    # Add a host route for the group on both interfaces.
    for wl in (srv, cli):
        _queue_bg(wl, "ip route add %s/32 dev %s" % (group, wl.interface))

    # A TCP port is always allocated; UDP mode overrides the base command.
    basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
    if tc.iterators.proto == 'udp':
        basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()

    # Server binds to the group address in the background ...
    _queue_bg(srv, "%s -s -t 300 -B %s -i 1" % (basecmd, group))
    # ... while the client sends to the group in the foreground.
    api.Trigger_AddCommand(req, cli.node_name, cli.workload_name,
                           "%s -c %s -T 32 -t 3 -i 1" % (basecmd, group))

    # Undo the multicast MAC and route configuration.
    for wl in (srv, cli):
        _queue_bg(wl, "ip maddress del %s dev %s" % (maddr, wl.interface))
    for wl in (srv, cli):
        _queue_bg(wl, "ip route del %s/32 dev %s" % (group, wl.interface))

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    return api.types.status.SUCCESS
def Trigger(tc):
    """Validate SUN RPC ALG handling on the Naples endpoint of the first
    local workload pair: trigger an RPC getport, then inspect session and
    flow-gate state before and after the gate ages out."""
    pairs = api.GetLocalWorkloadPairs()
    server, client = pairs[0][0], pairs[0][1]
    tc.cmd_cookies = []

    # Prefer the server as the Naples endpoint, fall back to the client;
    # skip the test when neither endpoint is a Naples workload.
    naples = server if server.IsNaples() else client
    if not naples.IsNaples():
        return api.types.status.SUCCESS

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (server.workload_name, server.ip_address,
                    client.workload_name, client.ip_address)
    api.Logger.info("Starting SUNRPC test from %s" % (tc.cmd_descr))

    def _naples_cmd(cmdline, cookie, **kwargs):
        # Queue a command on the Naples card and record its cookie.
        api.Trigger_AddNaplesCommand(req, naples.node_name, cmdline, **kwargs)
        tc.cmd_cookies.append(cookie)

    def _wl_cmd(wl, cmdline, cookie):
        # Queue a command inside a workload and record its cookie.
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, cmdline)
        tc.cmd_cookies.append(cookie)

    _naples_cmd("/nic/bin/halctl clear session", "clear session")
    _wl_cmd(server, "sudo service rpcbind start", "Start RPC")
    _wl_cmd(client, "rpcinfo -T udp %s 100000 4" % (server.ip_address),
            "RPC Getport")
    _naples_cmd("/nic/bin/halctl show session --alg sun_rpc", "show session")
    _naples_cmd("/nic/bin/halctl show nwsec flow-gate",
                "show security flow-gate")
    # Wait long enough for the ALG flow-gate to time out, then re-check.
    _naples_cmd("sleep 100", "sleep", timeout=120)
    _naples_cmd("/nic/bin/halctl show session --alg sun_rpc",
                "After timeout session")
    _naples_cmd("/nic/bin/halctl show nwsec flow-gate",
                "After timeout flow-gate")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    return api.types.status.SUCCESS
def Cleanup(server, client):
    """Remove the FTP test directories on both workloads and delete the
    locally generated ftp.sh helper script.

    Cleanup is best-effort: it always returns
    api.types.status.SUCCESS.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    # Remove the per-test directories on both endpoints.
    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "rm -rf ftpdir")
    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "rm -rf ftpdir")
    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "rm -rf /home/admin/ftp")
    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "rm -rf /home/admin/ftp")

    # Delete the generated helper script.  Tolerate it being absent so a
    # repeated cleanup never raises (bare os.remove crashed on re-runs).
    ftpfile = os.path.dirname(os.path.realpath(__file__)) + '/' + "ftp.sh"
    try:
        os.remove(ftpfile)
    except FileNotFoundError:
        pass

    api.Trigger(req)
    return api.types.status.SUCCESS
def Trigger(tc):
    """Run ibv_ud_pingpong in both directions between the two workloads
    in tc.w, serialized so each server is listening before its client
    connects."""
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    # First pass: w[0] serves, w[1] connects; second pass swaps roles.
    for srv_idx in range(2):
        cli_idx = (srv_idx + 1) % 2
        srv = tc.w[srv_idx]
        cli = tc.w[cli_idx]

        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (srv.workload_name, srv.ip_address,
                        cli.workload_name, cli.ip_address)
        api.Logger.info("Starting ibv_ud_pingpong test from %s" % (tc.cmd_descr))

        # Server side, left running in the background.
        srv_cmd = ("ibv_ud_pingpong -d %s -g %s -s 1024 -r 10 -n 10" %
                   (tc.devices[srv_idx], tc.gid[srv_idx]))
        api.Trigger_AddCommand(req, srv.node_name, srv.workload_name,
                               tc.ib_prefix[srv_idx] + srv_cmd,
                               background=True)

        # On Naples-Mellanox setups, with Mellanox as server, it takes a
        # few seconds before the server starts listening. So sleep for a
        # few seconds before trying to start the client.
        api.Trigger_AddCommand(req, srv.node_name, srv.workload_name,
                               'sleep 2')

        # Client side, pointed at the server's IP address.
        cli_cmd = ("ibv_ud_pingpong -d %s -g %s -s 1024 -r 10 -n 10 %s" %
                   (tc.devices[cli_idx], tc.gid[cli_idx], srv.ip_address))
        api.Trigger_AddCommand(req, cli.node_name, cli.workload_name,
                               tc.ib_prefix[cli_idx] + cli_cmd)

    # Fire the queued commands, then collect and aggregate results.
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    return api.types.status.SUCCESS
def debug_dump_interface_info(wl):
    """Collect interface configuration/state for a workload to aid debug.

    Gathers `ifconfig` output plus OS-specific link and multicast details
    (linux: `ip -d link` / `ip maddr`; freebsd: `netstat -aI`) and passes
    the responses to debug_dump_display_info().
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    cmd = "ifconfig " + wl.interface
    # BUG FIX: the workload_name argument was missing here, so the command
    # string was passed in the workload slot and never ran in the workload.
    api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, cmd)

    if api.GetNodeOs(wl.node_name) == "linux":
        cmd = "ip -d link show " + wl.interface
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, cmd)
        # BUG FIX: `interface` was an undefined name (NameError at runtime);
        # use the workload's interface attribute like the other commands.
        cmd = "ip maddr show " + wl.interface
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, cmd)
    elif api.GetNodeOs(wl.node_name) == "freebsd":
        cmd = "netstat -aI " + wl.interface
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, cmd)

    resp = api.Trigger(req)
    return debug_dump_display_info(resp)