Example #1
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    tc.serverCmds = []
    tc.clientCmds = []
    tc.cmd_descr = []

    if not api.IsSimulation():
        serverReq = api.Trigger_CreateAllParallelCommandsRequest()
        clientReq = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        serverReq = api.Trigger_CreateExecuteCommandsRequest(serial = False)
        clientReq = api.Trigger_CreateExecuteCommandsRequest(serial = False)

    for idx, pairs in enumerate(tc.workload_pairs):
        client = pairs[0]
        server = pairs[1]

        cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        tc.cmd_descr.append(cmd_descr)
        num_sessions = int(getattr(tc.args, "num_sessions", 1))
        api.Logger.info("Starting Iperf test from %s num-sessions %d" % (cmd_descr, num_sessions))

        if tc.iterators.proto == 'udp':
            port = api.AllocateUdpPort()
            serverCmd = iperf.ServerCmd(port)
            clientCmd = iperf.ClientCmd(server.ip_address, port, proto='udp', jsonOut=True, num_of_streams = num_sessions)
        else:
            port = api.AllocateTcpPort()
            serverCmd = iperf.ServerCmd(port)
            clientCmd = iperf.ClientCmd(server.ip_address, port, jsonOut=True, num_of_streams = num_sessions)

        tc.serverCmds.append(serverCmd)
        tc.clientCmds.append(clientCmd)

        api.Trigger_AddCommand(serverReq, server.node_name, server.workload_name,
                               serverCmd, background = True)

        api.Trigger_AddCommand(clientReq, client.node_name, client.workload_name,
                               clientCmd)

    server_resp = api.Trigger(serverReq)
    # Sleep for some time as the background server commands may not have started yet.
    time.sleep(30)

    tc.iperf_client_resp = api.Trigger(clientReq)
    # It's faster to kill the iperf servers directly,
    # but still call terminate on all background commands.
    api.Trigger_TerminateAllCommands(server_resp)

    return api.types.status.SUCCESS
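
A minimal sketch of a matching Verify step for the example above, assuming the tc fields populated in Trigger (tc.cmd_descr, tc.iperf_client_resp) and that client commands come back in the order they were added; none of this is taken from the original source:

def Verify(tc):
    if tc.skip: return api.types.status.SUCCESS

    result = api.types.status.SUCCESS
    for idx, cmd in enumerate(tc.iperf_client_resp.commands):
        # Index pairing with tc.cmd_descr assumes insertion order is preserved.
        if cmd.exit_code != 0:
            api.Logger.error("Iperf client failed for %s" % tc.cmd_descr[idx])
            api.PrintCommandResults(cmd)
            result = api.types.status.FAILURE
    return result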
Example #2
    def __node_api_handler(self, url, json_data=None, oper=CfgOper.ADD):
        if oper == CfgOper.DELETE:
            oper = "DELETE"
        elif oper == CfgOper.ADD:
            oper = "POST"
        elif oper == CfgOper.UPDATE:
            oper = "PUT"
        elif oper == CfgOper.GET:
            oper = "GET"
        else:
            print(oper)
            assert (0)
        if GlobalOptions.debug:
            api.Logger.info("Url : %s" % url)

        cmd = None
        if json_data and len(json.dumps(json_data)) > 100000:
            filename = "/tmp/temp_config.json"
            with open(filename, 'w') as outfile:
                json.dump(json_data, outfile)

            req = api.Trigger_CreateAllParallelCommandsRequest()
            cmd = ["rm", "-rf", "temp_config.json"]
            cmd = " ".join(cmd)
            api.Trigger_AddHostCommand(req, self.host_name, cmd, timeout=3600)
            api.Trigger(req)

            resp = api.CopyToHost(self.host_name, [filename], "")
            if not api.IsApiResponseOk(resp):
                assert (0)
            cmd = [
                "curl", "-X", oper, "-d", "@temp_config.json", "-k", "-H",
                "\"Content-Type:application/json\"", url
            ]
        else:
            cmd = [
                "curl", "-X", oper, "-k", "-d",
                "\'" + json.dumps(json_data) + "\'" if json_data else " ",
                "-H", "\"Content-Type:application/json\"", url
            ]
        cmd = " ".join(cmd)
        req = api.Trigger_CreateAllParallelCommandsRequest()
        api.Trigger_AddHostCommand(req, self.host_name, cmd, timeout=3600)

        resp = api.Trigger(req)
        if GlobalOptions.debug:
            print(" ".join(cmd))
        return resp.commands[0].stdout
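
A hypothetical usage sketch for the handler above; the PushConfig method name, the self.cfg_url attribute, and the cfg_json payload are illustrative assumptions, not part of the original class:

    def PushConfig(self, cfg_json):
        # POST the JSON config to the node's REST endpoint and return the raw response body.
        return self.__node_api_handler(self.cfg_url, json_data=cfg_json,
                                       oper=CfgOper.ADD)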
Example #3
def __add_iptables_to_workloads(workloads=[]):
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial = False)

    workloads = workloads if workloads else api.GetWorkloads()
    for wl in workloads:
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                "iptables -A INPUT -p tcp -i %s --src %s -j DROP" % (wl.interface, wl.ip_prefix))
        api.Logger.info(f"iptables -A INPUT -p tcp -i {wl.interface} --src {wl.ip_prefix} -j DROP")
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                "iptables -A INPUT -p tcp -i %s --dst %s -j DROP" % (wl.interface, wl.ip_prefix))
        api.Logger.info(f"iptables -A INPUT -p tcp -i {wl.interface} --dst {wl.ip_prefix} -j DROP")
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                "iptables -A INPUT -p udp -i %s --src %s -j DROP" % (wl.interface, wl.ip_prefix))
        api.Logger.info(f"iptables -A INPUT -p udp -i {wl.interface} --src {wl.ip_prefix} -j DROP")
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                "iptables -A INPUT -p udp -i %s --dst %s -j DROP" % (wl.interface, wl.ip_prefix))
        api.Logger.info(f"iptables -A INPUT -p udp -i {wl.interface} --dst {wl.ip_prefix} -j DROP")

    resp = api.Trigger(req)
    if resp is None:
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Example #4
def Trigger(tc):
    tc.context = None
    ctxt = PingTestContext()
    ctxt.req = api.Trigger_CreateAllParallelCommandsRequest()
    ctxt.cmd_cookies = []
    for tunnel in tc.tunnels:
        w1 = tunnel.ltep
        w2 = tunnel.rtep

        for sec_ipaddr in w1.sec_ip_addresses:
            if ipaddress.IPv4Address(sec_ipaddr).is_reserved:
                api.Logger.info("Workload1: ClassE IP = %s" % sec_ipaddr)
        for sec_ipaddr in w2.sec_ip_addresses:
            if ipaddress.IPv4Address(sec_ipaddr).is_reserved:
                api.Logger.info("Workload2: ClassE IP = %s" % sec_ipaddr)

        cmd_cookie = "%s(%s) --> %s(%s)" %\
                     (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
        api.Trigger_AddCommand(
            ctxt.req, w1.node_name, w1.workload_name,
            "ping -i 0.2 -c 20 -s %d %s" % (64, w2.ip_address))

        api.Logger.info("Ping test from %s" % (cmd_cookie))
        ctxt.cmd_cookies.append(cmd_cookie)
    ctxt.resp = api.Trigger(ctxt.req)
    tc.context = ctxt

    return api.types.status.SUCCESS
Example #5
def ARPingWorkloads(workload_pairs,
                    update_neighbor=False,
                    send_dad=False,
                    count=3):
    cmd_cookies = []

    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    for pair in workload_pairs:
        w1 = pair[0]
        w2 = pair[1]

        arp_base_cmd = __get_arp_base_cmd(w1, update_neighbor, send_dad, count)
        addr = __get_workload_address(w1, "ipv4")
        if update_neighbor is True:
            dest_addr = " %s" % (addr)
        else:
            addr = __get_workload_address(w2, "ipv4")
            dest_addr = " %s" % (addr)
        arp_cmd = arp_base_cmd + dest_addr

        api.Logger.verbose(" ARP cmd %s " % (arp_cmd))
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, arp_cmd)
        cmd_cookies.append(arp_cmd)

    resp = api.Trigger(req)
    return cmd_cookies, resp
Example #6
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    req = None
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    tc.cmd_cookies = []
    if tc.args.type == 'remote_only':
        tc.control_ep_pairs = config_api.GetControlEPPairs(
            pair_type=config_api.WORKLOAD_PAIR_TYPE_REMOTE_ONLY)

    if len(tc.control_ep_pairs) == 0:
        api.Logger.error("Skipping Testcase due to no VCN Endpoint pairs.")
        return api.types.status.FAILURE

    for pair in tc.control_ep_pairs:
        api.Logger.verbose("Ping between %s and %s" %
                           (pair[0].ip_addresses[0], pair[1].ip_addresses[0]))
        if pair[0].node_name == tc.naples_node0:
            cmd_cookie = "ip netns exec %s ping %s -c 10 -s %d" % (
                "vcn", pair[1].ip_addresses[0], tc.iterators.pktsize)
        else:
            cmd_cookie = "ip netns exec %s ping %s -c 10 -s %d" % (
                "vcn", pair[0].ip_addresses[0], tc.iterators.pktsize)

        api.Trigger_AddNaplesCommand(req, tc.naples_node0, cmd_cookie)
        api.Logger.info("Ping test %s" % (cmd_cookie))
        tc.cmd_cookies.append(cmd_cookie)
    tc.resp = api.Trigger(req)

    return api.types.status.SUCCESS
Example #7
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    req = None
    interval = "0.2"
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial = False)
        interval = "3"
    tc.cmd_cookies = []

    for pair in tc.workload_pairs:
        w1 = pair[0]
        w2 = pair[1]
        if tc.iterators.ipaf == 'ipv6':
            cmd_cookie = "%s(%s) --> %s(%s)" %\
                         (w1.workload_name, w1.ipv6_address, w2.workload_name, w2.ipv6_address)
            api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                                   "ping6 -i %s -c 20 -s %d %s" % (interval, tc.iterators.pktsize, w2.ipv6_address))
        else:
            cmd_cookie = "%s(%s) --> %s(%s)" %\
                         (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
            api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                                   "ping -i %s -c 20 -s %d %s" % (interval, tc.iterators.pktsize, w2.ip_address))
        api.Logger.info("Ping test from %s" % (cmd_cookie))
        tc.cmd_cookies.append(cmd_cookie)

    tc.resp = api.Trigger(req)

    return api.types.status.SUCCESS
Example #8
def trigger_ping_to_bond(tc):
    req = api.Trigger_CreateAllParallelCommandsRequest()

    for node, wl in utils.GetBondWorkloadPair():
        node_name, bond_ip = node.Name(), node.GetBondIp()

        # for baremetal, don't ping to workloads in the nodes where the uplink is brought down.
        if tc.is_bm_type and tc.uplink_fail_stage:
            if wl.node_name in tc.nodes:
                #api.Logger.info("Skipping ping b/w %s <--> %s"%(bond_ip, wl.ip_address))
                continue

        # Ping Bond ===> Workload
        cmd_cookie = "%s(%s) --> %s(%s)" % (node_name, bond_ip,
                                            wl.workload_name, wl.ip_address)
        tc.cmd_cookies.append(cmd_cookie)
        api.Logger.info("%s" % (cmd_cookie))
        cmd = "ping -i %s -c 20 -s %d %s" % (tc.interval, tc.iterators.pktsize,
                                             wl.ip_address)
        api.Trigger_AddNaplesCommand(req, node_name, cmd)

        # Ping Workload ===> Bond
        cmd_cookie = "%s(%s) --> %s(%s)" % (wl.workload_name, wl.ip_address,
                                            node_name, bond_ip)
        tc.cmd_cookies.append(cmd_cookie)
        api.Logger.info("%s" % (cmd_cookie))
        cmd = "ping -i %s -c 20 -s %d %s" % (tc.interval, tc.iterators.pktsize,
                                             bond_ip)
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, cmd)

    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
Example #9
def Trigger(tc):
    tc.contexts = []
    ctxt = IperfTestContext()
    ctxt.req = api.Trigger_CreateAllParallelCommandsRequest()
    ctxt.cmd_cookies = []
    for tunnel in tc.tunnels:
        w1 = tunnel.ltep
        w2 = tunnel.rtep

        cmd_cookie = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
        api.Logger.info("Starting Iperf test from %s" % (cmd_cookie))

        basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
        if tc.iterators.proto == 'udp':
            basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()
        api.Trigger_AddCommand(ctxt.req, w1.node_name, w1.workload_name,
                               "%s -s -t 300" % basecmd, background = True)
        api.Trigger_AddCommand(ctxt.req, w2.node_name, w2.workload_name,
                               "%s -c %s" % (basecmd, w1.ip_address))

        ctxt.cmd_cookies.append(cmd_cookie)
        ctxt.cmd_cookies.append(cmd_cookie)
    trig_resp = api.Trigger(ctxt.req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    ctxt.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    tc.context = ctxt

    return api.types.status.SUCCESS
Example #10
def ChangeHostLifsAdminStatus(tc, shutdown=False):
    result = api.types.status.SUCCESS
    req = api.Trigger_CreateAllParallelCommandsRequest()

    tc.cmd_cookies = []
    for node in tc.nodes:
        for intf_wl in tc.host_lifs[node]:
            intf, wl = next(iter(intf_wl.items()))
            # Change admin status
            cmd = "ifconfig %s %s" % (intf, ("down" if shutdown else "up"))
            if wl:
                cmd_cookie = "Node: %s, WL: %s, intf: %s, shutdown: %s" % (
                    node, wl, intf, shutdown)
                tc.cmd_cookies.append(cmd_cookie)
                #api.Logger.info("%s"%(cmd_cookie))
                api.Trigger_AddCommand(req, node, wl, cmd)
            else:
                cmd_cookie = "Node: %s, intf: %s, shutdown: %s" % (node, intf,
                                                                   shutdown)
                tc.cmd_cookies.append(cmd_cookie)
                #api.Logger.info("%s"%(cmd_cookie))
                api.Trigger_AddHostCommand(req, node, cmd)

    tc.resp = api.Trigger(req)

    cookie_idx = 0
    for cmd in tc.resp.commands:
        if cmd.exit_code != 0:
            result = api.types.status.FAILURE
            api.Logger.info("Failed to change Admin for %s" %
                            (tc.cmd_cookies[cookie_idx]))
            api.PrintCommandResults(cmd)
        cookie_idx += 1
    return result
Example #11
def TriggerUnderlayConnectivityTest(ping_count=20, connectivity = 'bgp_peer', interval = 0.01):
    req = None
    req = api.Trigger_CreateAllParallelCommandsRequest()
    cmd_cookies = []
    naplesHosts = api.GetNaplesHostnames()

    if connectivity == 'bgp_peer':
        for node in naplesHosts:
            for bgppeer in bgp_peer.client.Objects(node):
                cmd_cookie = "%s --> %s" %\
                             (str(bgppeer.LocalAddr), str(bgppeer.PeerAddr))
                api.Trigger_AddNaplesCommand(req, node, \
                        f"ping -i {interval} -c {ping_count} -s 64 {str(bgppeer.PeerAddr)}")
                api.Logger.verbose(f"Ping test from {cmd_cookie}")
                cmd_cookies.append(cmd_cookie)
    else:
        for node1 in naplesHosts:
            for node2 in naplesHosts:
                if node1 == node2:
                    continue
                objs = device.client.Objects(node1)
                device1 = next(iter(objs))
                objs = device.client.Objects(node2)
                device2 = next(iter(objs))
                cmd_cookie = "%s --> %s" %\
                             (device1.IP, device2.IP)
                api.Trigger_AddNaplesCommand(req, node1, \
                        f"ping -i {interval} -c {ping_count} -s 64 {device2.IP}")
                api.Logger.verbose(f"Loopback ping test from {cmd_cookie}")
                cmd_cookies.append(cmd_cookie)

    resp = api.Trigger(req)

    return resp, cmd_cookies
Example #12
def ConnectivityVRIPTest(proto='icmp', af='ipv4', pktsize=64,
        scope=config_api.WORKLOAD_PAIR_SCOPE_INTRA_SUBNET, args=None):

    cmd_cookies = []
    cmd = None
    # default probe count is 3
    probe_count = 3
    sent_probes = dict()

    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial = False)

    naplesHosts = api.GetNaplesHostnames()
    vnics = []
    subnets = []
    for node in naplesHosts:
        vnics.extend(vnic.client.Objects(node))
        subnets.extend(subnet.client.Objects(node))

    if scope == config_api.WORKLOAD_PAIR_SCOPE_INTRA_SUBNET:
        for vnic1 in vnics:
            if vnic1.SUBNET.VPC.Type == vpc_pb2.VPC_TYPE_CONTROL:
                continue
            wl = config_api.FindWorkloadByVnic(vnic1)
            assert(wl)
            dest_ip = vnic1.SUBNET.GetIPv4VRIP()
            cmd = traffic_utils.PingCmdBuilder(wl, dest_ip, proto, af, pktsize, args, probe_count)
            api.Logger.info(f" VR_IP on {wl.node_name}: {cmd}")
            api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, cmd)
            cmd_cookies.append(cmd)
            cur_cnt = sent_probes.get(wl.node_name, 0)
            sent_probes.update({wl.node_name: cur_cnt + probe_count})
    else:
        for vnic1 in vnics:
            if vnic1.SUBNET.VPC.Type == vpc_pb2.VPC_TYPE_CONTROL:
                continue
            wl = config_api.FindWorkloadByVnic(vnic1)
            assert(wl)
            for subnet1 in subnets:
                if subnet1.VPC.Type == vpc_pb2.VPC_TYPE_CONTROL:
                    continue
                if subnet1.Node != vnic1.Node:
                    continue
                if scope == config_api.WORKLOAD_PAIR_SCOPE_INTER_SUBNET and (vnic1.SUBNET.GID() == subnet1.GID()):
                    continue
                dest_ip = subnet1.GetIPv4VRIP()
                cmd = traffic_utils.PingCmdBuilder(wl, dest_ip, proto, af, pktsize, args, probe_count)
                api.Logger.info(f" VRIP on {wl.node_name}: {cmd} ")
                api.Trigger_AddCommand(req, wl.node_name, wl.workload_name, cmd)
                cmd_cookies.append(cmd)
                cur_cnt = sent_probes.get(wl.node_name, 0)
                sent_probes.update({wl.node_name: cur_cnt + probe_count})

    resp = api.Trigger(req)

    return cmd_cookies, resp, sent_probes
Example #13
def ArPing(tc):
    if tc.args.type == 'local_only':
        api.Logger.info("local_only test")
        tc.workload_pairs = api.GetLocalWorkloadPairs()
    elif tc.args.type == 'both':
        api.Logger.info(" both local and remote test")
        tc.workload_pairs = api.GetLocalWorkloadPairs()
        tc.workload_pairs.extend(api.GetRemoteWorkloadPairs())
    else:
        api.Logger.info("remote_only test")
        tc.workload_pairs = api.GetRemoteWorkloadPairs()

    if len(tc.workload_pairs) == 0:
        api.Logger.info("Skipping Testcase due to no workload pairs.")
        tc.skip = True

    req = None
    interval = "0.2"
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        interval = "3"
    tc.cmd_cookies = []

    for pair in tc.workload_pairs:
        w1 = pair[0]
        w2 = pair[1]
        cmd_cookie = "%s %s %s %s" % (w1.node_name, w1.workload_name,
                                      w1.interface, w1.ip_address)
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                               "arping -c  5 -U %s -I eth1" % (w1.ip_address))
        api.Logger.info("ArPing from %s" % (cmd_cookie))
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "%s %s %s %s" % (w2.node_name, w2.workload_name,
                                      w2.interface, w2.ip_address)
        api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                               "arping -c  5 -U %s -I eth1" % (w2.ip_address))
        api.Logger.info("ArPing from %s" % (cmd_cookie))
        tc.cmd_cookies.append(cmd_cookie)

    tc.resp = api.Trigger(req)

    if tc.resp is None:
        return api.types.status.FAILURE

    result = api.types.status.SUCCESS
    cookie_idx = 0

    for cmd in tc.resp.commands:
        api.Logger.info("ArPing Results for %s" % (tc.cmd_cookies[cookie_idx]))
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            result = api.types.status.FAILURE
        cookie_idx += 1
    return result
Example #14
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    tc.serverCmds = []
    tc.clientCmds = []
    tc.cmd_descr = []

    iperfClients = []
    serverReq = api.Trigger_CreateAllParallelCommandsRequest()

    dip_port_cache = dict()

    def __dip_port_key(dip, port):
        return dip + ":" + str(port)

    for idx, pairs in enumerate(tc.workload_pairs):
        client = pairs[0]
        server = pairs[1]
        port = int(pairs[2])

        server_key = __dip_port_key(server.ip_address, port)


        cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        tc.cmd_descr.append(cmd_descr)
        num_sessions = int(getattr(tc.args, "num_sessions", 1))
        api.Logger.info("Starting Iperf test from %s num-sessions %d" %
                        (cmd_descr, num_sessions))

        serverCmd = None

        #if tc.iterators.proto == 'udp':
        #port = api.AllocateTcpPort()
        if server_key not in dip_port_cache:
            dip_port_cache[server_key] = True

            serverCmd = iperf.ServerCmd(port)
            tc.serverCmds.append(serverCmd)
            api.Trigger_AddCommand(serverReq,
                                   server.node_name,
                                   server.workload_name,
                                   serverCmd,
                                   background=True,
                                   stdout_on_err=True,
                                   stderr_on_err=True)

        iperfClients.append(
            IperfClient(server.ip_address, port, client.node_name,
                        client.workload_name))

    store = tc.GetBundleStore()
    store["server_req"] = serverReq
    store["client_ctxts"] = iperfClients

    return api.types.status.SUCCESS
Example #15
def delete_file(tc):
    result = True
    req = api.Trigger_CreateAllParallelCommandsRequest()
    for node in tc.nodes:
        api.Trigger_AddNaplesCommand(req, node, tc.delete_cmd)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        if cmd.exit_code != 0:
            api.PrintCommandResults(cmd)
            result = False
    return result
Example #16
def trigger_techsupport_requests(tc):
    result = ts_utils.DeleteTSDir(tc.nodes)
    if result != True:
        api.Logger.error("Failed to clean techsupport in naples before request")
        return result

    req = api.Trigger_CreateAllParallelCommandsRequest()
    for node in tc.nodes:
        api.Logger.info(f"Collecting techsupport from {node}")
        api.Trigger_AddNaplesCommand(req, node, TS_SCRIPT, timeout=300)
    tc.resp = api.Trigger(req)
    return True
Example #17
def pingWorkloads(workload_pairs,
                  af="ipv4",
                  packet_size=64,
                  count=3,
                  interval=0.2,
                  do_pmtu_disc=False,
                  sec_ip_test_type='none'):
    cmd_cookies = []

    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    for pair in workload_pairs:
        w1 = pair[0]
        w2 = pair[1]

        ping_base_cmd = __get_ping_base_cmd(w1, af, packet_size, count,
                                            interval, do_pmtu_disc)
        if sec_ip_test_type == 'all':
            src_list = [__get_workload_address(w1, af)] + w1.sec_ip_addresses
            dst_list = [__get_workload_address(w2, af)] + w2.sec_ip_addresses
            for src_ip in src_list:
                for dst_ip in dst_list:
                    ping_cmd = __add_source_ip_to_ping_cmd(
                        ping_base_cmd, src_ip)
                    ping_cmd = __ping_addr_substitution(ping_cmd, dst_ip)
                    api.Logger.verbose(" Ping cmd %s " % (ping_cmd))
                    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                                           ping_cmd)
                    cmd_cookies.append(ping_cmd)
        else:
            addr = __get_workload_address(w2, af)
            ping_cmd = __ping_addr_substitution(ping_base_cmd, addr)
            api.Logger.verbose(" Ping cmd %s " % (ping_cmd))
            api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                                   ping_cmd)
            cmd_cookies.append(ping_cmd)

            if sec_ip_test_type == 'random' and w1.sec_ip_addresses and w2.sec_ip_addresses:
                ping_cmd = __add_source_ip_to_ping_cmd(
                    ping_base_cmd, random.choice(w1.sec_ip_addresses))
                ping_cmd = __ping_addr_substitution(
                    ping_cmd, random.choice(w2.sec_ip_addresses))
                api.Logger.verbose(" Ping cmd %s " % (ping_cmd))
                api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                                       ping_cmd)
                cmd_cookies.append(ping_cmd)

    resp = api.Trigger(req)
    return cmd_cookies, resp
Example #18
def TriggerHping(workload_pairs,
                 proto,
                 af,
                 pktsize,
                 count=3,
                 tcp_flags=None,
                 rflow=False):
    resp = None
    cmd_cookies = []
    options = ''

    req = api.Trigger_CreateAllParallelCommandsRequest()

    if proto in ['tcp', 'udp']:
        src_port = 1234
        dest_port = 8000
        if proto == 'tcp' and tcp_flags is not None:
            if "fin" in tcp_flags:
                options += ' -F'
    else:
        src_port = 0
        dest_port = 0

    for pair in workload_pairs:
        src_wl = pair[0]
        dst_wl = pair[1]

        dest_ip = dst_wl.ip_address

        cmd = GetHping3Cmd(proto, src_wl, dest_ip, dest_port, src_port, count,
                           options)
        background = False
        if count > 100:
            background = True
        api.Trigger_AddCommand(req,
                               src_wl.node_name,
                               src_wl.workload_name,
                               cmd,
                               background=background)
        cmd_cookies.append(cmd)

        if rflow:
            # If rflow is True, then trigger hping in the reverse direction as well
            cmd = GetHping3Cmd(proto, dst_wl, src_wl.ip_address, src_port,
                               dest_port, count, options)
            api.Trigger_AddCommand(req, dst_wl.node_name, dst_wl.workload_name,
                                   cmd)
            cmd_cookies.append(cmd)

    resp = api.Trigger(req)

    return cmd_cookies, resp
Example #19
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    store = tc.GetBundleStore()
    cps = int(getattr(tc.args, "cps", 500))
    attempts = int(getattr(tc.args, "attempts", 3))
    sessions = int(getattr(tc.args, "num_sessions", 1))
    session_time = str(getattr(tc.args, "session_time", "10")) + "s"
    static_arp = int(getattr(tc.args, "static_arp", False))

    if static_arp:
        #install arp entries before running
        arpReqs = store["arp_ctx"]
        api.Trigger(arpReqs)

    #Spread CPS evenly
    clientReq = api.Trigger_CreateAllParallelCommandsRequest()

    cps_per_node = int(cps / len(store["client_ctxts"]))
    for index, (_, client) in enumerate(store["client_ctxts"].items()):
        jsonInput = {"connections": []}
        for serverIPPort in client.GetServers():
            jsonInput["connections"].append({
                "ServerIPPort": serverIPPort,
                "proto": "tcp"
            })
        outfile = api.GetTestDataDirectory(
        ) + "/" + client.workload_name + "_fuz.json"
        with open(outfile, 'w') as fp:
            json.dump(jsonInput, fp)
        api.CopyToWorkload(client.node_name, client.workload_name, [outfile],
                           "")
        clientCmd = fuz_init.FUZ_EXEC[
            client.workload_name] + " -attempts " + str(
                attempts) + " -duration " + session_time + " -conns " + str(
                    sessions) + " -cps " + str(
                        cps_per_node
                    ) + " -talk  --jsonOut --jsonInput " + os.path.basename(
                        outfile)
        api.Trigger_AddCommand(clientReq,
                               client.node_name,
                               client.workload_name,
                               clientCmd,
                               background=True)

    #Initiate connections
    store["client_req"] = api.Trigger(clientReq)

    return api.types.status.SUCCESS
Example #20
def HitlessAddUpgTestAppToJson(node, naples_json_dir):
    file = 'upgrade_hitless.json'
    tmp_dir = f"/tmp/{node}/"

    # remove already existing one if there are any
    if os.path.exists(f"{tmp_dir}/{file}"):
        os.remove(f"{tmp_dir}/{file}")
    # copy hitless.json from naples
    api.CopyFromNaples(node, [f"{naples_json_dir}/{file}"],
                       f"{tmp_dir}",
                       via_oob=True)

    if api.GlobalOptions.dryrun:
        return api.types.status.SUCCESS

    if not os.path.exists(f"{tmp_dir}/{file}"):
        api.Logger.error(f"Upgrade json for {node} not found @ {file}")
        return api.types.status.FAILURE

    # delete from the host
    req = api.Trigger_CreateAllParallelCommandsRequest()
    api.Trigger_AddHostCommand(req, node, f"rm -f {file}")
    resp = api.Trigger(req)

    file = f"{tmp_dir}/{file}"
    cmd = "cp %s %s.org && " % (file, file)
    # add upgtestapp to the discovery and serial list
    cmd = cmd + '''awk ' BEGIN { found=0;line=0 }
                    /"upg_svc"/ { found=1;line=0 }
                    /.*/ { if (found == 1 && line == 1) print "    \\\"upgtestapp\\\"," }
                    /.*/ { print $0;line=line+1 } ' %s.org > %s && ''' % (file,
                                                                          file)
    cmd = cmd + '''sed -i 's/"svc_sequence" : "\([a-z:].*\)"/"svc_sequence" : "\\1:upgtestapp"/' "%s"''' % (
        file)
    rv = subprocess.call(cmd, shell=True)
    if rv != 0:
        api.Logger.error(f"Upgrade hitless json modify {cmd} failed")
        return api.types.status.FAILURE

    # copy the modified json back
    resp = api.CopyToNaples(node, [file],
                            "",
                            naples_dir=f"{naples_json_dir}",
                            via_oob=True)
    if resp.api_response.api_status != types_pb2.API_STATUS_OK:
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Example #21
def addPktFilterRuleOnEp(wl_pairs, proto, enable=True):
    req = api.Trigger_CreateAllParallelCommandsRequest()

    for wl_pair in wl_pairs:
        wl = wl_pair[1]
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                               f"iptables -{'A' if enable else 'D'} INPUT -p {proto} -j DROP -w")

    resp = api.Trigger(req)
    result = 0
    for cmd in resp.commands:
        if cmd.exit_code != 0:
            api.PrintCommandResults(cmd)
        result |= cmd.exit_code

    return False if result else True
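
A usage sketch for the helper above; the surrounding negative-traffic test and the use of api.GetRemoteWorkloadPairs() as the pair source are assumptions. The enable flag switches between installing (-A) and removing (-D) the DROP rules:

    wl_pairs = api.GetRemoteWorkloadPairs()
    if not addPktFilterRuleOnEp(wl_pairs, "tcp", enable=True):
        return api.types.status.FAILURE
    # ... run traffic that is expected to be dropped ...
    if not addPktFilterRuleOnEp(wl_pairs, "tcp", enable=False):
        return api.types.status.FAILURE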
Example #22
def Trigger(tc):

    store = tc.GetBundleStore()
    clientReq = api.Trigger_CreateAllParallelCommandsRequest()
    num_sessions = int(getattr(tc.args, "num_sessions", 1))
    for client in store["client_ctxts"]:
        clientCmd = iperf.ClientCmd(client.server_ip, client.server_port,
                proto=tc.iterators.proto, jsonOut=True,
                num_of_streams = num_sessions, connect_timeout = 15000)
        api.Trigger_AddCommand(clientReq, client.node_name, client.workload_name,
                               clientCmd, timeout = 0,
                               stdout_on_err = True, stderr_on_err = True)


    tc.iperf_client_resp = api.Trigger(clientReq)

    return api.types.status.SUCCESS
Example #23
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    store = tc.GetBundleStore()
    serverReq = store["server_req"]

    if serverReq:
        store["server_resp"] = api.Trigger(serverReq)

    # If the servers are to be started combined
    server_pcap_info = []
    if store.get("server_ctxts", None):
        serverReq = api.Trigger_CreateAllParallelCommandsRequest()
        for _, server in store["server_ctxts"].items():
            jsonInput = {"connections": []}
            for serverIPPort in server.GetServers():
                jsonInput["connections"].append({
                    "ServerIPPort": serverIPPort,
                    "proto": "tcp"
                })
            outfile = api.GetTestDataDirectory(
            ) + "/" + server.node_name + "_fuz.json"
            with open(outfile, 'w') as fp:
                json.dump(jsonInput, fp)
            api.CopyToHost(server.node_name, [outfile], "")
            serverCmd = fuz_init.FUZ_EXEC[
                server.workload_name] + " --jsonInput " + os.path.basename(
                    outfile) + " --jsonOut"
            api.Trigger_AddHostCommand(serverReq,
                                       server.node_name,
                                       serverCmd,
                                       background=True)

            for intf in server.interfaces:
                server_pcap_info.append(
                    (server.node_name, intf, intf + ".pcap"))

        #Start server
        store["combined_server_resp"] = api.Trigger(serverReq)
        #Start Pcap just before starting client
        store["server_pcap_info"] = server_pcap_info

    # Sleep for some time as the background commands may not have started yet.
    time.sleep(30)

    return api.types.status.SUCCESS
Example #24
def verify_copy(tc):
    result = True
    req = api.Trigger_CreateAllParallelCommandsRequest()
    for node in tc.nodes:
        api.Trigger_AddNaplesCommand(req, node, tc.verify_cmd)
    resp = api.Trigger(req)
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            api.Logger.error("Failed to verify md5sum")
            result = False
        elif tc.src_file_md5sum != __parse_md5sum(cmd):
            if GlobalOptions.dryrun:
                continue
            api.Logger.error("md5sum mismatch")
            result = False
    return result
Example #25
def __generate_ping_command_cookies(tc):
    req = None
    interval = "0.2"
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial = False)
        interval = "3"
    
    tc.cmd_cookies = []
    for pair in tc.workload_pairs:
        w1 = pair[0]
        w2 = pair[1]
        cmd_cookie = "%s(%s) --> %s(%s)" % (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                               "ping -i %s -c 20 -s %d %s" % (interval, 128, w2.ip_address))
        api.Logger.info("Ping test from %s" % (cmd_cookie))
        tc.cmd_cookies.append(cmd_cookie)
    return api.Trigger(req)
Example #26
def SetWorkloadIntfOperState(wl, state):
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                           "ifconfig %s %s" % (wl.interface, state))
    api.Logger.debug("ifconfig %s %s from %s %s" %
                     (wl.interface, state, wl.node_name, wl.workload_name))

    resp = api.Trigger(req)
    if resp is None:
        return False

    if state == 'up':
        add_routes.AddRoutes(config_api.FindVnicObjectByWorkload(wl))
    return resp.commands[0].exit_code == 0
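
A small sketch showing how the helper above might be used to flap a workload interface; the flap_interface wrapper and the hold_time parameter are assumptions (time is assumed to be imported, as in the other examples):

def flap_interface(wl, hold_time=5):
    # Bring the interface down, wait, then bring it back up;
    # SetWorkloadIntfOperState('up') also re-adds routes via add_routes.AddRoutes.
    if not SetWorkloadIntfOperState(wl, 'down'):
        return api.types.status.FAILURE
    time.sleep(hold_time)
    if not SetWorkloadIntfOperState(wl, 'up'):
        return api.types.status.FAILURE
    return api.types.status.SUCCESS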
Example #27
    def __mk_testcase_directory(self, newdir):
        Logger.debug("Creating Testcase directory: %s" % newdir)
        command = "mkdir -p %s && chmod 777 %s" % (newdir, newdir)
        req = api.Trigger_CreateAllParallelCommandsRequest()
        for nodename in api.GetWorkloadNodeHostnames():
            api.Trigger_AddHostCommand(req, nodename, command)
        for wl in api.GetWorkloads():
            if api.IsWorkloadRunning(wl.workload_name):
                api.Trigger_AddCommand(req,
                                       wl.node_name,
                                       wl.workload_name,
                                       command,
                                       timeout=60)
        resp = api.Trigger(req)
        if not api.Trigger_IsSuccess(resp):
            Logger.error("Failed to create destination directory %s" % newdir)
            return types.status.FAILURE
        return types.status.SUCCESS
Example #28
def __add_secondary_ip_to_workloads(workloads=[]):
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial = False)

    workloads = workloads if workloads else api.GetWorkloads()
    for wl in workloads:
        for sec_ip_addr in wl.sec_ip_addresses:
            api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                                   "ifconfig %s add %s" % (wl.interface, sec_ip_addr))
            api.Logger.debug("ifconfig add from %s %s %s %s" % (wl.node_name, wl.workload_name, wl.interface, sec_ip_addr))

    resp = api.Trigger(req)
    if resp is None:
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Example #29
def Trigger(tc):

    if tc.skip:
        return api.types.status.SUCCESS

    req = None
    interval = "0.2"
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        interval = "3"
    tc.cmd_cookies = []

    if tc.os == compat.OS_TYPE_LINUX:
        for node in tc.nodes:
            cmd_cookie = "Driver-FW-Version"
            cmd = "/naples/nodeinit.sh --version"
            api.Trigger_AddHostCommand(req, node, cmd)
            tc.cmd_cookies.append(cmd_cookie)

    for pair in tc.workload_pairs:
        w1 = pair[0]
        w2 = pair[1]
        if tc.args.ipaf == 'ipv6':
            cmd_cookie = "%s(%s) --> %s(%s)" %\
                         (w1.workload_name, w1.ipv6_address, w2.workload_name, w2.ipv6_address)
            api.Trigger_AddCommand(
                req, w1.node_name, w1.workload_name,
                "ping6 -i %s -c 20 -s %d %s" %
                (interval, tc.args.pktsize, w2.ipv6_address))
        else:
            cmd_cookie = "%s(%s) --> %s(%s)" %\
                         (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
            api.Trigger_AddCommand(
                req, w1.node_name, w1.workload_name,
                "ping -i %s -c 20 -s %d %s" %
                (interval, tc.args.pktsize, w2.ip_address))
        api.Logger.info("Ping test from %s" % (cmd_cookie))
        tc.cmd_cookies.append(cmd_cookie)

    tc.resp = api.Trigger(req)

    return api.types.status.SUCCESS
Example #30
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    store = tc.GetBundleStore()
    serverResp = store["server_resp"]

    kill_cmd = "pkill -9 iperf3"
    server_nodes = set()
    for cmd in serverResp.commands:
        server_nodes.add(cmd.node_name)

    serverReq = api.Trigger_CreateAllParallelCommandsRequest()
    for server in server_nodes:
        api.Trigger_AddHostCommand(serverReq, server, kill_cmd)
    api.Trigger(serverReq)

    #Still call terminate on all
    api.Trigger_TerminateAllCommands(serverResp)
    return api.types.status.SUCCESS