Exemplo n.º 1
0
def Verify(tc):
    """Verify the testcase: dump command results and validate the client pcap.

    Restores workloads after vMotion when enabled, prints every command
    result, copies the pcap captured on the client workload into
    ``tc.dir_path`` and checks the packet data length against ``tc.test_mss``.

    Returns:
        api.types.status.SUCCESS or api.types.status.FAILURE.
    """
    if getattr(tc.args, 'vmotion_enable', False):
        vmotion_utils.PrepareWorkloadRestore(tc)

    # No trigger response recorded means there is nothing to verify.
    # (Fix: compare to None with 'is', not '=='.)
    if tc.resp is None:
        return api.types.status.SUCCESS

    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)

    if not tc.pcap_file_name:
        api.Logger.error("Invalid Pcap file")
        return api.types.status.FAILURE

    # Pull the capture off the client workload for local inspection.
    ret = api.CopyFromWorkload(tc.client.node_name, tc.client.workload_name,
                               [tc.pcap_file_name], tc.dir_path)
    if not ret:
        return api.types.status.FAILURE

    return __verify_pcap_packet_data_len(f"{tc.dir_path}/{tc.pcap_file_name}",
                                         tc.test_mss)
Exemplo n.º 2
0
def GetTcpdumpData(node):
    """Copy the tcpdump output file ('out.txt') from *node*'s workload.

    NOTE(review): destination directory comes from the module-level
    ``dir_path`` — confirm it is defined at import time.

    Returns:
        The CopyFromWorkload response, or None when the copy failed.
    """
    resp = api.CopyFromWorkload(node.node_name, node.workload_name,
                                ['out.txt'], dir_path)
    if resp is None:
        return None
    # Fix: the original fell through and implicitly returned None even on
    # success; propagate the response so callers can distinguish outcomes.
    return resp
Exemplo n.º 3
0
def Trigger(tc):
    """Drive a fuz connection-rate test across all client workloads.

    Spreads the configured CPS evenly over the client contexts, writes a
    per-client JSON connection spec, optionally captures client/server
    pcaps, runs the fuz client traffic, and (when capturing) terminates
    the tcpdumps and copies the client pcaps into the log directory.

    Returns api.types.status.SUCCESS.
    """
    if tc.skip: return api.types.status.SUCCESS

    store = tc.GetBundleStore()
    # Testcase knobs (all optional, read from tc.args with defaults).
    cps = int(getattr(tc.args, "cps", 500))
    pcap_capture = int(getattr(tc.args, "pcap", False))
    sessions = int(getattr(tc.args, "num_sessions", 1))
    session_time = str(getattr(tc.args, "session_time", "10")) + "s"
    static_arp = int(getattr(tc.args, "static_arp", False))

    if static_arp:
        #install arp entries before running
        arpReqs = store["arp_ctx"]
        api.Trigger(arpReqs)

    #Spread CPS evenly
    clientReq = api.Trigger_CreateAllParallelCommandsRequest()

    clientPcapReq = api.Trigger_CreateAllParallelCommandsRequest()
    cps_per_node = int(cps / len(store["client_ctxts"]))
    for index, (_, client) in enumerate(store["client_ctxts"].items()):
        # Build the JSON list of server endpoints this client should hit.
        jsonInput = {"connections": []}
        for serverIPPort in client.GetServers():
            jsonInput["connections"].append({
                "ServerIPPort": serverIPPort,
                "proto": "tcp"
            })
        outfile = api.GetTestDataDirectory(
        ) + "/" + client.workload_name + "_fuz.json"
        with open(outfile, 'w') as fp:
            json.dump(jsonInput, fp)
        # Ship the spec to the workload's working directory.
        api.CopyToWorkload(client.node_name, client.workload_name, [outfile],
                           "")
        # Per-client pcap capture (only triggered later if pcap_capture).
        api.Trigger_AddCommand(clientPcapReq,
                               client.node_name,
                               client.workload_name,
                               "tcpdump -i eth1 -w %s.pcap" %
                               (client.workload_name),
                               background=True)
        # Assemble the fuz client command line for this workload.
        clientCmd = fuz_init.FUZ_EXEC[
            client.
            workload_name] + " -attempts 6  -duration " + session_time + " -conns " + str(
                sessions) + " -cps " + str(
                    cps_per_node
                ) + " -talk  --jsonOut --jsonInput " + os.path.basename(
                    outfile)
        # timeout=0: let the traffic run for the full session duration.
        api.Trigger_AddCommand(clientReq,
                               client.node_name,
                               client.workload_name,
                               clientCmd,
                               timeout=0)

    if pcap_capture:
        #Start Server Pcap
        serverPcapReq = api.Trigger_CreateAllParallelCommandsRequest()
        for pcap_info in store["server_pcap_info"]:
            node_name, intf, pcap_file = pcap_info[0], pcap_info[1], pcap_info[
                2]
            api.Trigger_AddHostCommand(serverPcapReq,
                                       node_name,
                                       "tcpdump -i %s -w %s" %
                                       (intf, pcap_file),
                                       background=True)

        # Start captures before traffic so the handshakes are recorded.
        tc.server_pcap_resp = api.Trigger(serverPcapReq)
        tc.fuz_client_pcap_resp = api.Trigger(clientPcapReq)

    #Initiate connections
    tc.fuz_client_resp = api.Trigger(clientReq)

    if pcap_capture:
        #Stop tcpdump
        api.Trigger_TerminateAllCommands(tc.fuz_client_pcap_resp)
        api.Trigger_TerminateAllCommands(tc.server_pcap_resp)

        # Collect the per-client captures into the testcase log directory.
        for _, client in store["client_ctxts"].items():
            api.CopyFromWorkload(client.node_name, client.workload_name,
                                 [client.workload_name + ".pcap"],
                                 tc.GetLogsDir())

        #Stop server Pcaps too
        #for pcap_info in store["server_pcap_info"]:
        #    node_name, intf, pcap_file = pcap_info[0], pcap_info[1], pcap_info[2]
        #    api.CopyFromHost(node_name, [pcap_file], tc.GetLogsDir())

    return api.types.status.SUCCESS
Exemplo n.º 4
0
def Trigger(tc):
    """Configure IPsec on both nodes and run an nc file-transfer over it.

    For each of the two workload nodes: push netagent IPsec encrypt/decrypt
    /policy configs when the node is Naples, otherwise program Linux
    ``ip xfrm`` state/policy directly on the workload. Then run an nc
    client/server transfer over the tunnel (or create dummy files when both
    nodes are Naples and the test is bypassed) and copy the transferred
    files back for comparison by the verifier.

    Returns api.types.status.SUCCESS or api.types.status.FAILURE.
    """
    tc.cmd_cookies = []
    tc.cmd_cookies1 = []
    tc.cmd_cookies2 = []

    nodes = api.GetWorkloadNodeHostnames()
    push_node_0 = [nodes[0]]
    push_node_1 = [nodes[1]]

    # Existing IPsec objects in the store; configs are only added when empty.
    encrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSAEncrypt')
    decrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSADecrypt')
    policy_objects = netagent_cfg_api.QueryConfigs(kind='IPSecPolicy')

    # Configure IPsec on Node 1

    if api.IsNaplesNode(nodes[0]):

        if len(encrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            # Read back to confirm the push actually landed.
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

        if len(decrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

        if len(policy_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-policy objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

    else:
        # Non-Naples node 1: program Linux kernel IPsec (xfrm) directly.
        workloads = api.GetWorkloads(nodes[0])
        w1 = workloads[0]

        req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

        # Flush any stale state/policy before installing fresh entries.
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state flush")

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy flush")

        # Tunnel-mode policies for TCP traffic on the test port (in/fwd/out).
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        # ESP SAs (AES-GCM) for both directions of the tunnel.
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
        )

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
        )

        # Matching policies for UDP traffic on the test port.
        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req1, w1.node_name, w1.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        # Dump the installed state/policy for the logs.
        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state list")

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy list")

        trig_resp1 = api.Trigger(req1)
        term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)

    # Configure IPsec on Node 2

    if api.IsNaplesNode(nodes[1]):

        if len(encrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            # NOTE(review): unlike node 1, a fetch failure here is logged
            # but deliberately not treated as fatal (return commented out).
            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

        if len(decrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

        if len(policy_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

    else:
        # Non-Naples node 2: mirror-image Linux xfrm configuration.
        workloads = api.GetWorkloads(nodes[1])
        w2 = workloads[0]

        req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state flush")

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy flush")

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
        )

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi 0x01 mode tunnel aead 'rfc4106(gcm(aes))' 0x414141414141414141414141414141414141414141414141414141414141414100000000 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
        )

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(
            req2, w2.node_name, w2.workload_name,
            "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
            % (tc.iterators.port))

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state list")

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy list")

        trig_resp2 = api.Trigger(req2)
        term_resp2 = api.Trigger_TerminateAllCommands(trig_resp2)

    # Pick nc client/server roles based on which workloads are Naples.
    workloads = api.GetWorkloads(nodes[0])
    w1 = workloads[0]
    workloads = api.GetWorkloads(nodes[1])
    w2 = workloads[0]
    bypass_test = 0

    if w1.IsNaples() and w2.IsNaples():
        api.Logger.info(
            "Both workloads are Naples, %s is nc client, %s is nc server, bypassing test"
            % (w1.node_name, w2.node_name))
        nc_client_wl = w1
        nc_server_wl = w2
        bypass_test = 1
    elif w1.IsNaples():
        api.Logger.info("%s is Naples and nc client, %s is nc server" %
                        (w1.node_name, w2.node_name))
        nc_client_wl = w1
        nc_server_wl = w2
    elif w2.IsNaples():
        api.Logger.info("%s is Naples and nc client, %s is nc server" %
                        (w2.node_name, w1.node_name))
        nc_client_wl = w2
        nc_server_wl = w1

    # NOTE(review): if neither workload is Naples, nc_client_wl/nc_server_wl
    # are never assigned and the next statement raises NameError — confirm
    # the topology guarantees at least one Naples node.
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s) on %s port %s" %\
                   (nc_server_wl.workload_name, nc_server_wl.ip_address, nc_client_wl.workload_name, nc_client_wl.ip_address, tc.iterators.protocol, tc.iterators.port)

    api.Logger.info("Starting NC test over IPSec from %s" % (tc.cmd_descr))

    if bypass_test == 0:
        cmd_cookie = "Creating test file on %s" % (nc_client_wl.workload_name)
        api.Trigger_AddCommand(
            req, nc_client_wl.node_name, nc_client_wl.workload_name,
            "base64 /dev/urandom | head -1000 > ipsec_client.dat")
        tc.cmd_cookies.append(cmd_cookie)

        # Lower the MTU so transfers exercise IPsec fragmentation/tunnel
        # overhead handling.
        cmd_cookie = "Setting MTU to smaller value on %s" % (
            nc_client_wl.workload_name)
        api.Trigger_AddCommand(req, nc_client_wl.node_name,
                               nc_client_wl.workload_name,
                               "ifconfig %s mtu 1048" % nc_client_wl.interface)
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Running nc server on %s" % (nc_server_wl.workload_name)
        if tc.iterators.protocol == "tcp":
            api.Trigger_AddCommand(req,
                                   nc_server_wl.node_name,
                                   nc_server_wl.workload_name,
                                   "nc -l %s > ipsec_server.dat" %
                                   (tc.iterators.port),
                                   background=True)
        else:
            api.Trigger_AddCommand(req,
                                   nc_server_wl.node_name,
                                   nc_server_wl.workload_name,
                                   "nc --udp -l %s > ipsec_server.dat" %
                                   (tc.iterators.port),
                                   background=True)
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Running nc client on %s" % (nc_client_wl.workload_name)
        if tc.iterators.protocol == "tcp":
            api.Trigger_AddCommand(
                req, nc_client_wl.node_name, nc_client_wl.workload_name,
                "nc %s %s < ipsec_client.dat" %
                (nc_server_wl.ip_address, tc.iterators.port))
        else:
            api.Trigger_AddCommand(
                req, nc_client_wl.node_name, nc_client_wl.workload_name,
                "nc --udp %s %s < ipsec_client.dat" %
                (nc_server_wl.ip_address, tc.iterators.port))
        tc.cmd_cookies.append(cmd_cookie)
    else:
        # Bypass path: create empty files so the later copy/compare steps
        # still succeed without running actual traffic.
        cmd_cookie = "Creating dummy file on %s" % (nc_client_wl.workload_name)
        api.Trigger_AddCommand(
            req, nc_client_wl.node_name, nc_client_wl.workload_name,
            "rm -f ipsec_client.dat ; touch ipsec_client.dat")
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Creating dummy file on %s" % (nc_server_wl.workload_name)
        api.Trigger_AddCommand(
            req, nc_server_wl.node_name, nc_server_wl.workload_name,
            "rm -f ipsec_server.dat ; touch ipsec_server.dat")
        tc.cmd_cookies.append(cmd_cookie)

    # Capture post-test IPsec state on both ends for the logs.
    if nc_client_wl.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_client_wl.node_name)
        api.Trigger_AddNaplesCommand(
            req, nc_client_wl.node_name,
            "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_client_wl.node_name)
        api.Trigger_AddCommand(req, nc_client_wl.node_name,
                               nc_client_wl.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    if nc_server_wl.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_server_wl.node_name)
        api.Trigger_AddNaplesCommand(
            req, nc_server_wl.node_name,
            "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running nc test" % (
            nc_server_wl.node_name)
        api.Trigger_AddCommand(req, nc_server_wl.node_name,
                               nc_server_wl.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    # Copy both transfer files back for verification by the Verify step.
    resp = api.CopyFromWorkload(nc_client_wl.node_name,
                                nc_client_wl.workload_name,
                                ['ipsec_client.dat'], tc.GetLogsDir())
    if resp is None:
        api.Logger.error("Could not find ipsec_client.dat")
        return api.types.status.FAILURE
    resp = api.CopyFromWorkload(nc_server_wl.node_name,
                                nc_server_wl.workload_name,
                                ['ipsec_server.dat'], tc.GetLogsDir())
    if resp is None:
        api.Logger.error("Could not find ipsec_server.dat")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
Exemplo n.º 5
0
def RunCmd(src_wl, protocol, dest_wl, destination_ip, destination_port, collector_info, feature, is_wl_type_bm=False, span_id=1):
    """Send traffic from *src_wl* and verify it reaches every collector.

    Starts a background tcpdump on each collector (GRE for 'mirror', UDP
    export for 'flowmon'), sends hping/nping traffic from the source
    workload with retries, then terminates the captures, copies the pcaps
    locally and validates each one via VerifyCmd().

    Returns api.types.status.SUCCESS or api.types.status.FAILURE.
    """
    result = api.types.status.SUCCESS
    background_req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    for col in collector_info:
        coll_wl = col['workload']
        coll_ip = col['cfg']

        # Add the ping commands from collector to source and dest workload
        # to avoid flooding on the vswitch
        api.Trigger_AddCommand(req, coll_wl.node_name, coll_wl.workload_name,
                               "ping -c 1 %s " % (src_wl.ip_address), timeout=2)
        api.Trigger_AddCommand(req, coll_wl.node_name, coll_wl.workload_name,
                               "ping -c 1 %s " % (destination_ip), timeout=2)

        if feature == 'mirror':
            api.Trigger_AddCommand(background_req, coll_wl.node_name, coll_wl.workload_name,
                                   "tcpdump -c 10 -nnSXi %s ip proto gre and dst %s -U -w mirror-%s.pcap" %
                                   (coll_wl.interface, coll_ip, coll_ip), background=True, timeout=20)
        elif feature == 'flowmon':
            # Fix: the original interleaved "-w flowmon-%s.pcap" inside the
            # capture filter ("... dst host -w flowmon-X.pcap X"), which is
            # an invalid tcpdump command line. Filter first, then -w file.
            api.Trigger_AddCommand(background_req, coll_wl.node_name, coll_wl.workload_name,
                                   "tcpdump -c 100 -nni %s udp and dst port %s and dst host %s -w flowmon-%s.pcap" %
                                   (coll_wl.interface, coll_ip.proto_port.port, coll_ip.destination, coll_ip.destination),
                                   background=True, timeout=20)

    trig_resp = api.Trigger(req)
    background_trig_resp = api.Trigger(background_req)

    #delay for background cmds to start before issuing ping
    if feature == 'flowmon':
        time.sleep(2)
    if feature == 'mirror':
        time.sleep(2)

    # Workaround for hping issue on BM workload over VLAN tagged sub-if,
    # when there are more than 16 interfaces in the system.
    # hping sends local host addr as source IP for hping on tagged sub-if;
    # hence using ping instead of hping on BM.
    if is_wl_type_bm:
        cmd = GetNpingCmd(protocol, destination_ip, destination_port)
    else:
        cmd = GetHping3Cmd(protocol, src_wl, destination_ip, destination_port)

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddCommand(req, src_wl.node_name, src_wl.workload_name, cmd, timeout=10)
    api.Logger.info("Running from src_wl_ip {} COMMAND {}".format(src_wl.ip_address, cmd))

    # retry traffic in case of any send failures
    for retry in range(4):
        retrigger = False
        api.Logger.info("Running from src_wl_ip {} COMMAND {}".format(src_wl.ip_address, cmd))
        trig_resp = api.Trigger(req)
        api.Logger.info("Trigger resp commands")
        # Fix: use a distinct loop variable; the original reused 'cmd',
        # clobbering the traffic command string logged on later retries.
        for resp_cmd in trig_resp.commands:
            api.PrintCommandResults(resp_cmd)
            if resp_cmd.exit_code != 0:
                retrigger = True
                api.Logger.info("Traffic failed...Retry (%s)..." % retry)
        if not retrigger:
            break

    # allow exported/mirrored packets to drain to the collectors
    if feature == 'flowmon':
        time.sleep(2)
    if feature == 'mirror':
        time.sleep(2)

    # Verify packets received on collectors.
    term_resp = api.Trigger_TerminateAllCommands(background_trig_resp)
    background_resp = api.Trigger_AggregateCommandsResponse(background_trig_resp, term_resp)
    dir_path = os.path.dirname(os.path.realpath(__file__))
    for coll_idx, col in enumerate(collector_info):
        coll_wl = col['workload']
        coll_ip = col['cfg']
        coll_type = col.get('type', None)

        pcap_file_name = None
        proto_port = None
        # background_req holds exactly one tcpdump per collector, so the
        # response index matches the collector index.
        cmd_resp_idx = coll_idx
        if feature == 'mirror':
            pcap_file_name = ('mirror-%s.pcap' % coll_ip)
            api.CopyFromWorkload(coll_wl.node_name, coll_wl.workload_name, [pcap_file_name], dir_path)
        elif feature == 'flowmon':
            pcap_file_name = ('flowmon-%s.pcap' % coll_ip.destination)
            api.CopyFromWorkload(coll_wl.node_name, coll_wl.workload_name, [pcap_file_name], dir_path)
            proto_port = coll_ip.proto_port.port

        pcap_cmd = background_resp.commands[cmd_resp_idx]
        result = VerifyCmd(pcap_cmd, feature, pcap_file_name, proto_port, erspan_type=coll_type, span_id=span_id)
        if (result == api.types.status.FAILURE):
            api.Logger.info("Testcase FAILED!! cmd: {}".format(pcap_cmd))
            break

    return result
Exemplo n.º 6
0
def Trigger(tc):
    """Set up TCP sessions across workload pairs and check FIN delivery.

    For each Naples workload pair: start tcpdump on client and server,
    open an nc session, then use halctl to force FINs on every Naples
    node. Afterwards copy each side's tcpdump output back and scan it for
    a FIN ([F.]) flag; tc.fin_fail is set to 1 when a Naples-side capture
    shows no FIN.

    Returns api.types.status.SUCCESS (failures are reported via
    tc.fin_fail for the Verify step).
    """
    naples_list = []
    tc.cmd_cookies = []
    tc.fin_fail = 0

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    idx = 0
    for pairs in tc.workload_pairs:
        server = pairs[0]
        client = pairs[1]

        # Track one Naples node per pair; skip pairs with no Naples side.
        naples = server
        if not server.IsNaples():
            naples = client
            if not client.IsNaples():
                continue

        found = False
        for info in naples_list:
            if info[0] == naples.node_name:
                found = True
        if found == False:
            naples_list.append((naples.node_name, pairs))

        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                      (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        api.Logger.info("Starting Upgrade test from %s" % (tc.cmd_descr))

        #Step 1: Start TCPDUMP
        api.Trigger_AddCommand(req,
                               client.node_name,
                               client.workload_name,
                               "tcpdump -i {} > out.txt".format(
                                   client.interface),
                               background=True)
        tc.cmd_cookies.append("tcpdump on client")

        api.Trigger_AddCommand(req,
                               server.node_name,
                               server.workload_name,
                               "tcpdump -i {} > out.txt".format(
                                   server.interface),
                               background=True)
        tc.cmd_cookies.append("tcpdump on server")

        #Step 1: Start TCP Server
        server_port = api.AllocateTcpPort()
        api.Trigger_AddCommand(req,
                               server.node_name,
                               server.workload_name,
                               "nc -l %s" % (server_port),
                               background=True)
        tc.cmd_cookies.append("start server")

        #Step 2: Start TCP Client
        client_port = api.AllocateTcpPort()
        api.Trigger_AddCommand(req,
                               client.node_name,
                               client.workload_name,
                               "nc {} {} -p {}".format(server.ip_address,
                                                       server_port,
                                                       client_port),
                               background=True)
        tc.cmd_cookies.append("start client")

        # Background ping keeps traffic flowing during the FIN injection.
        api.Trigger_AddCommand(req,
                               server.node_name,
                               server.workload_name,
                               "ping {}".format(server.ip_address),
                               background=True)
        tc.cmd_cookies.append("Start ping")
        idx = idx + 1

    # Inject FINs on every Naples node and snapshot the session table
    # before and after.
    for node in naples_list:
        api.Trigger_AddNaplesCommand(req, node[0],
                                     "/nic/bin/halctl show session")
        tc.cmd_cookies.append("show session")
        api.Trigger_AddNaplesCommand(req, node[0],
                                     "/nic/bin/halctl debug test send-fin")
        tc.cmd_cookies.append("Send fin")
        api.Trigger_AddNaplesCommand(req, node[0],
                                     "/nic/bin/halctl show session --yaml")
        tc.cmd_cookies.append("show session after delete")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    # Scan both sides' tcpdump output for a FIN flag ([F.]).
    # Fix: regex literals use raw strings; the original "\[F\.\]" relied on
    # invalid escape sequences (SyntaxWarning on modern Python).
    for node in naples_list:
        found = False
        api.CopyFromWorkload(node[1][0].node_name, node[1][0].workload_name,
                             ['out.txt'], dir_path)
        api.Logger.info("Copy from {} {}".format(node[0], node[1][0]))
        tcpout = dir_path + '/out.txt'
        for line in open(tcpout, 'r'):
            if re.search(r"\[F\.\]", line):
                found = True
                break
        if node[0] == node[1][0].node_name and found == False:
            tc.fin_fail = 1

        os.remove(tcpout)
        found = False
        api.CopyFromWorkload(node[1][1].node_name, node[1][1].workload_name,
                             ['out.txt'], dir_path)
        api.Logger.info("Copy from {} {}".format(node[0], node[1][0]))
        tcpout = dir_path + '/out.txt'
        for line in open(tcpout, 'r'):
            if re.search(r"\[F\.\]", line):
                found = True
        if node[0] == node[1][1].node_name and found == False:
            tc.fin_fail = 1
        os.remove(tcpout)

    return api.types.status.SUCCESS
Exemplo n.º 7
0
def Trigger(tc):
    """Drive a TCP-proxy netcat transfer between the first remote workload pair.

    Pushes the stored TCPProxyPolicy objects to the agent, captures
    tcp-proxy session state on each Naples node before and after a
    netcat file transfer, then copies both data files back so a later
    Verify step can compare them. Returns an api.types.status code.
    """
    tc.cmd_cookies = []

    remote_pairs = api.GetRemoteWorkloadPairs()
    client, server = remote_pairs[0]

    # Naples-to-Naples proxying is not implemented; skip that combination.
    if client.IsNaples() and server.IsNaples():
        api.Logger.info("naples-naples unsupported currently for tcp-proxy")
        return api.types.status.DISABLED

    store_proxy_objects = netagent_cfg_api.QueryConfigs(kind='TCPProxyPolicy')
    if len(store_proxy_objects) == 0:
        api.Logger.error("No tcp proxy objects in store")
        return api.types.status.FAILURE

    push_status = netagent_cfg_api.PushConfigObjects(store_proxy_objects,
                                                     ignore_error=True)
    if push_status != api.types.status.SUCCESS:
        api.Logger.error("Unable to push tcp_proxy policies")
        return api.types.status.FAILURE

    # Read the objects back to confirm the push actually landed.
    fetched = netagent_cfg_api.GetConfigObjects(store_proxy_objects)
    if len(fetched) == 0:
        api.Logger.error("Unable to fetch newly pushed objects")
        return api.types.status.FAILURE

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    def _queue_session_dump(workload, phase):
        # Stage a tcp-proxy session dump on a Naples node, tagged with
        # the phase ("BEFORE"/"AFTER") relative to the netcat run.
        cookie = "tcp proxy sessions on %s %s running nc" % (
            workload.node_name, phase)
        api.Trigger_AddNaplesCommand(req, workload.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cookie)

    # Pre-transfer dumps (server side first, matching cookie order).
    if server.IsNaples():
        _queue_session_dump(server, "BEFORE")
    if client.IsNaples():
        _queue_session_dump(client, "BEFORE")

    tc.cmd_descr = "Client: %s(%s) <--> Server: %s(%s) on tcp proxy port %s" %\
                   (client.workload_name, client.ip_address,
                    server.workload_name, server.ip_address, tc.iterators.port)
    api.Logger.info("Starting netcat test from %s" % (tc.cmd_descr))

    # Random payload the client will push through the proxy.
    api.Trigger_AddCommand(
        req, client.node_name, client.workload_name,
        "base64 /dev/urandom | head -1000 > tcp_proxy_client.dat")
    tc.cmd_cookies.append(
        "Creating test file on %s" % (client.workload_name))

    # Listener runs in the background until terminated after the trigger.
    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "nc -l %s > tcp_proxy_server.dat" %
                           (tc.iterators.port),
                           background=True)
    tc.cmd_cookies.append(
        "Running nc server on %s" % (server.workload_name))

    api.Trigger_AddCommand(
        req, client.node_name, client.workload_name,
        "nc %s %s < tcp_proxy_client.dat" % (server.ip_address,
                                             tc.iterators.port))
    tc.cmd_cookies.append(
        "Running nc client on %s" % (client.workload_name))

    # Post-transfer dumps (client side first, matching original order).
    if client.IsNaples():
        _queue_session_dump(client, "AFTER")
    if server.IsNaples():
        _queue_session_dump(server, "AFTER")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    # Retrieve both data files; a later Verify step compares their contents.
    for workload, fname in ((client, 'tcp_proxy_client.dat'),
                            (server, 'tcp_proxy_server.dat')):
        copied = api.CopyFromWorkload(workload.node_name,
                                      workload.workload_name,
                                      [fname], tc.GetLogsDir())
        if copied is None:
            api.Logger.error("Could not find %s" % fname)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS