Example #1
def start_fuz(tc):
    ret = copy_fuz(tc)
    if ret != api.types.status.SUCCESS:
        return api.types.status.FAILURE

    tc.serverCmds = []
    tc.clientCmds = []
    tc.cmd_descr = []

    serverReq = None
    clientReq = None

    serverReq = api.Trigger_CreateExecuteCommandsRequest(serial = False)
    clientReq = api.Trigger_CreateExecuteCommandsRequest(serial = False)

    # The ping test above sets the workload pairs to remote only.
    # Reset workload_pairs according to the arg selected in the testbundle.
    workload_pairs = []
 
    if tc.args.type == 'local_only':
        api.Logger.info("local_only test")
        workload_pairs = api.GetLocalWorkloadPairs()
    elif tc.args.type == 'both':
        api.Logger.info(" both local and remote test")
        workload_pairs = api.GetLocalWorkloadPairs()
        workload_pairs.extend(api.GetRemoteWorkloadPairs())
    else:
        api.Logger.info("remote_only test")
        workload_pairs = api.GetRemoteWorkloadPairs()

    wl_under_move = []
    for wl_info in tc.move_info:
        wl_under_move.append(wl_info.wl)
    '''
    tc.workload_pairs was updated in the ping test above;
    reset it here to limit fuzz tests to the VMs under move.
    '''
    tc.workload_pairs = [] 
    for pairs in workload_pairs:
        if pairs[0] in wl_under_move or pairs[1] in wl_under_move:
            api.Logger.info("Adding %s and %s for fuz test" %(pairs[0].workload_name, pairs[1].workload_name))
            tc.workload_pairs.append(pairs)

    for idx, pairs in enumerate(tc.workload_pairs):
        client = pairs[0]
        server = pairs[1]
        cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        tc.cmd_descr.append(cmd_descr)
        num_sessions = int(getattr(tc.args, "num_sessions", 1))

        serverCmd = None
        clientCmd = None
        port = api.AllocateTcpPort()

        api.Logger.info("Starting Fuz test from %s num-sessions %d Port %d" % (cmd_descr, num_sessions, port))

        serverCmd = tc.fuz_exec[server.workload_name]  + " -port " + str(port)
        clientCmd = tc.fuz_exec[client.workload_name]  + " -conns " + str(num_sessions) + " -duration " + str(__fuz_run_time) + " -attempts 1 -read-timeout 20 -talk " + server.ip_address + ":" + str(port)

        tc.serverCmds.append(serverCmd)
        tc.clientCmds.append(clientCmd)

        api.Trigger_AddCommand(serverReq, server.node_name, server.workload_name,
                               serverCmd, background = True)

        api.Trigger_AddCommand(clientReq, client.node_name, client.workload_name,
                               clientCmd, background = True)


    tc.server_resp = api.Trigger(serverReq)
    # Sleep for some time as the backgrounded servers may not have started yet.
    time.sleep(5)
    tc.fuz_client_resp = api.Trigger(clientReq)
    return api.types.status.SUCCESS
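
The matching verify step is not shown; a minimal sketch, assuming the same framework calls used elsewhere in these examples (Trigger_TerminateAllCommands, Trigger_AggregateCommandsResponse) and that tc.server_resp/tc.fuz_client_resp hold the backgrounded commands started above:

def verify_fuz(tc):
    # Stop the backgrounded servers, then terminate and aggregate the clients
    api.Trigger_TerminateAllCommands(tc.server_resp)
    term_resp = api.Trigger_TerminateAllCommands(tc.fuz_client_resp)
    resp = api.Trigger_AggregateCommandsResponse(tc.fuz_client_resp, term_resp)

    result = api.types.status.SUCCESS
    for cmd in resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            result = api.types.status.FAILURE
    return result
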
Example #2
def CheckRolloutStatus(tc):
    retries = 0
    start_ts = datetime.now()
    result = api.types.status.FAILURE
    status_found = False
    while retries < 100:
        api.Logger.info("------Issuing Rollout get %s retry------"%retries)
        misc_utils.Sleep(2)
        retries += 1
        # get rollout status
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        for n in tc.Nodes:
            cmd = 'curl -k https://' + api.GetNicIntMgmtIP(n) + ':'+utils.GetNaplesMgmtPort()+'/api/v1/naples/rollout/'
            api.Trigger_AddHostCommand(req, n, cmd)
            api.Logger.info("Sending rollout status get request: %s"%(cmd))
        tc.resp = api.Trigger(req)

        try:
            for cmd in tc.resp.commands:
                api.PrintCommandResults(cmd)
        except Exception as e:
            api.Logger.error(f"Exception occured in sending rollout status get.{e}")
            continue

        for cmd in tc.resp.commands:
            if cmd.exit_code != 0:
                api.Logger.info("Rollout status get request returned failure")
                continue
            resp = json.loads(cmd.stdout)
            try:
                for item in resp['Status']['status']:
                    status_found = True
                    if not item['Op'] == 4:
                        api.Logger.info("opcode is bad for %s"%cmd.node_name)
                        result = api.types.status.FAILURE
                    if "fail" in tc.iterators.option:
                        if not item['opstatus'] == 'failure':
                            api.Logger.info("opstatus is bad for %s"%cmd.node_name)
                            result = api.types.status.FAILURE
                        if tc.iterators.option not in item['Message']:
                            api.Logger.info("message is bad")
                            result = api.types.status.FAILURE
                    else:
                        if not item['opstatus'] == 'success':
                            api.Logger.info("opstatus(%s) is bad for %s"%(item['opstatus'], cmd.node_name))
                            result = api.types.status.FAILURE
                        else:
                            api.Logger.info("Rollout status is SUCCESS for %s"%cmd.node_name)
                            result = api.types.status.SUCCESS
            except Exception as e:
                api.Logger.error("resp: %s" % json.dumps(resp, indent=1))
                api.Logger.error(f"Exception occurred in parsing response: {e}")
                result = api.types.status.FAILURE
                continue

        if status_found:
            break

    end_ts = datetime.now()
    # find time elapsed in retrieving rollout status and adjust the wait time for traffic test.
    timedelta = end_ts - start_ts
    time_elapsed = timedelta.days * 24 * 3600 + timedelta.seconds

    if time_elapsed < 100:
        # Give the rollout at least 100 seconds to settle before the
        # traffic test; sleep only for the remaining time.
        misc_utils.Sleep(100 - time_elapsed)
        time_elapsed = 100

    tc.sleep = (tc.sleep - time_elapsed) if (tc.sleep > time_elapsed) else 10
    return result
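
For reference, a sketch of the rollout-status JSON that the parsing loop above assumes; the shape is inferred from the checks on 'Op', 'opstatus' and 'Message', and the field values are illustrative only:

sample_resp = {
    "Status": {
        "status": [
            {
                "Op": 4,                 # the loop treats any other opcode as failure
                "opstatus": "success",   # "failure" is expected for "fail" options
                "Message": "rollout complete",  # illustrative value
            }
        ]
    }
}
for item in sample_resp['Status']['status']:
    assert item['Op'] == 4 and item['opstatus'] == 'success'
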
Example #3
def Trigger(tc):
    tc.cmd_cookies = []
    tc.cmd_cookies1 = []
    tc.cmd_cookies2 = []

    nodes = api.GetWorkloadNodeHostnames()
    push_node_0 = [nodes[0]]
    push_node_1 = [nodes[1]]

    encrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSAEncrypt')
    decrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSADecrypt')
    policy_objects = netagent_cfg_api.QueryConfigs(kind='IPSecPolicy')

    # Configure IPsec on Node 1

    if api.IsNaplesNode(nodes[0]):

        if len(encrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

        if len(decrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

        if len(policy_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node1.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node1.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_0,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-policy objects to node %s" %
                    nodes[0])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                return api.types.status.FAILURE

    else:
        workloads = api.GetWorkloads(nodes[0])
        w1 = workloads[0]

        req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state flush")

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy flush")

        for port, spi, aead in zip(tc.args.ports_list, tc.args.spi_list,
                                   tc.args.aead_list):
            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
                % (spi, aead))

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
                % (spi, aead))

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir in tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp dport %s dir fwd tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req1, w1.node_name, w1.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp sport %s dir out tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm state list")

        api.Trigger_AddCommand(req1, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy list")

        trig_resp1 = api.Trigger(req1)
        term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)

    # Configure IPsec on Node 2

    if api.IsNaplesNode(nodes[1]):

        if len(encrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_encryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_encryption_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

        if len(decrypt_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_decryption_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_decryption_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

        if len(policy_objects) == 0:

            newObjects = netagent_cfg_api.AddOneConfig(
                api.GetTopologyDirectory() +
                "/ipsec/ipsec_policies_node2.json")
            if len(newObjects) == 0:
                api.Logger.error(
                    "Adding new objects to store failed for ipsec_policies_node2.json"
                )
                return api.types.status.FAILURE

            ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                                     node_names=push_node_1,
                                                     ignore_error=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.error(
                    "Unable to push ipsec-encryption objects to node %s" %
                    nodes[1])
                return api.types.status.FAILURE

            get_config_objects = netagent_cfg_api.GetConfigObjects(newObjects)
            if len(get_config_objects) == 0:
                api.Logger.error("Unable to fetch newly pushed objects")
                #return api.types.status.FAILURE

    else:
        workloads = api.GetWorkloads(nodes[1])
        w2 = workloads[0]

        req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state flush")

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy flush")

        for port, spi, aead in zip(tc.args.ports_list, tc.args.spi_list,
                                   tc.args.aead_list):
            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto tcp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto tcp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm state add src 192.168.100.101 dst 192.168.100.103 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.101/32 dst 192.168.100.103/32"
                % (spi, aead))

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm state add src 192.168.100.103 dst 192.168.100.101 proto esp spi %s mode tunnel aead 'rfc4106(gcm(aes))' %s 128 sel src 192.168.100.103/32 dst 192.168.100.101/32"
                % (spi, aead))

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir in tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.101/32 dst 192.168.100.103/32 proto udp dport %s dir fwd tmpl src 192.168.100.101 dst 192.168.100.103 proto esp mode tunnel"
                % port)

            api.Trigger_AddCommand(
                req2, w2.node_name, w2.workload_name,
                "sudo ip xfrm policy add src 192.168.100.103/32 dst 192.168.100.101/32 proto udp sport %s dir out tmpl src 192.168.100.103 dst 192.168.100.101 proto esp mode tunnel"
                % port)

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm state list")

        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy list")

        trig_resp2 = api.Trigger(req2)
        term_resp2 = api.Trigger_TerminateAllCommands(trig_resp2)

    workloads = api.GetWorkloads(nodes[0])
    w1 = workloads[0]
    workloads = api.GetWorkloads(nodes[1])
    w2 = workloads[0]
    bypass_test = 0

    if w1.IsNaples() and w2.IsNaples():
        api.Logger.info(
            "Both workloads are Naples, %s is iperf client, %s is iperf server, bypassing test"
            % (w1.node_name, w2.node_name))
        iperf_client_wl = w1
        iperf_server_wl = w2
        bypass_test = 1
    elif w1.IsNaples():
        api.Logger.info("%s is Naples and iperf client, %s is iperf server" %
                        (w1.node_name, w2.node_name))
        iperf_client_wl = w1
        iperf_server_wl = w2
    elif w2.IsNaples():
        api.Logger.info("%s is Naples and iperf client, %s is iperf server" %
                        (w2.node_name, w1.node_name))
        iperf_client_wl = w2
        iperf_server_wl = w1
    else:
        # Neither workload is Naples: default the roles so later references
        # are defined, and bypass the iperf test.
        api.Logger.info("Neither workload is Naples, bypassing test")
        iperf_client_wl = w1
        iperf_server_wl = w2
        bypass_test = 1

    req3 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    cmd_cookie = "Set rcv socket buffer size on %s" % (w1.workload_name)
    api.Trigger_AddCommand(
        req3, w1.node_name, w1.workload_name,
        "sysctl -w net.ipv4.tcp_rmem='4096 2147483647 2147483647'")
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Set rcv socket buffer size on %s" % (w2.workload_name)
    api.Trigger_AddCommand(
        req3, w2.node_name, w2.workload_name,
        "sysctl -w net.ipv4.tcp_rmem='4096 2147483647 2147483647'")
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Setting MTU to smaller value on %s" % (
        iperf_client_wl.workload_name)
    api.Trigger_AddCommand(req3, iperf_client_wl.node_name,
                           iperf_client_wl.workload_name,
                           "ifconfig %s mtu 1048" % iperf_client_wl.interface)
    tc.cmd_cookies.append(cmd_cookie)

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s) on %s" %\
                   (iperf_server_wl.workload_name, iperf_server_wl.ip_address, iperf_client_wl.workload_name, iperf_client_wl.ip_address, tc.iterators.protocol)
    api.Logger.info("Starting Iperf test over IPSec from %s" % (tc.cmd_descr))

    if bypass_test == 0:
        for port in tc.args.ports_list:
            cmd_cookie = "Running iperf server on %s port %s" % (
                iperf_server_wl.workload_name, port)
            api.Trigger_AddCommand(req3,
                                   iperf_server_wl.node_name,
                                   iperf_server_wl.workload_name,
                                   "iperf -s -p %s" % (port),
                                   background=True)
            tc.cmd_cookies.append(cmd_cookie)

    req4 = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    if bypass_test == 0:
        cmd_cookie = "Brief Sleep"
        api.Trigger_AddCommand(req4, iperf_client_wl.node_name,
                               iperf_client_wl.workload_name, "sleep 1")
        tc.cmd_cookies.append(cmd_cookie)
        for port in tc.args.ports_list:
            cmd_cookie = "Running iperf client on %s port %s" % (
                iperf_client_wl.workload_name, port)
            if tc.iterators.protocol == "tcp":
                api.Trigger_AddCommand(
                    req4, iperf_client_wl.node_name,
                    iperf_client_wl.workload_name, "iperf -c %s -p %s -M %s" %
                    (iperf_server_wl.ip_address, port, tc.iterators.pktsize))
            else:
                api.Trigger_AddCommand(
                    req4, iperf_client_wl.node_name,
                    iperf_client_wl.workload_name,
                    "iperf --udp -c %s -p %s -M %s" %
                    (iperf_server_wl.ip_address, port, tc.iterators.pktsize))
            tc.cmd_cookies.append(cmd_cookie)

    req5 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    if w1.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddNaplesCommand(
            req5, w1.node_name, "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddCommand(req5, w1.node_name, w1.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    if w2.IsNaples():
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddNaplesCommand(
            req5, w2.node_name, "/nic/bin/halctl show ipsec-global-stats")
        tc.cmd_cookies.append(cmd_cookie)
    else:
        cmd_cookie = "IPSec state on %s AFTER running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddCommand(req5, w2.node_name, w2.workload_name,
                               "sudo ip xfrm policy show")
        tc.cmd_cookies.append(cmd_cookie)

    trig_resp3 = api.Trigger(req3)
    trig_resp4 = api.Trigger(req4)
    trig_resp5 = api.Trigger(req5)

    term_resp3 = api.Trigger_TerminateAllCommands(trig_resp3)
    term_resp4 = api.Trigger_TerminateAllCommands(trig_resp4)
    term_resp5 = api.Trigger_TerminateAllCommands(trig_resp5)

    agg_resp4 = api.Trigger_AggregateCommandsResponse(trig_resp4, term_resp4)
    tc.resp = agg_resp4

    return api.types.status.SUCCESS
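
The add/push/fetch sequence above is repeated six times with only the JSON file and target node changing; a hypothetical helper (built from the same netagent_cfg_api calls) could collapse each block to a single call, e.g. push_ipsec_config("ipsec_encryption_node1.json", push_node_0):

def push_ipsec_config(json_name, node_names):
    # Hypothetical refactoring sketch of the repeated add/push/fetch pattern
    newObjects = netagent_cfg_api.AddOneConfig(
        api.GetTopologyDirectory() + "/ipsec/" + json_name)
    if len(newObjects) == 0:
        api.Logger.error("Adding new objects to store failed for %s" % json_name)
        return api.types.status.FAILURE
    ret = netagent_cfg_api.PushConfigObjects(newObjects,
                                             node_names=node_names,
                                             ignore_error=True)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Unable to push %s objects to %s" % (json_name, node_names))
        return api.types.status.FAILURE
    if len(netagent_cfg_api.GetConfigObjects(newObjects)) == 0:
        api.Logger.error("Unable to fetch newly pushed objects")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
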
Example #4
def Trigger(tc):
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_cookies1 = []

    # Step 0: Update the timeout & connection-tracking in the config object
    if not tc.skip_security_prof:
        update_timeout("icmp-timeout", tc.iterators.timeout)

    w1, w2 = tc.workload_pairs[0]
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                  (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
    api.Logger.info("Starting Ping test from %s" % (cmd_cookie))

    cmd_cookie = "halctl clear session"
    api.Trigger_AddNaplesCommand(req, tc.old_node,
                                 "/nic/bin/halctl clear session")
    tc.cmd_cookies1.append(cmd_cookie)

    if not tc.skip_security_prof:
        timeout = timetoseconds(tc.iterators.timeout) + GRACE_TIME
    else:
        timeout = str(DEFAULT_ICMP_TIMEOUT + GRACE_TIME)

    cmd_cookie = "ping"
    api.Trigger_AddCommand(req,
                           w1.node_name,
                           w1.workload_name,
                           "ping -c 180 %s" % w2.ip_address,
                           background=True)
    tc.cmd_cookies1.append(cmd_cookie)

    cmd_cookie = "Before move show session ICMP"
    api.Trigger_AddNaplesCommand(req, tc.old_node,
                                 "/nic/bin/halctl show session | grep ICMP")
    tc.cmd_cookies1.append(cmd_cookie)
    tc.resp1 = api.Trigger(req)

    # vm_utils.do_vmotion(tc, tc.wl, tc.new_node)
    vm_utils.do_vmotion(tc, True)

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_cookies = []

    cmd_cookie = "After move show session ICMP"
    api.Trigger_AddNaplesCommand(req, tc.wl.node_name,
                                 "/nic/bin/halctl show session | grep ICMP")
    tc.cmd_cookies.append(cmd_cookie)

    # The sleep duration should come from the config (ICMP aging timeout)
    cmd_cookie = "sleep"
    api.Trigger_AddNaplesCommand(req,
                                 tc.wl.node_name,
                                 "sleep 180",
                                 timeout=210)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "After aging show session"
    api.Trigger_AddNaplesCommand(req, tc.wl.node_name,
                                 "/nic/bin/halctl show session | grep ICMP")
    tc.cmd_cookies.append(cmd_cookie)

    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
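
The corresponding verification is not shown; a minimal sketch, assuming that an aged-out session makes the final "show session | grep ICMP" produce empty output:

def Verify(tc):
    if tc.resp is None:
        return api.types.status.FAILURE
    for idx, cmd in enumerate(tc.resp.commands):
        api.PrintCommandResults(cmd)
        # After the aging wait, no ICMP session should remain
        if tc.cmd_cookies[idx] == "After aging show session" and cmd.stdout != '':
            api.Logger.error("ICMP session did not age out after vMotion")
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
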
Example #5
def Trigger(tc):

    #==============================================================
    # trigger the commands
    #==============================================================
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    # Populate bw lookup table - manual entry to speed up development
    bw_dict = {}
    bw_dict[(1, 4096)] = 10
    bw_dict[(1, 8192)] = 10
    bw_dict[(1, 65536)] = 50
    bw_dict[(2, 4000)] = 10
    bw_dict[(2, 4096)] = 10
    bw_dict[(2, 8192)] = 10
    bw_dict[(2, 16384)] = 10
    bw_dict[(2, 32768)] = 30
    bw_dict[(2, 65536)] = 50
    bw_dict[(2, 8000)] = 10
    bw_dict[(2, 16000)] = 10
    bw_dict[(2, 32000)] = 30
    bw_dict[(2, 64000)] = 50
    bw_dict[(3, 4095)] = 5
    bw_dict[(3, 3072)] = 5
    bw_dict[(3, 3000)] = 5
    bw_dict[(3, 12288)] = 10
    bw_dict[(3, 24576)] = 20
    bw_dict[(3, 12000)] = 10
    bw_dict[(3, 24000)] = 20
    bw_dict[(4, 4000)] = 5
    bw_dict[(4, 4096)] = 5
    bw_dict[(4, 8192)] = 10
    bw_dict[(4, 16384)] = 10
    bw_dict[(4, 32768)] = 30
    bw_dict[(4, 65536)] = 50
    bw_dict[(4, 16000)] = 10
    bw_dict[(4, 32000)] = 30
    bw_dict[(4, 64000)] = 50
    bw_dict[(5, 20480)] = 20
    bw_dict[(5, 20000)] = 10
    bw_dict[(5, 10000)] = 5
    bw_dict[(6, 12288)] = 10
    bw_dict[(6, 24576)] = 20
    bw_dict[(6, 24000)] = 20
    bw_dict[(7, 28672)] = 20
    bw_dict[(7, 28000)] = 30
    bw_dict[(7, 7700)] = 4
    bw_dict[(8, 16384)] = 5
    bw_dict[(8, 32768)] = 10
    bw_dict[(8, 65536)] = 10
    bw_dict[(8, 32000)] = 10
    bw_dict[(8, 64000)] = 10

    #==============================================================
    # init cmd options
    #==============================================================
    iter_opt = ' -n 10 '
    misc_opt = ' -F --report_gbits '
    cm_opt = ''
    enable_dcqcn = False
    transport_opt = ''
    msg_size = 65536
    size_opt = ' -a '
    mtu_opt = ' -m 4096 '
    qp_opt = ''
    numsges_opt = ''
    bidir_opt = ''
    rxdepth_opt = ''
    txdepth_opt = ''
    atomic_opt = ''
    tc.client_bkg = False
    s_port = 12340
    e_port = s_port + 1
    server_idx = 0
    client_idx = 1
    bkg_timeout = 130
    sq_drain_opt = ''
    async_event_stats_opt = ''
    bw_opt = ''
    port_flap = False
    tc.tcpdump = False

    #==============================================================
    # update non-default cmd options
    #==============================================================
    # if both duration '-D' and count '-n' are given, count takes precedence
    if hasattr(tc.iterators, 'duration'):
        iter_opt = ' -D {} '.format(tc.iterators.duration)
        # For scale tests, we noticed that not all 8 threads start early,
        # so give extra timeout
        bkg_timeout = tc.iterators.duration + 60

    if hasattr(tc.iterators, 'count'):
        iter_opt = ' -n {} '.format(tc.iterators.count)

    if getattr(tc.iterators, 'rdma_cm', None) == 'yes':
        cm_opt = ' -R '

    if getattr(tc.iterators, 'transport', None) == 'UD':
        transport_opt = ' -c UD '

    if hasattr(tc.iterators, 'size'):
        msg_size = int(tc.iterators.size)
        size_opt = ' -s {} '.format(msg_size)

    if hasattr(tc.iterators, 'mtu'):
        mtu_opt = ' -m {} '.format(tc.iterators.mtu)

    numsges = getattr(tc.iterators, 'numsges', 1)
    if numsges > 1:
        numsges_opt = ' -W {} '.format(numsges)

    num_qp = getattr(tc.iterators, 'num_qp', 1)
    if num_qp > 1:
        qp_opt = ' -q {} '.format(num_qp)

    num_threads = getattr(tc.iterators, 'threads', 1)
    if num_threads > 1:
        tc.client_bkg = True
        e_port = s_port + tc.iterators.threads

    if getattr(tc.iterators, 'server', None) == 'no':
        server_idx = 1
        client_idx = 0

    if getattr(tc.iterators, 'bidir', None) == 'yes':
        bidir_opt = ' -b '

    if hasattr(tc.iterators, 'rxdepth'):
        rxdepth_opt = ' -r {} '.format(tc.iterators.rxdepth)

    if hasattr(tc.iterators, 'txdepth'):
        txdepth_opt = ' -t {} '.format(tc.iterators.txdepth)

    if getattr(tc.iterators, 'cmp_swp', None) == 'yes':
        atomic_opt = ' -A CMP_AND_SWAP '

    if getattr(tc.iterators, 'enable_dcqcn', None) == 'yes':
        enable_dcqcn = True

    if getattr(tc.iterators, 'sq_drain', None) == 'yes':
        sq_drain_opt = ' --sq-drain '

    if getattr(tc.iterators, 'async_event_stats', None) == 'yes':
        async_event_stats_opt = ' --report-async-ev-stats '

    if getattr(tc.iterators, 'check_bw', None) == 'yes' and \
       num_qp == 1 and \
       (numsges, msg_size) in bw_dict:
        bw_opt = ' -w {} '.format(
            math.ceil(bw_dict[(numsges, msg_size)] / num_threads))

    if getattr(tc.iterators, 'port_flap', None) == 'true' and \
       hasattr(tc.iterators, 'duration'):
        port_flap = True
        tc.client_bkg = True

    if getattr(tc.iterators, 'tcpdump', None) == 'yes' and \
       not hasattr(tc.iterators, 'duration'):
        tc.tcpdump = True
        iter_opt = ' -n 5 '

    #==============================================================
    # run the cmds
    #==============================================================
    w1 = tc.w[server_idx]
    w2 = tc.w[client_idx]

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                    (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)

    api.Logger.info("Starting %s test from %s" %
                    (tc.iterators.command, tc.cmd_descr))

    # Enable rdma sniffer and start tcpdump on Naples Hosts
    if tc.tcpdump == True:
        for w in [w1, w2]:
            if not w.IsNaples():
                continue

            # Get the parent interface (strip any VLAN suffix)
            tcpdump_intf = w.interface.split('.')[0]
            tcpdump_cmd = "sudo tcpdump -l --immediate-mode -i {} -XXX udp dst port 4791 -w rdma_capture.pcap &".format(
                tcpdump_intf)

            if tc.os == host.OS_TYPE_BSD:
                sniffer_cmd = 'sysctl dev.' + host.GetNaplesSysctl(
                    w.interface) + '.rdma_sniffer=1'
            elif tc.os == host.OS_TYPE_LINUX:
                sniffer_cmd = 'sudo ethtool --set-priv-flags ' + tcpdump_intf + ' rdma-sniffer on'
            else:
                continue

            api.Trigger_AddCommand(req, w.node_name, w.workload_name,
                                   sniffer_cmd)
            api.Trigger_AddCommand(req,
                                   w.node_name,
                                   w.workload_name,
                                   tcpdump_cmd,
                                   background=True)

    if enable_dcqcn == True:
        for w in [w1, w2]:
            if not w.IsNaples():
                continue

            if tc.os == host.OS_TYPE_BSD:
                cmd = 'sysctl sys.class.infiniband.' + host.GetNaplesSysClassSysctl(
                    w.interface) + '.dcqcn.match_default="1"'
            elif tc.os == host.OS_TYPE_LINUX:
                cmd = 'echo 1 > /sys/class/infiniband/' + host.GetNaplesSysClassSysctl(
                    w.interface) + '/dcqcn/match_default'
            else:
                continue

            api.Trigger_AddCommand(req,
                                   w.node_name,
                                   w.workload_name,
                                   cmd,
                                   timeout=120)

    #==============================================================
    # cmd for server
    #==============================================================
    for p in range(s_port, e_port):
        port_opt = ' -p {} '.format(p)
        dev_opt = ' -d {} '.format(tc.devices[server_idx])
        gid_opt = ' -x {} '.format(tc.gid[server_idx])

        cmd = tc.iterators.command
        cmd += dev_opt + iter_opt + gid_opt
        cmd += size_opt + mtu_opt + qp_opt
        cmd += cm_opt + transport_opt + misc_opt + port_opt + bidir_opt + rxdepth_opt + txdepth_opt + atomic_opt + bw_opt
        # add numsges_opt only for Naples
        if w1.IsNaples():
            cmd += numsges_opt

        api.Trigger_AddCommand(req,
                               w1.node_name,
                               w1.workload_name,
                               tc.ib_prefix[server_idx] + cmd,
                               background=True,
                               timeout=120)

    # On Naples-Mellanox setups, with Mellanox as server, it takes a few seconds before the server
    # starts listening. So sleep for a few seconds before trying to start the client
    cmd = 'sleep 2'
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name, cmd)

    #==============================================================
    # cmd for client
    #==============================================================
    for p in range(s_port, e_port):
        port_opt = ' -p {} '.format(p)
        dev_opt = ' -d {} '.format(tc.devices[client_idx])
        gid_opt = ' -x {} '.format(tc.gid[client_idx])

        cmd = tc.iterators.command
        cmd += dev_opt + iter_opt + gid_opt
        cmd += size_opt + mtu_opt + qp_opt
        cmd += cm_opt + transport_opt + misc_opt + port_opt + bidir_opt + rxdepth_opt + txdepth_opt + atomic_opt
        # add numsges_opt only for Naples
        if w2.IsNaples():
            cmd += numsges_opt + sq_drain_opt + async_event_stats_opt
        # append server's ip_address
        cmd += w1.ip_address

        api.Trigger_AddCommand(
            req,
            w2.node_name,
            w2.workload_name,
            tc.ib_prefix[client_idx] + cmd,
            background=tc.client_bkg,
            timeout=125)  # 5 secs more than the default test timeout of 120

    # Do the port flap only for duration tests
    if hasattr(tc.iterators, 'duration') and port_flap == True:
        num_flaps = int(getattr(tc.iterators, 'duration')) // 20
        num_flaps = num_flaps - 2  # Reduce the flap count so we don't flap during connection close

        export_path_cmd = "export PATH=$PATH:/platform/bin:/nic/bin:/platform/tools:/nic/tools"
        export_ld_path_cmd = "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib:/nic/lib"
        port_down_cmd = "/nic/bin/halctl debug port --port 1  --admin-state down"
        port_up_cmd = "/nic/bin/halctl debug port --port 1  --admin-state up"

        # Sleep for 10s to make sure that we don't flap during connection create
        cmd = 'sleep 10'
        api.Trigger_AddCommand(req,
                               w1.node_name,
                               w1.workload_name,
                               cmd,
                               timeout=20)

        for i in range(num_flaps):
            api.Trigger_AddNaplesCommand(req, w1.node_name, export_path_cmd)
            api.Trigger_AddNaplesCommand(req, w2.node_name, export_path_cmd)
            api.Trigger_AddNaplesCommand(req, w1.node_name, export_ld_path_cmd)
            api.Trigger_AddNaplesCommand(req, w2.node_name, export_ld_path_cmd)
            api.Trigger_AddNaplesCommand(req, w1.node_name, port_down_cmd)
            api.Trigger_AddNaplesCommand(req, w2.node_name, port_down_cmd)
            api.Trigger_AddNaplesCommand(req, w2.node_name, "sleep 1")
            api.Trigger_AddNaplesCommand(req, w1.node_name, port_up_cmd)
            api.Trigger_AddNaplesCommand(req, w2.node_name, port_up_cmd)
            api.Trigger_AddNaplesCommand(req, w2.node_name, "sleep 20")

        # Sleep to let the tests complete before terminating
        cmd = 'sleep 30'
        api.Trigger_AddCommand(req,
                               w1.node_name,
                               w1.workload_name,
                               cmd,
                               timeout=40)

    if tc.client_bkg and port_flap == False:
        # Since the client runs in the background, sleep for bkg_timeout secs
        # to allow the test to complete before verifying the result;
        # the command timeout is set slightly above the sleep duration.
        cmd = 'sleep ' + str(bkg_timeout)
        api.Trigger_AddCommand(req,
                               w1.node_name,
                               w1.workload_name,
                               cmd,
                               timeout=(bkg_timeout + 5))

    # try to kill lingering processes
    for w in [w1, w2]:
        if not w.IsNaples():
            continue

        cmd = 'killall ' + tc.iterators.command
        api.Trigger_AddCommand(req,
                               w.node_name,
                               w.workload_name,
                               cmd,
                               timeout=(bkg_timeout + 5))

    # print the next_qpid
    for w in [w1, w2]:
        if not w.IsNaples():
            continue

        if tc.os == host.OS_TYPE_BSD:
            cmd = 'sysctl dev.' + host.GetNaplesSysctl(
                w.interface) + '.rdma.info.next_qpid'
        elif tc.os == host.OS_TYPE_LINUX:
            pci = host.GetNaplesPci(w.node_name, w.interface)
            if pci is None:
                continue
            cmd = 'grep next_qpid /sys/kernel/debug/ionic/' + pci + '/lif0/rdma/info'
        else:
            continue

        api.Trigger_AddCommand(req,
                               w.node_name,
                               w.workload_name,
                               cmd,
                               timeout=(bkg_timeout + 5))

    if tc.tcpdump == True:
        api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, "sleep 5")

        tshark_cmd = "sudo tshark -r rdma_capture.pcap -T fields -e ip.addr -e infiniband.bth.opcode -e infiniband.aeth.msn"
        for w in [w1, w2]:
            if not w.IsNaples():
                continue

            api.Trigger_AddCommand(req, w.node_name, w.workload_name,
                                   "sudo killall tcpdump")
            api.Trigger_AddCommand(req,
                                   w.node_name,
                                   w.workload_name,
                                   tshark_cmd,
                                   timeout=60)

    # If dcqcn was enabled, disable it at the end of the test
    if enable_dcqcn == True:
        for w in [w1, w2]:
            if not w.IsNaples():
                continue

            if tc.os == host.OS_TYPE_BSD:
                cmd = 'sysctl sys.class.infiniband.' + host.GetNaplesSysClassSysctl(
                    w.interface) + '.dcqcn.match_default="0"'
            elif tc.os == host.OS_TYPE_LINUX:
                cmd = 'echo 0 > /sys/class/infiniband/' + host.GetNaplesSysClassSysctl(
                    w.interface) + '/dcqcn/match_default'
            else:
                continue

            api.Trigger_AddCommand(req,
                                   w.node_name,
                                   w.workload_name,
                                   cmd,
                                   timeout=120)

    #==============================================================
    # trigger the request
    #==============================================================
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
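
As a worked example of the bandwidth check above, with the table value from bw_dict and an illustrative thread count: the '-w' threshold is the table bandwidth split evenly across client threads and rounded up.

import math

bw_gbps = 50                # bw_dict[(2, 65536)] from the table above
num_threads = 4             # illustrative
bw_opt = ' -w {} '.format(math.ceil(bw_gbps / num_threads))
assert bw_opt == ' -w 13 '  # each thread must sustain at least 13 Gbit/s
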
Example #6
def Trigger(tc):
    max_pings = int(getattr(tc.args, "max_pings", 60))
    num_runs = int(getattr(tc.args, "num_runs", 1))
    serverCmd = None
    clientCmd = None
    mode = tc.initial_mode
    IPERF_TIMEOUT = 86400
    try:
        serverReq = api.Trigger_CreateExecuteCommandsRequest()
        clientReq = api.Trigger_CreateExecuteCommandsRequest()
        
        client = tc.wl_pair[0]
        server = tc.wl_pair[1]

        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address,
                        client.workload_name, client.ip_address)
        num_streams = int(getattr(tc.args, "num_streams", 2))
        api.Logger.info("Starting Iperf test from %s num-sessions %d"
                        % (tc.cmd_descr, num_streams))

        if tc.iterators.proto == 'tcp':
            port = api.AllocateTcpPort()
            serverCmd = iperf.ServerCmd(port, jsonOut=True, run_core=3)
            clientCmd = iperf.ClientCmd(server.ip_address, port, time=IPERF_TIMEOUT,
                                        jsonOut=True, num_of_streams=num_streams,
                                        run_core=3)
        else:
            port = api.AllocateUdpPort()
            serverCmd = iperf.ServerCmd(port, jsonOut=True, run_core=3)
            clientCmd = iperf.ClientCmd(server.ip_address, port, proto='udp',
                                        jsonOut=True, num_of_streams=num_streams,
                                        run_core=3)

        tc.serverCmd = serverCmd
        tc.clientCmd = clientCmd

        api.Trigger_AddCommand(serverReq, server.node_name, server.workload_name,
                               serverCmd, background=True, timeout=IPERF_TIMEOUT)

        api.Trigger_AddCommand(clientReq, client.node_name, client.workload_name,
                               clientCmd, background=True, timeout=IPERF_TIMEOUT)
        
        tc.server_resp = api.Trigger(serverReq)
        time.sleep(5)
        tc.iperf_client_resp = api.Trigger(clientReq)

        for _i in range(num_runs):
            RF = get_redfish_obj(tc.cimc_info, mode=mode)
            obs_mode = get_nic_mode(RF)
            api.Logger.info("Iteration %d: curr_mode %s" % (_i, obs_mode))
            if mode != obs_mode:
                raise RuntimeError("Expected NIC mode %s, observed %s" % (mode, obs_mode))

            next_mode = "dedicated" if mode == "ncsi" else "ncsi"
            if next_mode == "ncsi":
                ret = set_ncsi_mode(RF, mode="dhcp")
            else:
                ret = set_dedicated_mode(RF, mode="dhcp")
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Mode switch from %s -> %s failed" %(mode, next_mode))
                return api.types.status.FAILURE

            api.Logger.info("Switched mode to %s" % (next_mode))
            time.sleep(5)
            if ret == api.types.status.SUCCESS:
                curr_ilo_ip = tc.ilo_ip if next_mode == "dedicated" else tc.ilo_ncsi_ip
                ret = ping(curr_ilo_ip, max_pings)
                if ret != api.types.status.SUCCESS:
                    RF.logout()
                    raise RuntimeError('Unable to ping ILO, Port Switch fail from'
                                      ' %s -> %s' % (mode, next_mode))
                api.Logger.info("Mode switch from %s -> %s successful" % (mode, next_mode))
            else:
                raise RuntimeError('Mode switch config failed')
            mode = next_mode
    except Exception:
        api.Logger.error(traceback.format_exc())
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
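
The ping() helper used in the loop above is defined elsewhere; a hypothetical sketch of it, retrying an OS-level ping until the ILO answers or the attempt budget runs out:

import subprocess
import time

def ping(ip, max_pings):
    for _ in range(max_pings):
        # -c 1: single echo request, -W 2: two-second reply timeout (Linux ping)
        if subprocess.call(["ping", "-c", "1", "-W", "2", ip]) == 0:
            return api.types.status.SUCCESS
        time.sleep(1)
    return api.types.status.FAILURE
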
Example #7
def TestPing(tc, input_type, ipaf, pktsize, interval=0.2, count=20, deadline=0,
             pktlossverif=False, background=False, hping3=False):
    if input_type == 'user_input':
        api.Logger.info("user_input")
    elif input_type == 'local_only':
        api.Logger.info("local_only test")
        tc.workload_pairs = api.GetLocalWorkloadPairs()
    elif input_type == 'remote_only':
        api.Logger.info("remote_only test")
        tc.workload_pairs = api.GetRemoteWorkloadPairs()
    else:
        tc.workload_pairs = api.GetLocalWorkloadPairs()
        tc.workload_pairs += api.GetRemoteWorkloadPairs()

    if len(tc.workload_pairs) == 0:
        api.Logger.info("Skipping Testcase due to no workload pairs.")
        tc.skip = True

    if api.GetConfigNicMode() == 'hostpin' and ipaf == 'ipv6':
        api.Logger.info("Skipping Testcase: IPv6 not supported in hostpin mode.")
        return api.types.status.SUCCESS

    req = None
    if not api.IsSimulation():
        req = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        req = api.Trigger_CreateExecuteCommandsRequest(serial = False)

    tc.cmd_cookies = []

    deadline_str = ""
    count_str = ""
    cmd_timeout = 60
    if deadline:
        deadline_str = "-w {}".format(deadline)
        cmd_timeout = deadline + 5
    else:
        count_str = "-c {}".format(count)
        cmd_timeout = int(count * interval) + 5

    cmd_timeout = 60 if cmd_timeout < 60 else cmd_timeout

    for pair in tc.workload_pairs:
        w1 = pair[0]
        w2 = pair[1]
        if ipaf == 'ipv6':
            cmd_cookie = "%s(%s) --> %s(%s)" %\
                         (w1.workload_name, w1.ipv6_address, w2.workload_name, w2.ipv6_address)
            api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                                   "sudo ping6 -q -i %s %s -s %d %s %s" % (interval, count_str,
                                   pktsize, w2.ipv6_address, deadline_str),
                                   background=background, timeout=cmd_timeout)
        else:
            cmd_cookie = "%s(%s) --> %s(%s) pktsize: %s, count: %s, interval: %s, deadline: %s, background: %s" %\
                         (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address, pktsize, count, interval,
                          deadline, background)
            if hping3:
                # working around hping --quiet mode issue.
                cmd = f"sudo hping3 --icmp -i u{interval*1000000} {count_str} -d {pktsize} {w2.ip_address} 2> out 1> /dev/null && cat out"
            else:
                cmd = f"sudo ping -q -i {interval} {count_str} -s {pktsize} {w2.ip_address} {deadline_str}"
            api.Trigger_AddCommand(req, w1.node_name, w1.workload_name, cmd, background=background, timeout=cmd_timeout)

        api.Logger.info("Ping test from %s" % (cmd_cookie))
        tc.cmd_cookies.append(cmd_cookie)

    tc.resp = api.Trigger(req)
    if tc.resp is None:
        return api.types.status.FAILURE

    result = api.types.status.SUCCESS
    if not background:
        cookie_idx = 0
        for cmd in tc.resp.commands:
            api.Logger.info("ping results for %s" % (tc.cmd_cookies[cookie_idx]))
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                if api.GetConfigNicMode() == 'hostpin' and pktsize > 1024:
                    result = api.types.status.SUCCESS
                else:
                    result = api.types.status.FAILURE
            elif pktlossverif:
                ping_result = parsePingResult(cmd.stdout)
                if not ping_result:
                    api.Logger.info("ping failed in packet loss verification")
                    result = api.types.status.FAILURE
                    break
            cookie_idx += 1
    else:
        tc.bg_cmd_cookies = tc.cmd_cookies
        tc.bg_cmd_resp   = tc.resp

    return result
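
A typical invocation, assuming a testcase object tc prepared by the framework: ping every remote workload pair with 128-byte IPv4 packets and verify zero packet loss.

status = TestPing(tc, 'remote_only', 'ipv4', 128,
                  interval=0.2, count=20, pktlossverif=True)
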
Example #8
def Trigger(tc):
    triplet = GetThreeWorkloads()
    server = triplet[0][0]
    client1 = triplet[0][1]
    client2 = triplet[0][2]
    tc.cmd_cookies = []

    naples = server
    if not server.IsNaples():
        naples = client1
        if not client1.IsNaples():
            naples = client2
            if not client2.IsNaples():
                return api.types.status.SUCCESS

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (server.workload_name, server.ip_address, client1.workload_name, client1.ip_address)
    api.Logger.info("Starting RTSP test from %s" % (tc.cmd_descr))

    dir_path = os.path.dirname(os.path.realpath(__file__))
    fullpath = dir_path + '/' + "small.vob"
    api.Logger.info("fullpath %s" % (fullpath))
    resp = api.CopyToWorkload(server.node_name, server.workload_name, [fullpath], 'rtspdir')
    if resp is None:
       return api.types.status.FAILURE

    api.Trigger_AddCommand(req, client1.node_name, client1.workload_name,
                           "ls -al | grep video")
    tc.cmd_cookies.append("Before RTSP")

    server_cmd = "cd rtspdir && vobStreamer -p 2004 small.vob"
    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           server_cmd, background = True)
    tc.cmd_cookies.append("Run RTSP server")
   
    api.Trigger_AddCommand(req, client1.node_name, client1.workload_name,
                           "openRTSP rtsp://%s:2004/vobStream" % server.ip_address)
    tc.cmd_cookies.append("Run RTSP client1")

    ## Add Naples command validation
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                "/nic/bin/halctl show session --alg rtsp")
    tc.cmd_cookies.append("show session RTSP established")
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                            "/nic/bin/halctl show nwsec flow-gate | grep RTSP")
    tc.cmd_cookies.append("show flow-gate") 

    api.Trigger_AddCommand(req, client1.node_name, client1.workload_name,
                           "ls -al | grep video")
    tc.cmd_cookies.append("After RTSP")

    trig_resp = api.Trigger(req)

    # Extract the multicast destination IP from the RTSP client's output
    transport_hdr = "Transport: RTP/AVP;multicast;destination="
    dest = trig_resp.commands[2].stdout.find(transport_hdr)
    dest += len(transport_hdr)
    ip = trig_resp.commands[2].stdout[dest:dest+35]
    end = ip.find(";")

    req2 = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    api.Trigger_AddCommand(req2, client2.node_name, client2.workload_name,
                           "ip route add 224.0.0.0/4 dev %s"%(client2.interface), background = True)
    tc.cmd_cookies.append("ip route client2")

    api.Trigger_AddCommand(req2, client2.node_name, client2.workload_name,
                           "sed -i 's/geteuid/getppid/' /usr/bin/vlc && cvlc -vvv rtp://%s:8888 --start-time=00 --run-time=5" % ip[0:end])
    tc.cmd_cookies.append("Run RTP client2")

    api.Trigger_AddNaplesCommand(req2, naples.node_name,
                                "/nic/bin/halctl show session --srcip %s "%(client2.ip_address))
    tc.cmd_cookies.append("show session RTP")

    trig_resp2 = api.Trigger(req2)
    term_resp2 = api.Trigger_TerminateAllCommands(trig_resp2)
    tc.resp2 = api.Trigger_AggregateCommandsResponse(trig_resp2, term_resp2)

    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    ForceReleasePort("554/tcp", server)
    ForceReleasePort("554/tcp", client1)
    ForceReleasePort("554/tcp", client2)

    return api.types.status.SUCCESS
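
ForceReleasePort() is defined elsewhere; a hypothetical sketch that kills whatever still holds the RTSP control port (e.g. "554/tcp") on a workload:

def ForceReleasePort(port_spec, workload):
    # Kill any process still bound to the given port/proto on the workload
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddCommand(req, workload.node_name, workload.workload_name,
                           "sudo fuser -k %s" % port_spec)
    api.Trigger(req)
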
Example #9
def SetupFTPServer(node,
                   workload,
                   promiscous=False,
                   bind_fail=False,
                   listen_port=FTP_STANDARD_PORT,
                   restart=True):
    ftpdata = dir_path + '/' + "ftp_server.txt"
    api.Logger.info("fullpath %s" % (ftpdata))
    resp = api.CopyToWorkload(node, workload, [ftpdata], 'ftpdir')
    if resp is None:
        return None

    if promiscous == True:
        if bind_fail == True:
            vsftpd_file = "lftp_vsftpd_bind_fail.conf"
            vsftpd_conf = dir_path + '/' + "lftp_vsftpd_bind_fail.conf"
        else:
            vsftpd_file = "lftp_vsftpd.conf"
            vsftpd_conf = dir_path + '/' + "lftp_vsftpd.conf"
    else:
        vsftpd_file = "vsftpd.conf"
        vsftpd_conf = dir_path + '/' + "vsftpd.conf"
    resp = api.CopyToWorkload(node, workload, [vsftpd_conf], 'ftpdir')
    if resp is None:
        return None

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    api.Trigger_AddCommand(
        req, node, workload,
        "cp ftpdir/%s /etc/vsftpd/vsftpd.conf" % (vsftpd_file))
    if listen_port != FTP_STANDARD_PORT:
        api.Trigger_AddCommand(
            req, node, workload,
            "echo \"listen_port=%d\" | tee -a /etc/vsftpd/vsftpd.conf" %
            (listen_port))
    api.Trigger_AddCommand(req, node, workload,
                           "useradd -m -c \"admin\" -s /bin/bash admin")
    api.Trigger_AddCommand(req, node, workload,
                           "useradd -m -c \"admin1\" -s /bin/bash admin1")
    api.Trigger_AddCommand(req, node, workload,
                           "echo \"admin\" | tee -a /etc/vsftpd.userlist")
    api.Trigger_AddCommand(req, node, workload,
                           "echo \"admin1\" | tee -a /etc/vsftpd.userlist")
    api.Trigger_AddCommand(req, node, workload,
                           "echo \"linuxpassword\" | passwd --stdin admin")
    api.Trigger_AddCommand(req, node, workload,
                           "echo \"docker\" | passwd --stdin admin1")
    api.Trigger_AddCommand(
        req, node, workload,
        "mkdir /home/admin/ftp && mv ftpdir/ftp_server.txt /home/admin/ftp")
    api.Trigger_AddCommand(
        req, node, workload,
        "touch /home/admin/ftp/ftp_client.txt && chmod 666 /home/admin/ftp/ftp_client.txt"
    )
    if restart == True:
        api.Trigger_AddCommand(req, node, workload, "systemctl stop vsftpd")

    api.Trigger_AddCommand(req, node, workload, "systemctl start vsftpd")
    api.Trigger_AddCommand(req, node, workload, "systemctl enable vsftpd")
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    for cmd in trig_resp.commands:
        api.PrintCommandResults(cmd)
    return api.types.status.SUCCESS
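A hedged usage sketch for SetupFTPServer: the caller below is hypothetical, but the arguments mirror how node and workload names are used above (both are passed straight to api.CopyToWorkload and api.Trigger_AddCommand):

# Hypothetical caller; FTP_STANDARD_PORT is the same module constant
# referenced in the signature above.
pairs = api.GetRemoteWorkloadPairs()
ftp_server = pairs[0][0]
ret = SetupFTPServer(ftp_server.node_name, ftp_server.workload_name,
                     promiscuous=False, listen_port=FTP_STANDARD_PORT)
if ret != api.types.status.SUCCESS:
    api.Logger.error("FTP server setup failed")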
Example #10
def __runTraffic(intf):

    api.Logger.info("Run traffic: %s" % intf)

    client = None
    server = None
    clientCmd = None
    serverCmd = None
    clientReq = None
    serverReq = None

    for pairs in api.GetRemoteWorkloadPairs():
        client = pairs[0]
        api.Logger.error("Comparing client interface %s with %s" %
                         (client.interface, intf))
        if client.interface == intf:
            server = pairs[1]
            break

    if server is None:
        api.Logger.error("No workload found for interface %s" % intf)
        return api.types.status.FAILURE

    cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (server.workload_name, server.ip_address,\
                    client.workload_name, client.ip_address)
    api.Logger.info("Starting Iperf test from %s" % cmd_descr)

    port = api.AllocateTcpPort()
    serverCmd = iperf.ServerCmd(port)
    clientCmd = iperf.ClientCmd(server.ip_address, port)

    serverReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    api.Trigger_AddCommand(serverReq,
                           server.node_name,
                           server.workload_name,
                           serverCmd,
                           background=True)

    clientReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    api.Trigger_AddCommand(clientReq, client.node_name, client.workload_name,
                           clientCmd)

    # Server runs in the background
    server_resp = api.Trigger(serverReq)

    # Sleep for some time as bg may not have been started.
    time.sleep(5)
    client_resp = api.Trigger(clientReq)

    # Stop the backgrounded server
    term_resp = api.Trigger_TerminateAllCommands(server_resp)

    # We don't bother checking the iperf results; we only wanted traffic,
    # so just check that the commands succeeded.
    ret = api.types.status.SUCCESS
    for cmd in server_resp.commands:
        if cmd.exit_code != 0:
            ret = api.types.status.FAILURE
    for cmd in client_resp.commands:
        if cmd.exit_code != 0:
            ret = api.types.status.FAILURE
    for cmd in term_resp.commands:
        if cmd.exit_code != 0:
            ret = api.types.status.FAILURE

    return ret
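The three identical exit-code loops above could be folded into one helper; a small sketch under that refactoring (the helper name is hypothetical):

def __all_commands_ok(*responses):
    # Return SUCCESS only if every command in every response exited cleanly.
    for resp in responses:
        for cmd in resp.commands:
            if cmd.exit_code != 0:
                return api.types.status.FAILURE
    return api.types.status.SUCCESS

# usage: return __all_commands_ok(server_resp, client_resp, term_resp)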
Example #11
def Trigger(tc):
    pairs = api.GetLocalWorkloadPairs()
    server = pairs[0][0]
    client = pairs[0][1]
    tc.cmd_cookies = []

    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
    api.Logger.info("Starting SUNRPC test from %s" % (tc.cmd_descr))

    SetupNFSServer(server, client)

    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "sh -c 'ls -al /home/sunrpcmntdir | grep sunrpc_file.txt'")
    tc.cmd_cookies.append("Before rpc")

    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "sudo sh -c 'mkdir -p /home/sunrpcdir && mount %s:/home/sunrpcmntdir /home/sunrpcdir' "%(server.ip_address))
    tc.cmd_cookies.append("Create mount point")

    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "sudo chmod 777 /home/sunrpcdir")
    tc.cmd_cookies.append("add permission")
    
    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "mv sunrpcdir/sunrpc_file.txt /home/sunrpcdir/")
    tc.cmd_cookies.append("Create file")

    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "ls -al /home/sunrpcdir")
    tc.cmd_cookies.append("verify file")

    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "ls -al /home/sunrpcmntdir/")
    tc.cmd_cookies.append("After rpc")

    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "sh -c 'cat /home/sunrpcmntdir/sunrpc_file.txt'")
    tc.cmd_cookies.append("After rpc")

    # Add Naples command validation
    api.Trigger_AddNaplesCommand(req, naples.node_name, 
                           "/nic/bin/halctl show session --alg sun_rpc")
    tc.cmd_cookies.append("show session")

    api.Trigger_AddNaplesCommand(req, naples.node_name,
                           "/nic/bin/halctl show nwsec flow-gate")
    tc.cmd_cookies.append("show security flow-gate")

    # Sleep long enough for the flow-gate entry to age out
    # (the timeout would normally come from the config)
    api.Trigger_AddNaplesCommand(req, naples.node_name, "sleep 120", timeout=120)
    tc.cmd_cookies.append("sleep")

    api.Trigger_AddNaplesCommand(req, naples.node_name,
                           "/nic/bin/halctl show nwsec flow-gate | grep SUN_RPC")
    tc.cmd_cookies.append("After flow-gate ageout")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    CleanupNFSServer(server, client)
    return api.types.status.SUCCESS
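tc.cmd_cookies is appended in lockstep with the queued commands, so the matching Verify step (not shown here) would typically walk both lists together. A sketch under that assumption; note a real Verify must special-case commands that are expected to fail, such as the final grep after the flow-gate ages out:

def Verify(tc):
    # Hypothetical companion to the Trigger above.
    result = api.types.status.SUCCESS
    for cookie, cmd in zip(tc.cmd_cookies, tc.resp.commands):
        api.Logger.info("Results for: %s" % cookie)
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            result = api.types.status.FAILURE
    return result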
Example #12
def Trigger(tc):
    if tc.skip:
        return api.types.status.FAILURE

    #
    # Set-up Test Environment
    #
    tc.cmd_cookies = []
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                 (tc.server.workload_name, tc.server.ip_address,
                  tc.client.workload_name, tc.client.ip_address)
    api.Logger.info("Starting Single-IPv4-ICMP-Flow-Drops test from %s" %\
                   (cmd_cookie))

    #
    # Start TCPDUMP in background on Server/Client
    #
    req1 = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    #cmd_cookie = "start tcpdump on Server"
    #cmd = "sudo tcpdump -nnSXi {} > out.txt".format(tc.server.interface)
    #add_command(tc, req1, cmd_cookie, cmd, tc.server, True)

    #cmd_cookie = "start tcpdump on Client"
    #cmd = "sudo tcpdump -nnSXi {} > out.txt".format(tc.client.interface)
    #add_command(tc, req1, cmd_cookie, cmd, tc.client, True)

    #
    # Start with a clean slate by clearing all sessions/flows
    #
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session"
    add_naples_command(tc, req1, cmd_cookie, cmd, tc.naples)

    #
    # Make sure that Client<=>Server Forwarding is set up
    #
    cmd_cookie = "trigger ping: Create case"
    cmd = "ping -c1 %s -I %s" %\
          (tc.server.ip_address, tc.client.interface)
    add_command(tc, req1, cmd_cookie, cmd, tc.client, False)

    #
    # Send Good-Data from Client with ICMP-type=4,6 and Non-zero TTL
    #
    cmd_cookie = "send good data from Client for ICMP-Type=4: Create case"
    cmd = "hping3 --icmp --icmptype 4 --force-icmp --count 1 {}"\
    .format(tc.server.ip_address)
    add_command(tc, req1, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send good data from Client for ICMP-Type=6: Create case"
    cmd = "hping3 --icmp --icmptype 6 --force-icmp --count 1 {}"\
    .format(tc.server.ip_address)
    add_command(tc, req1, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send good data from Server for ICMP-Type=4: Create case"
    cmd = "hping3 --icmp --icmptype 4 --force-icmp --count 1 {}"\
    .format(tc.client.ip_address)
    add_command(tc, req1, cmd_cookie, cmd, tc.server, False)

    cmd_cookie = "send good data from Server for ICMP-Type=6: Create case"
    cmd = "hping3 --icmp --icmptype 6 --force-icmp --count 1 {}"\
    .format(tc.client.ip_address)
    add_command(tc, req1, cmd_cookie, cmd, tc.server, False)

    #
    # Send Bad-Data (TTL=0) from Client with ICMP-type=4,6
    #
    cmd_cookie = "send bad data from Client for ICMP-Type=4 TTL=0: Create case"
    cmd = "hping3 --icmp --icmptype 4 --force-icmp --ttl 0 --count 1 {}"\
    .format(tc.server.ip_address)
    add_command(tc, req1, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Client for ICMP-Type=6 TTL=0: Create case"
    cmd = "hping3 --icmp --icmptype 6 --force-icmp --ttl 0 --count 1 {}"\
    .format(tc.server.ip_address)
    add_command(tc, req1, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server for ICMP-Type=4 TTL=0: Create case"
    cmd = "hping3 --icmp --icmptype 4 --force-icmp --ttl 0 --count 1 {}"\
    .format(tc.client.ip_address)
    add_command(tc, req1, cmd_cookie, cmd, tc.server, False)

    cmd_cookie = "send bad data from Server for ICMP-Type=6 TTL=0: Create case"
    cmd = "hping3 --icmp --icmptype 6 --force-icmp --ttl 0 --count 1 {}"\
    .format(tc.client.ip_address)
    add_command(tc, req1, cmd_cookie, cmd, tc.server, False)

    #
    # Do "show session" command
    #
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req1, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Create case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"
    add_naples_command(tc, req1, cmd_cookie, cmd, tc.naples)

    tc.resp1 = api.Trigger(req1)
    for command in tc.resp1.commands:
        api.PrintCommandResults(command)

    #
    # Clearing all sessions/flows
    #
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session"
    add_naples_command(tc, req2, cmd_cookie, cmd, tc.naples)

    #
    # Make sure that Client<=>Server Forwarding is set up
    #
    cmd_cookie = "trigger ping: Re-use case"
    cmd = "ping -c1 %s -I %s" %\
          (tc.server.ip_address, tc.client.interface)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    #
    # Re-send Good-Data from Client with ICMP-type=4,6 and Non-zero TTL
    #
    cmd_cookie = "re-send good data from Client for ICMP-Type=4: Re-use case"
    cmd = "hping3 --icmp --icmptype 4 --force-icmp --count 1 {}"\
    .format(tc.server.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "re-send good data from Client for ICMP-Type=6: Re-use case"
    cmd = "hping3 --icmp --icmptype 6 --force-icmp --count 1 {}"\
    .format(tc.server.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "re-send good data from Server for ICMP-Type=4: Re-use case"
    cmd = "hping3 --icmp --icmptype 4 --force-icmp --count 1 {}"\
    .format(tc.client.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

    cmd_cookie = "re-send good data from Server for ICMP-Type=6: Re-use case"
    cmd = "hping3 --icmp --icmptype 6 --force-icmp --count 1 {}"\
    .format(tc.client.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

    #
    # Re-send Bad-Data (TTL=0) from Client with ICMP-type=4,6
    #
    cmd_cookie = "re-send bad data from Client for ICMP-Type=4 TTL=0: Re-use case"
    cmd = "hping3 --icmp --icmptype 4 --force-icmp --ttl 0 --count 1 {}"\
    .format(tc.server.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "re-send bad data from Client for ICMP-Type=6 TTL=0: Re-use case"
    cmd = "hping3 --icmp --icmptype 6 --force-icmp --ttl 0 --count 1 {}"\
    .format(tc.server.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "re-send bad data from Server for ICMP-Type=4 TTL=0: Re-use case"
    cmd = "hping3 --icmp --icmptype 4 --force-icmp --ttl 0 --count 1 {}"\
    .format(tc.client.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

    cmd_cookie = "re-send bad data from Server for ICMP-Type=6 TTL=0: Re-use case"
    cmd = "hping3 --icmp --icmptype 6 --force-icmp --ttl 0 --count 1 {}"\
    .format(tc.client.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

    #
    # Do "show session" command
    #
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req2, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Re-use case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"
    add_naples_command(tc, req2, cmd_cookie, cmd, tc.naples)

    tc.resp2 = api.Trigger(req2)
    for command in tc.resp2.commands:
        api.PrintCommandResults(command)

    #
    # Clearing all sessions/flows and Sleep for 45secs
    #
    req3 = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session; sleep 45"
    add_naples_command(tc, req3, cmd_cookie, cmd, tc.naples)

    #
    # Do "show session" command
    #
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req3, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Delete case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"
    add_naples_command(tc, req3, cmd_cookie, cmd, tc.naples)

    tc.resp3 = api.Trigger(req3)
    for command in tc.resp3.commands:
        api.PrintCommandResults(command)

    return api.types.status.SUCCESS
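add_command and add_naples_command are helpers defined elsewhere in this testbundle; from their call sites they queue a command and record its cookie on tc. A plausible sketch, offered as an assumption rather than the actual implementation:

def add_naples_command(tc, req, cmd_cookie, cmd, naples):
    # Inferred from call sites: queue on the Naples node, remember the cookie.
    api.Trigger_AddNaplesCommand(req, naples.node_name, cmd)
    tc.cmd_cookies.append(cmd_cookie)

def add_command(tc, req, cmd_cookie, cmd, wl, background):
    api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                           cmd, background=background)
    tc.cmd_cookies.append(cmd_cookie)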
Example #13
def Setup(tc):
    result = api.types.status.SUCCESS
    tc.workload_pairs = []
    tc.skip = False
    tc.sleep = getattr(tc.args, "sleep", 200)
    tc.allowed_down_time = getattr(tc.args, "allowed_down_time", 0)
    tc.pkg_name = getattr(tc.args, "naples_upgr_pkg", "naples_fw.tar")
    tc.node_selection = tc.iterators.selection
    tc.iperf = getattr(tc.args, "iperf", False)
    tc.trex = getattr(tc.args, "trex", False)
    tc.background_ping = getattr(tc.args, "background_ping", True)
    tc.failure_stage = getattr(tc.args, "failure_stage", None)
    tc.failure_reason = getattr(tc.args, "failure_reason", None)

    if tc.node_selection not in ["any", "all"]:
        api.Logger.error(
            "Incorrect Node selection option {} specified. Use 'any' or 'all'".
            format(tc.node_selection))
        tc.skip = True
        return api.types.status.FAILURE

    tc.nodes = api.GetNaplesHostnames()
    if tc.node_selection == "any":
        tc.nodes = [random.choice(tc.nodes)]

    if len(tc.nodes) == 0:
        api.Logger.error("No naples nodes found")
        return api.types.status.FAILURE
    api.Logger.info("Running ISSU upgrade test on %s" % (", ".join(tc.nodes)))

    req = api.Trigger_CreateExecuteCommandsRequest()
    for node in tc.nodes:
        api.Trigger_AddNaplesCommand(
            req, node, "rm -rf /data/techsupport/DSC_TechSupport_*")
        api.Trigger_AddNaplesCommand(req, node,
                                     "rm -rf /update/pds_upg_status.txt")
        api.Trigger_AddNaplesCommand(
            req, node, "touch /data/upgrade_to_same_firmware_allowed && sync")
        api.Trigger_AddNaplesCommand(req, node,
                                     "touch /data/no_watchdog && sync")
        api.Trigger_AddNaplesCommand(req, node, "/nic/tools/fwupdate -r")
    resp = api.Trigger(req)

    for cmd_resp in resp.commands:
        api.PrintCommandResults(cmd_resp)
        if cmd_resp.exit_code != 0:
            api.Logger.error("Setup failed %s", cmd_resp.command)
            tc.skip = True
            return api.types.status.FAILURE

    if upgrade_utils.ResetUpgLog(tc.nodes) != api.types.status.SUCCESS:
        api.Logger.error("Failed in resetting Upgrade Log files.")
        return api.types.status.FAILURE

    # verify mgmt connectivity
    result = traffic.VerifyMgmtConnectivity(tc.nodes)
    if result != api.types.status.SUCCESS:
        api.Logger.error("Failed in Mgmt Connectivity Check during Setup.")
        tc.skip = True
        return result

    if tc.failure_stage is not None:
        result = upgrade_utils.HitlessNegativeSetup(tc)
        if result != api.types.status.SUCCESS:
            return result

    # choose workloads for connectivity/traffic test
    result = ChooseWorkLoads(tc)
    if result != api.types.status.SUCCESS or tc.skip:
        api.Logger.error("Failed to Choose Workloads.")
        return result

    # verify endpoint connectivity
    if VerifyConnectivity(tc) != api.types.status.SUCCESS:
        api.Logger.error("Failed in Connectivity Check during Setup.")
        if not SKIP_CONNECTIVITY_FAILURE:
            result = api.types.status.FAILURE
            tc.skip = True
            return result

    # setup packet test based on upgrade_mode
    result = PacketTestSetup(tc)
    if result != api.types.status.SUCCESS or tc.skip:
        api.Logger.error("Failed in Packet Test setup.")
        return result

    # Update security profile
    if utils.UpdateSecurityProfileTimeouts(tc) != api.types.status.SUCCESS:
        api.Logger.error("Failed to update the security profile")
        tc.skip = True
        result = api.types.status.FAILURE

    api.Logger.info(f"Upgrade: Setup returned {result}")
    return result
Example #14
def ConfigWorkloadSecondaryIp(workload, is_add, sec_ip_count_per_intf=1):
    wl_sec_ip_list = []
    if workload.uplink_vlan != 0:
        return wl_sec_ip_list

    nodes = api.GetWorkloadNodeHostnames()
    max_untag_wl = 0
    max_tag_wl = 0
    op = "add" if is_add else "del"

    is_wl_bm_type = False
    for node in nodes:
        if api.IsBareMetalWorkloadType(node):
            is_wl_bm_type = True
        workloads = api.GetWorkloads(node)
        num_untag_wl_in_node = 0
        num_tag_wl_in_node = 0
        for wl in workloads:
            if (wl.uplink_vlan == 0):
                num_untag_wl_in_node += 1
            else:
                num_tag_wl_in_node += 1
        if num_untag_wl_in_node > max_untag_wl:
            max_untag_wl = num_untag_wl_in_node
        if num_tag_wl_in_node > max_tag_wl:
            max_tag_wl = num_tag_wl_in_node
        #api.Logger.info("Node {} WL #untag {} #tag {} ".format(node, num_untag_wl_in_node, num_tag_wl_in_node))

    #api.Logger.info("Topo Max untag WL {} Max tag WL {} ".format(max_untag_wl, max_tag_wl))
    sec_ip_incr_step = max_untag_wl
    if not is_wl_bm_type:
        sec_ip_incr_step += max_tag_wl

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    wl = workload
    sec_ipv4_allocator = resmgr.IpAddressStep(
        wl.ip_prefix.split('/')[0],
        str(ipaddress.IPv4Address(sec_ip_incr_step)), sec_ip_count_per_intf)
    # The first Alloc() appears to yield the base address (the workload's
    # primary IP); its value is overwritten by the loop below.
    sec_ip4_addr_str = str(sec_ipv4_allocator.Alloc())
    for i in range(sec_ip_count_per_intf):
        sec_ip4_addr_str = str(sec_ipv4_allocator.Alloc())
        sec_ip_prefix = sec_ip4_addr_str + "/" + str(
            wl.ip_prefix.split('/')[1])
        wl_sec_ip_list.append(sec_ip4_addr_str)
        #api.Logger.info("Node {} WL {} Intf {} Secondary IP {} Op {}".format(
        #                 wl.node_name, wl.workload_name, wl.interface, sec_ip_prefix, op))
        api.Trigger_AddCommand(
            req, wl.node_name, wl.workload_name,
            "ip address %s %s dev %s " % (op, sec_ip_prefix, wl.interface))
    trig_resp = api.Trigger(req)

    #api.Logger.info("Response ")
    #for cmd in trig_resp.commands:
    #    api.PrintCommandResults(cmd)

    return wl_sec_ip_list
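A worked example of the address stepping above, under the assumption that IpAddressStep.Alloc() returns the base address first and then advances by the given increment. The concrete values are hypothetical:

# Hypothetical: wl.ip_prefix = "10.0.0.1/24", sec_ip_incr_step = 4,
# sec_ip_count_per_intf = 2.
allocator = resmgr.IpAddressStep("10.0.0.1",
                                 str(ipaddress.IPv4Address(4)),  # "0.0.0.4"
                                 2)
base = allocator.Alloc()   # 10.0.0.1, the primary address (discarded above)
sec1 = allocator.Alloc()   # 10.0.0.5 -> secondary prefix "10.0.0.5/24"
sec2 = allocator.Alloc()   # 10.0.0.9 -> secondary prefix "10.0.0.9/24"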
Example #15
def Trigger(tc):
    if tc.ignore:
        return api.types.status.SUCCESS

    if tc.error:
        return api.types.status.FAILURE

    protoDir1 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('mirror', tc.iterators.proto)
    api.Logger.info("Template Config files location: %s" % protoDir1)
    protoDir2 = api.GetTopologyDirectory() +\
                "/gen/telemetry/{}/{}".format('flowmon', tc.iterators.proto)
    api.Logger.info("Template Config files location: %s" % protoDir2)

    result = api.types.status.SUCCESS

    count = 0
    MirrorPolicies = utils.GetTargetJsons('mirror', tc.iterators.proto)
    FlowMonPolicies = utils.GetTargetJsons('flowmon', tc.iterators.proto)
    #mirror_policy_idx = 0
    flowmon_policy_idx = 0
    ret_count = 0
    for mirror_json in MirrorPolicies:
        #
        # Get template-Mirror Config
        #
        newMirrorObjects = agent_api.AddOneConfig(mirror_json)
        if len(newMirrorObjects) == 0:
            api.Logger.error("Adding new Mirror objects to store failed")
            tc.error = True
            return api.types.status.FAILURE
        agent_api.RemoveConfigObjects(newMirrorObjects)

        #
        # Ignore Multi-collector template config's, since Expanded-Telemetry
        # testbundle dynamically creates such config's
        #
        if len(newMirrorObjects[0].spec.collectors) > 1:
            continue

        idx = 0
        for flowmon_json in FlowMonPolicies:
            if idx < flowmon_policy_idx:
                idx += 1
                continue

            #
            # Get template-FlowMon Config
            #
            newFlowMonObjects = agent_api.AddOneConfig(flowmon_json)
            if len(newFlowMonObjects) == 0:
                api.Logger.error("Adding new FlowMon objects to store failed")
                tc.error = True
                return api.types.status.FAILURE
            agent_api.RemoveConfigObjects(newFlowMonObjects)

            #
            # Ignore Multi-collector template config's, since Expanded-Telemetry
            # testbundle dynamically creates such config's
            #
            if len(newFlowMonObjects[0].spec.exports) > 1:
                flowmon_policy_idx += 1
                idx += 1
                continue

            #
            # Modify template-Mirror / template-FlowMon Config to make sure that
            # Naples-node # act as either source or destination
            #
            # Set up Collector in the remote node
            #
            eutils.generateMirrorConfig(tc, mirror_json, newMirrorObjects)
            eutils.generateFlowMonConfig(tc, flowmon_json, newFlowMonObjects)

            ret_count = 0
            for i in range(0, len(tc.mirror_verif)):
                #
                # If Execution-Optimization is enabled, no need to run the test
                # for the same protocol more than once
                #
                if i > 0 and tc.mirror_verif[i]['protocol'] ==\
                             tc.mirror_verif[i-1]['protocol']:
                    continue

                #
                # Flow-ERSPAN/FlowMon for TCP-traffic is not tested (yet) in
                # Classic-mode until applicable pkt-trigger tools are identified
                #
                if tc.classic_mode and\
                   tc.mirror_verif[i]['protocol'] == 'tcp':
                    continue

                #
                # Push Mirror / FlowMon Config to Naples
                #
                ret = agent_api.PushConfigObjects(newMirrorObjects,
                                [tc.naples.node_name], [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    api.Logger.error("Unable to push mirror objects")
                    tc.error = True
                    return api.types.status.FAILURE

                ret = agent_api.PushConfigObjects(newFlowMonObjects,
                                [tc.naples.node_name], [tc.naples_device_name])
                if ret != api.types.status.SUCCESS:
                    agent_api.DeleteConfigObjects(newMirrorObjects, 
                              [tc.naples.node_name], [tc.naples_device_name])
                    api.Logger.error("Unable to push flowmon objects")
                    tc.error = True
                    return api.types.status.FAILURE

                #
                # Establish Forwarding set up between Naples-peer and Collectors
                #
                eutils.establishForwardingSetup(tc)

                #
                # Give a little time for flows clean-up to happen so that
                # stale IPFIX records don't show up
                #
                if tc.classic_mode:
                    time.sleep(1)

                req_tcpdump_erspan = api.Trigger_CreateExecuteCommandsRequest(\
                                     serial = True)
                req_tcpdump_flowmon = api.Trigger_CreateExecuteCommandsRequest(\
                                      serial = True)
                for c in range(0, len(tc.flow_collector)):
                    #
                    # Set up TCPDUMP's on the collector
                    #
                    idx = tc.flow_collector_idx[c]
                    if tc.flow_collector[c].IsNaples():
                        cmd = "tcpdump -c 600 -XX -vv -nni {} ip proto gre\
                        and dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                        .format(tc.flow_collector[c].interface, 
                                tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 600 -XX -vv -nni {} ip proto gre\
                        and dst {} --immediate-mode -U -w flow-mirror-{}.pcap"\
                        .format(tc.flow_collector[c].interface, 
                                tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_erspan,
                                       tc.flow_collector[c], cmd, True)

                for c in range(0, len(tc.flowmon_collector)):
                    idx = tc.flowmon_collector_idx[c]
                    if tc.flowmon_collector[c].IsNaples():
                        cmd = "tcpdump -c 600 -XX -vv -nni {} udp and\
                               dst port {} and dst {} --immediate-mode\
                               -U -w flowmon-{}.pcap"\
                        .format(tc.flowmon_collector[c].interface,
                        tc.export_port[c], tc.collector_ip_address[idx], c)
                    else:
                        cmd = "tcpdump -p -c 600 -XX -vv -nni {} udp and\
                               dst port {} and dst {} --immediate-mode\
                               -U -w flowmon-{}.pcap"\
                        .format(tc.flowmon_collector[c].interface,
                        tc.export_port[c], tc.collector_ip_address[idx], c)
                    eutils.add_command(req_tcpdump_flowmon,
                                       tc.flowmon_collector[c], cmd, True)

                resp_tcpdump_erspan = api.Trigger(req_tcpdump_erspan)
                for cmd in resp_tcpdump_erspan.commands:
                    api.PrintCommandResults(cmd)

                #
                # Classic mode requires a delay to make sure that TCPDUMP 
                # background process is fully up
                #
                if tc.classic_mode:
                    time.sleep(2)

                resp_tcpdump_flowmon = api.Trigger(req_tcpdump_flowmon)
                for cmd in resp_tcpdump_flowmon.commands:
                    api.PrintCommandResults(cmd)

                #
                # Classic mode requires a delay to make sure that TCPDUMP 
                # background process is fully up
                #
                if tc.classic_mode:
                    time.sleep(2)

                #
                # Trigger packets for ERSPAN / FLOWMON to take effect
                #
                tc.protocol = tc.mirror_verif[i]['protocol']
                tc.dest_port = utils.GetDestPort(tc.mirror_verif[i]['port'])
                if api.GetNodeOs(tc.naples.node_name) == 'linux':
                    eutils.triggerTrafficInClassicModeLinux(tc)
                else:
                    eutils.triggerTrafficInHostPinModeOrFreeBSD(tc)

                #
                # Dump sessions/flows/P4-tables for debug purposes
                #
                eutils.showSessionAndP4TablesForDebug(tc, tc.flow_collector,
                                                      tc.flow_collector_idx)

                #
                # Terminate TCPDUMP background process
                #
                term_resp_tcpdump_erspan = api.Trigger_TerminateAllCommands(\
                                           resp_tcpdump_erspan)
                tc.resp_tcpdump_erspan = api.Trigger_AggregateCommandsResponse(\
                                  resp_tcpdump_erspan, term_resp_tcpdump_erspan)

                term_resp_tcpdump_flowmon = api.Trigger_TerminateAllCommands(\
                                            resp_tcpdump_flowmon)
                tc.resp_tcpdump_flowmon =api.Trigger_AggregateCommandsResponse(\
                                resp_tcpdump_flowmon, term_resp_tcpdump_flowmon)

                # Delete the objects
                agent_api.DeleteConfigObjects(newMirrorObjects, 
                          [tc.naples.node_name], [tc.naples_device_name])
                agent_api.DeleteConfigObjects(newFlowMonObjects, 
                          [tc.naples.node_name], [tc.naples_device_name])

                #
                # Make sure that Mirror/Flowmon-config has been removed
                #
                tc.resp_cleanup = eutils.showP4TablesForValidation(tc)

                #
                # Validate ERSPAN packets reception
                #
                tc.tcp_erspan_pkts_expected = \
                                   NUMBER_OF_TCP_ERSPAN_PACKETS_PER_SESSION
                tc.udp_erspan_pkts_expected = (tc.udp_count << 1)
                tc.icmp_erspan_pkts_expected = (tc.icmp_count << 1)

                protocol = tc.protocol
                if tc.protocol == 'udp' and tc.iterators.proto == 'mixed':
                    tc.protocol = 'udp-mixed'
                    tc.icmp_erspan_pkts_expected = tc.udp_erspan_pkts_expected

                tc.feature = 'flow-erspan'
                res_1 = eutils.validateErspanPackets(tc, tc.flow_collector,
                                                     tc.flow_collector_idx)

                #
                # Validate IPFIX packets reception
                #
                tc.protocol = protocol
                tc.feature = 'flowmon'
                res_2 = eutils.validateIpFixPackets(tc)

                #
                # Validate Config-cleanup
                #
                res_3 = eutils.validateConfigCleanup(tc)

                if res_1 == api.types.status.FAILURE or\
                   res_2 == api.types.status.FAILURE or\
                   res_3 == api.types.status.FAILURE:
                    result = api.types.status.FAILURE

                if result == api.types.status.FAILURE:
                    break

                ret_count += 1

            flowmon_policy_idx += 1
            break

        if result == api.types.status.FAILURE:
            break

        count += ret_count

    tc.SetTestCount(count)
    return result
Example #16
def Trigger(tc):

    if tc.skip:
        return api.types.status.SUCCESS
    result = api.types.status.SUCCESS

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    if tc.args.mode == "non-promiscuous":
        # Run tcpdump in non-promiscuous mode
        tcpdump_flags_extra = " -p "
    else:
        tcpdump_flags_extra = ""

    # Run tcpdump on all interfaces
    for intf in tc.all_intfs:
        if api.GetNodeOs(
                tc.naples_node) == "windows" and intf in tc.host_intfs:
            intfGuid = ionic_utils.winIntfGuid(tc.naples_node, intf)
            intfVal = str(ionic_utils.winTcpDumpIdx(tc.naples_node, intfGuid))
            cmd = "/mnt/c/Windows/System32/tcpdump.exe"
        else:
            intfVal = intf
            cmd = "tcpdump"

        cmd += " -l -i " + intfVal + tcpdump_flags_extra + " -tne ether host " + tc.random_mac
        __PR_AddCommand(intf, tc, req, cmd, True)

    cmd = "sleep 1; ping -c 5 " + tc.target_IP + ";sleep 1"
    api.Trigger_AddHostCommand(req, tc.peer_node, cmd)
    trig_resp = api.Trigger(req)

    # Verify packet filter flags of each interface in halctl
    show_lif_resp, ret = hal_show_utils.GetHALShowOutput(tc.naples_node, "lif")
    if not ret:
        api.Logger.error("Something went wrong with GetHALShowOutput")
        result = api.types.status.FAILURE

    lif_obj_docs = yaml.safe_load_all(show_lif_resp.commands[0].stdout)

    for lif_obj in lif_obj_docs:

        if lif_obj is None:
            break

        # See if the lif belongs to any of the interfaces in tc.all_intfs (interface lif)
        intf_lif = False
        for intf in tc.all_intfs:
            if api.GetNodeOs(
                    tc.naples_node) == "windows" and intf in tc.host_intfs:
                halIntfName = ionic_utils.winHalIntfName(tc.naples_node, intf)
            else:
                halIntfName = intf
            if lif_obj['spec']['name'].startswith(halIntfName):
                intf_lif = True
                break

        lif_pr_flag = lif_obj['spec']['packetfilter']['receivepromiscuous']

        # A lif must have its PR flag set when it is an interface lif and
        # tc.args.mode is 'promiscuous'
        if tc.args.mode == "promiscuous":
            if intf_lif and not lif_pr_flag:
                api.Logger.error(
                    "halctl PR flag not set for promiscuous mode interface [%s]"
                    % (lif_obj['spec']['name']))
                result = api.types.status.FAILURE
        else:
            if lif_pr_flag:
                api.Logger.error(
                    "halctl PR flag set for non-promiscuous mode LIF [%s]" %
                    (lif_obj['spec']['name']))
                result = api.types.status.FAILURE

    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    # Search tcpdump stdout for packets with dst MAC matching tc.random_mac
    pattern = "> " + tc.random_mac

    cmds = resp.commands[:-1]
    for intf, cmd in zip(tc.all_intfs, cmds):
        found = cmd.stdout.find(pattern)
        if found != -1 and not tc.expect_pkt[intf]:
            api.Logger.error(
                "Interface [%s] received an unknown-unicast packet when none was expected"
                % (intf))
            result = api.types.status.FAILURE
        elif found == -1 and tc.expect_pkt[intf]:
            api.Logger.error(
                "Interface [%s] did not receive the expected unknown-unicast packet"
                % (intf))
            result = api.types.status.FAILURE

    # In case of testcase failure, dump the entire command output for further debug
    if result == api.types.status.FAILURE:
        api.Logger.error(
            " ============================= COMMAND DUMP ================================================"
        )
        for cmd in resp.commands:
            api.PrintCommandResults(cmd)
        api.Logger.error(
            " =============================  END  ========================================================"
        )

    return result
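The lif parsing above can be exercised stand-alone; a minimal sketch using PyYAML with made-up document contents shaped like the fields the code reads (safe_load_all is the safer variant of load_all):

import yaml

# Hypothetical halctl-style multi-document YAML.
sample = """\
spec:
  name: lif-70
  packetfilter:
    receivepromiscuous: true
---
spec:
  name: lif-71
  packetfilter:
    receivepromiscuous: false
"""
for lif_obj in yaml.safe_load_all(sample):
    if lif_obj is None:
        continue
    print(lif_obj['spec']['name'],
          lif_obj['spec']['packetfilter']['receivepromiscuous'])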
Example #17
def Trigger(tc):
    if tc.args.type == 'local_only':
        pairs = api.GetLocalWorkloadPairs()
    else:
        pairs = api.GetRemoteWorkloadPairs()
    tc.cmd_cookies = []
    server, client = pairs[0]
    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS
        else:
            client, server = pairs[0]

    tc.client = client
    tc.server = server

    addPktFltrRuleOnEp(tc, True)
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
    api.Logger.info("Starting UDP Aging test from %s" % (cmd_cookie))

    cmd_cookie = "halctl clear session"
    api.Trigger_AddNaplesCommand(req, server.node_name,
                                 "/nic/bin/halctl clear session")
    tc.cmd_cookies.append(cmd_cookie)

    server_port = api.AllocateUdpPort()
    timeout_str = 'udp-timeout'

    # Step 0: Update the timeout in the config object, then read back the
    # effective value
    if not tc.args.skip_security_prof:
        update_timeout(timeout_str, tc.iterators.timeout)
        timeout = get_timeout(timeout_str)
    else:
        timeout = DEFAULT_UDP_TIMEOUT

    #profilereq = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    #api.Trigger_AddNaplesCommand(profilereq, naples.node_name, "/nic/bin/halctl show nwsec profile --id 11")
    #profcommandresp = api.Trigger(profilereq)
    #cmd = profcommandresp.commands[-1]
    #for command in profcommandresp.commands:
    #    api.PrintCommandResults(command)
    #timeout = get_haltimeout(timeout_str, cmd)
    #tc.config_update_fail = 0
    #if (timeout != timetoseconds(tc.iterators.timeout)):
    #    tc.config_update_fail = 1

    cmd_cookie = "start server"
    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "sudo hping3 -9 %s" % (server_port),
                           background=True)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "start client"
    api.Trigger_AddCommand(
        req, client.node_name, client.workload_name,
        "sudo hping3 -2 %s -p %s -c 1" % (server.ip_address, server_port))
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Before aging show session"
    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --dstip %s | grep UDP" %
        (server.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    # Sleep for the configured timeout plus grace time
    cmd_cookie = "sleep"
    api.Trigger_AddNaplesCommand(req,
                                 naples.node_name,
                                 "sleep %s" % (timeout + GRACE_TIME),
                                 timeout=300)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "After aging show session"
    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --dstip %s | grep UDP" %
        (server.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
Example #18
# Method of a tunnel-manager class (shown out of context): queues a Delete
# for every tunnel in self.__tunnels and fires them in one request.
def DeleteTunnels(self, serial=True):
    req = api.Trigger_CreateExecuteCommandsRequest(serial)
    for tunnel in self.__tunnels:
        tunnel.Delete(req)
    return self.__process_req(req)
Example #19
def Trigger(tc):
    serverCmd = None
    clientCmd = None
    IPERF_TIMEOUT = 86400
    try:
        serverReq = api.Trigger_CreateExecuteCommandsRequest()
        clientReq = api.Trigger_CreateExecuteCommandsRequest()

        client = tc.wl_pair[0]
        server = tc.wl_pair[1]

        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address,
                        client.workload_name, client.ip_address)
        num_streams = int(getattr(tc.args, "num_streams", 2))
        api.Logger.info("Starting Iperf test from %s num-sessions %d" %
                        (tc.cmd_descr, num_streams))

        if tc.iterators.proto == 'tcp':
            port = api.AllocateTcpPort()
            serverCmd = iperf.ServerCmd(port, jsonOut=True, run_core=3)
            clientCmd = iperf.ClientCmd(server.ip_address,
                                        port,
                                        time=IPERF_TIMEOUT,
                                        jsonOut=True,
                                        num_of_streams=num_streams,
                                        run_core=3)
        else:
            port = api.AllocateUdpPort()
            serverCmd = iperf.ServerCmd(port, jsonOut=True, run_core=3)
            clientCmd = iperf.ClientCmd(server.ip_address,
                                        port,
                                        proto='udp',
                                        jsonOut=True,
                                        num_of_streams=num_streams,
                                        run_core=3)

        tc.serverCmd = serverCmd
        tc.clientCmd = clientCmd

        api.Trigger_AddCommand(serverReq,
                               server.node_name,
                               server.workload_name,
                               serverCmd,
                               background=True,
                               timeout=IPERF_TIMEOUT)

        api.Trigger_AddCommand(clientReq,
                               client.node_name,
                               client.workload_name,
                               clientCmd,
                               background=True,
                               timeout=IPERF_TIMEOUT)

        tc.server_resp = api.Trigger(serverReq)
        time.sleep(5)
        tc.iperf_client_resp = api.Trigger(clientReq)

        _run_vmedia_traffic(tc.node_name)
    except Exception:
        api.Logger.error(traceback.format_exc())
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
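Both iperf halves are left running in the background here; a later step (Verify or Teardown, not shown) is assumed to stop them and gather output. A sketch of that follow-up using the response handles stashed on tc:

# Assumed follow-up step: stop the background iperf pair and aggregate output.
term_server = api.Trigger_TerminateAllCommands(tc.server_resp)
server_resp = api.Trigger_AggregateCommandsResponse(tc.server_resp, term_server)

term_client = api.Trigger_TerminateAllCommands(tc.iperf_client_resp)
client_resp = api.Trigger_AggregateCommandsResponse(tc.iperf_client_resp,
                                                    term_client)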
Example #20
def Trigger(tc):
    tc.cmd_cookies = []

    pairs = api.GetRemoteWorkloadPairs()
    w1 = pairs[0][0]
    w2 = pairs[0][1]

    if w1.IsNaples() and w2.IsNaples():
        api.Logger.info("naples-naples unsupported currently for tcp-proxy")
        return api.types.status.DISABLED

    store_proxy_objects = netagent_cfg_api.QueryConfigs(kind='TCPProxyPolicy')
    if len(store_proxy_objects) == 0:
        api.Logger.error("No tcp proxy objects in store")
        return api.types.status.FAILURE

    ret = netagent_cfg_api.PushConfigObjects(store_proxy_objects,
                                             ignore_error=True)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Unable to push tcp_proxy policies")
        return api.types.status.FAILURE

    get_config_objects = netagent_cfg_api.GetConfigObjects(store_proxy_objects)
    if len(get_config_objects) == 0:
        api.Logger.error("Unable to fetch newly pushed objects")
        return api.types.status.FAILURE

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    if w2.IsNaples():
        cmd_cookie = "tcp proxy sessions on %s BEFORE running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddNaplesCommand(req, w2.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cmd_cookie)

    if w1.IsNaples():
        cmd_cookie = "tcp proxy sessions on %s BEFORE running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddNaplesCommand(req, w1.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cmd_cookie)

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s) on tcp proxy port %s pktsize %s" %\
                   (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address, tc.iterators.port, tc.iterators.pktsize)

    api.Logger.info("Starting Iperf test from %s" % (tc.cmd_descr))

    cmd_cookie = "Running iperf server on %s" % (w1.workload_name)
    api.Trigger_AddCommand(req,
                           w1.node_name,
                           w1.workload_name,
                           "iperf3 -s -p %s" % (tc.iterators.port),
                           background=True)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Running iperf client on %s" % (w2.workload_name)
    api.Trigger_AddCommand(
        req, w2.node_name, w2.workload_name, "iperf3 -c %s -p %s -M %s" %
        (w1.ip_address, tc.iterators.port, tc.iterators.pktsize))
    tc.cmd_cookies.append(cmd_cookie)

    if w1.IsNaples():
        cmd_cookie = "tcp proxy sessions on %s AFTER running iperf traffic" % (
            w1.node_name)
        api.Trigger_AddNaplesCommand(req, w1.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cmd_cookie)

    if w2.IsNaples():
        cmd_cookie = "tcp proxy sessions on %s AFTER running iperf traffic" % (
            w2.node_name)
        api.Trigger_AddNaplesCommand(req, w2.node_name,
                                     "/nic/bin/halctl show tcp-proxy session")
        tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
Example #21
def __execute_athena_client(node, nic, tc, cmd_options):
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd = __CMDPATH + __CMDBASE + __CMDSEP + cmd_options
    api.Trigger_AddNaplesCommand(req, node, cmd, nic, timeout=__CMD_TIMEOUT)
    tc.resp = api.Trigger(req)
Example #22
def Trigger(tc):
    pkgname = os.path.basename(tc.args.package)
    req = api.Trigger_CreateExecuteCommandsRequest()
    for n in tc.nodes:
        api.Trigger_AddHostCommand(req, n, "mkdir -p %s" % api.GetHostToolsDir())
        api.Trigger_AddHostCommand(req, n, "tar xf %s" % pkgname)
        if tc.os == 'linux':
            api.Trigger_AddHostCommand(req, n, "make modules", rundir = pnsodefs.PNSO_DRIVER_DIR, timeout=120)
        else:
            api.Trigger_AddHostCommand(req, n, "./freebsd_build.sh", rundir = pnsodefs.PNSO_DRIVER_DIR, timeout=120)
        api.Trigger_AddHostCommand(req, n, "ls sonic.ko", rundir = pnsodefs.PNSO_DRIVER_DIR)
        api.Trigger_AddHostCommand(req, n, "ls pencake.ko", rundir = pnsodefs.PNSO_DRIVER_DIR)
        api.Trigger_AddHostCommand(req, n, "dmesg -c 2>&1 > /dev/null")
        api.Trigger_AddHostCommand(req, n, "cp sonic.ko %s" % api.GetHostToolsDir(), rundir = pnsodefs.PNSO_DRIVER_DIR)
        api.Trigger_AddHostCommand(req, n, "cp pencake.ko %s" % api.GetHostToolsDir(), rundir = pnsodefs.PNSO_DRIVER_DIR)
        api.Trigger_AddHostCommand(req, n, "lspci -d 1dd8:")
        
        if tc.os == 'linux':
            api.Trigger_AddHostCommand(req, n, "rmmod pencake || true", rundir = pnsodefs.PNSO_DRIVER_DIR,
                                       timeout = int(tc.args.maxcpus) * 100)
            api.Trigger_AddHostCommand(req, n, "rmmod sonic || true", rundir = pnsodefs.PNSO_DRIVER_DIR,
                                       timeout = int(tc.args.maxcpus) * 100)
            api.Trigger_AddHostCommand(req, n, 
                        "insmod sonic.ko core_count=%d" % tc.args.maxcpus,
                        rundir = pnsodefs.PNSO_DRIVER_DIR, timeout = int(tc.args.maxcpus) * 100)
            api.Trigger_AddHostCommand(req, n, "insmod pencake.ko repeat=1",
                                       rundir = pnsodefs.PNSO_DRIVER_DIR,
                                       timeout = int(tc.args.maxcpus) * 100)
            api.Trigger_AddHostCommand(req, n, "sleep 40", timeout=300)
        else:
            api.Trigger_AddHostCommand(req, n, "kldunload pencake || true", rundir = pnsodefs.PNSO_DRIVER_DIR,
                                       timeout = int(tc.args.maxcpus) * 100)
            api.Trigger_AddHostCommand(req, n, "kldunload sonic || true", rundir = pnsodefs.PNSO_DRIVER_DIR,
                                       timeout = int(tc.args.maxcpus) * 100)
            api.Trigger_AddHostCommand(req, n, 
                        "kenv compat.linuxkpi.sonic_core_count=%d" % tc.args.maxcpus,
                        rundir = pnsodefs.PNSO_DRIVER_DIR)
            api.Trigger_AddHostCommand(req, n, "kldload ./sonic.ko", rundir = pnsodefs.PNSO_DRIVER_DIR, 
                                       timeout = int(tc.args.maxcpus) * 100)
            api.Trigger_AddHostCommand(req, n, "kldload ./pencake.ko", rundir = pnsodefs.PNSO_DRIVER_DIR,
                                       timeout = int(tc.args.maxcpus) * 100)
            api.Trigger_AddHostCommand(req, n, "sleep 20", timeout=300)

        cmd = api.Trigger_AddHostCommand(req, n, "dmesg | tail -n 100")
        tc.dmesg_commands.append(cmd)

        if tc.os == 'linux':
            for c in range(1, 5):
                output = api.Trigger_AddHostCommand(req, n, "cat /sys/module/pencake/status/%d" % c)
                tc.output_commands.append(output)
            tc.succ_cmd = api.Trigger_AddHostCommand(req, n, "cat /sys/module/pencake/status/success")
            tc.fail_cmd = api.Trigger_AddHostCommand(req, n, "cat /sys/module/pencake/status/fail")
        else:
            output = api.Trigger_AddHostCommand(req, n, "cat /dev/pencake")
            tc.output_commands.append(output)
            tc.succ_cmd = api.Trigger_AddHostCommand(req, n, "sysctl -n compat.linuxkpi.pencake_success_cnt")
            tc.fail_cmd = api.Trigger_AddHostCommand(req, n, "sysctl -n compat.linuxkpi.pencake_fail_cnt")


    tc.resp = api.Trigger(req)

    return api.types.status.SUCCESS
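The command handles stashed on tc (tc.succ_cmd, tc.fail_cmd) carry their stdout once the request has been triggered, so a Verify step can compare the pencake counters. A sketch assuming each sysfs/sysctl read returns a single integer:

def Verify(tc):
    # Hypothetical companion to the Trigger above; assumes the counters
    # are plain integers in the command stdout.
    succ = int(tc.succ_cmd.stdout.strip() or 0)
    fail = int(tc.fail_cmd.stdout.strip() or 0)
    api.Logger.info("pencake success=%d fail=%d" % (succ, fail))
    if fail != 0 or succ == 0:
        return api.types.status.FAILURE
    return api.types.status.SUCCESS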
Example #23
def Trigger(tc):
    if tc.skip:
        return api.types.status.FAILURE

    #
    # Set-up Test Environment
    #
    tc.cmd_cookies = []
    tc.cookie_idx = 0
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                 (tc.server.workload_name, tc.server.ip_address,
                  tc.client.workload_name, tc.client.ip_address)
    api.Logger.info("Starting Single-IPv4-TCP-Flow-Drops test from %s" %\
                   (cmd_cookie))

    #
    # Start TCPDUMP in background on Server/Client
    #
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    #cmd_cookie = "start tcpdump on Server"
    #cmd = "sudo tcpdump -nnSXi {} > out.txt".format(tc.server.interface)
    #add_command(tc, req, cmd_cookie, cmd, tc.server, True)

    #cmd_cookie = "start tcpdump on Client"
    #cmd = "sudo tcpdump -nnSXi {} > out.txt".format(tc.client.interface)
    #add_command(tc, req, cmd_cookie, cmd, tc.client, True)

    #
    # Start with a clean slate by clearing all sessions/flows
    # 45 secs sleep is to ensure that residual FlowDrops induced by previous
    # non-flowstats testbundle (in this case Connectivity) is flushed out
    #
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session; sleep 45"
    add_naples_command(tc, req, cmd_cookie, cmd, tc.naples)

    #
    # Make sure that Client<=>Server Forwarding is set up
    #
    cmd_cookie = "trigger ping: Create case"
    cmd = "ping -c1 %s -I %s" %\
          (tc.server.ip_address, tc.client.interface)
    add_command(tc, req, cmd_cookie, cmd, tc.client, False)

    tc.resp = api.Trigger(req)
    for command in tc.resp.commands:
        api.PrintCommandResults(command)

    #
    # Allocate TCP-portnum for Server and start the service on the Server
    #
    req_nc1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.server_port = api.AllocateTcpPort()
    cmd_cookie = "start server"
    cmd = "nc --listen %s" % (tc.server_port)
    add_command(tc, req_nc1, cmd_cookie, cmd, tc.server, True)

    #
    # Allocate TCP-portnum for Client and establish TCP-connection
    #
    tc.client_port = api.AllocateTcpPort()
    cmd_cookie = "start client"
    cmd = "nc {} {} --source-port {} "\
    .format(tc.server.ip_address, tc.server_port, tc.client_port)
    add_command(tc, req_nc1, cmd_cookie, cmd, tc.client, True)

    resp_nc1 = api.Trigger(req_nc1)
    for command in resp_nc1.commands:
        api.PrintCommandResults(command)

    #
    # Do applicable "show session" commands and
    # retrieve Seq-num and Ack-num associated with the session
    #
    req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req1, cmd_cookie, cmd, tc.naples)

    cmd_cookie = "show session detail"
    cmd = "/nic/bin/halctl show session --dstport {} --dstip {} --yaml"\
    .format(tc.server_port, tc.server.ip_address)
    add_naples_command(tc, req1, cmd_cookie, cmd, tc.naples)

    tc.resp1 = api.Trigger(req1)
    cmd = tc.resp1.commands[-1]
    for command in tc.resp1.commands:
        api.PrintCommandResults(command)
    tc.pre_ctrckinf = get_conntrackinfo(cmd)

    #
    # Send Bad Data with TTL=0 from both Client and Server
    #
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "send bad data from Client TTL=0: Create case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ttl 0 --data 10 {}".format(tc.client_port, tc.server_port,
                                        tc.pre_ctrckinf.r_tcpacknum,
                                        tc.pre_ctrckinf.r_tcpseqnum,
                                        tc.server.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TTL=0: Create case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ttl 0 --data 10 {}".format(tc.server_port, tc.client_port,
                                        tc.pre_ctrckinf.i_tcpacknum,
                                        tc.pre_ctrckinf.i_tcpseqnum,
                                        tc.client.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

    #
    # Send Bad Data with TCP-RSVD-FLAGS-BIT-0 set from both Client and Server
    #
    cmd_cookie = "send bad data from Client TCP-RSVD-FLAGS-BIT-0: Create case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --xmas --data 10 {}".format(tc.client_port, tc.server_port,
                                       tc.pre_ctrckinf.r_tcpacknum + 10,
                                       tc.pre_ctrckinf.r_tcpseqnum,
                                       tc.server.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TCP-RSVD-FLAGS-BIT-0: Create case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --xmas --data 10 {}".format(tc.server_port, tc.client_port,
                                       tc.pre_ctrckinf.i_tcpacknum + 10,
                                       tc.pre_ctrckinf.i_tcpseqnum,
                                       tc.client.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

    #
    # Send Bad Data with TCP-RSVD-FLAGS-BIT-1 set from both Client and Server
    #
    cmd_cookie = "send bad data from Client TCP-RSVD-FLAGS-BIT-1: Create case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ymas --data 10 {}".format(tc.client_port, tc.server_port,
                                       tc.pre_ctrckinf.r_tcpacknum + 20,
                                       tc.pre_ctrckinf.r_tcpseqnum,
                                       tc.server.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TCP-RSVD-FLAGS-BIT-1: Create case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ymas --data 10 {}".format(tc.server_port, tc.client_port,
                                       tc.pre_ctrckinf.i_tcpacknum + 20,
                                       tc.pre_ctrckinf.i_tcpseqnum,
                                       tc.client.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

    tc.resp2 = api.Trigger(req2)
    for command in tc.resp2.commands:
        api.PrintCommandResults(command)

    #
    # Do "show session" command
    #
    req3 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cookie_idx_3 = tc.cookie_idx
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req3, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Create case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"

    add_naples_command(tc, req3, cmd_cookie, cmd, tc.naples)

    tc.resp3 = api.Trigger(req3)
    for command in tc.resp3.commands:
        api.PrintCommandResults(command)

    #
    # Clear all sessions/flows
    #
    api.Trigger_TerminateAllCommands(resp_nc1)
    req_nc2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session"
    add_naples_command(tc, req_nc2, cmd_cookie, cmd, tc.naples)

    #
    # Make sure that Client<=>Server Forwarding is set up
    #
    cmd_cookie = "trigger ping: Re-use case"
    cmd = "ping -c1 %s -I %s" %\
          (tc.server.ip_address, tc.client.interface)
    add_command(tc, req_nc2, cmd_cookie, cmd, tc.client, False)

    #
    # Re-establish TCP-connection
    #
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req_nc2, cmd_cookie, cmd, tc.naples)

    cmd_cookie = "restart server"
    cmd = "nc --listen %s" % (tc.server_port)
    add_command(tc, req_nc2, cmd_cookie, cmd, tc.server, True)

    cmd_cookie = "restart client"
    cmd = "nc {} {} --source-port {} "\
    .format(tc.server.ip_address, tc.server_port, tc.client_port)
    add_command(tc, req_nc2, cmd_cookie, cmd, tc.client, True)

    resp_nc2 = api.Trigger(req_nc2)
    for command in resp_nc2.commands:
        api.PrintCommandResults(command)

    #
    # Do applicable "show session" commands and
    # retrieve Seq-num and Ack-num associated with the session
    #
    req4 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req4, cmd_cookie, cmd, tc.naples)

    cmd_cookie = "show session detail"
    cmd = "/nic/bin/halctl show session --dstport {} --dstip {} --yaml"\
    .format(tc.server_port, tc.server.ip_address)
    add_naples_command(tc, req4, cmd_cookie, cmd, tc.naples)

    tc.resp4 = api.Trigger(req4)
    cmd = tc.resp4.commands[-1]
    for command in tc.resp4.commands:
        api.PrintCommandResults(command)
    tc.post_ctrckinf = get_conntrackinfo(cmd)

    #
    # Re-send Bad Data with TTL=0 from both Client and Server
    #
    req5 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "send bad data from Client TTL=0: Re-use case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ttl 0 --data 10 {}".format(tc.client_port, tc.server_port,
                                        tc.post_ctrckinf.r_tcpacknum,
                                        tc.post_ctrckinf.r_tcpseqnum,
                                        tc.server.ip_address)
    add_command(tc, req5, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TTL=0: Re-use case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ttl 0 --data 10 {}".format(tc.server_port, tc.client_port,
                                        tc.post_ctrckinf.i_tcpacknum,
                                        tc.post_ctrckinf.i_tcpseqnum,
                                        tc.client.ip_address)
    add_command(tc, req5, cmd_cookie, cmd, tc.server, False)

    #
    # Re-send Bad Data with TCP-RSVD-FLAGS-BIT-0 set from both Client and Server
    #
    cmd_cookie = "send bad data from Client TCP-RSVD-FLAGS-BIT-0: Re-use case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --xmas --data 10 {}".format(tc.client_port, tc.server_port,
                                       tc.post_ctrckinf.r_tcpacknum + 10,
                                       tc.post_ctrckinf.r_tcpseqnum,
                                       tc.server.ip_address)
    add_command(tc, req5, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TCP-RSVD-FLAGS-BIT-0: Re-use case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --xmas --data 10 {}".format(tc.server_port, tc.client_port,
                                       tc.post_ctrckinf.i_tcpacknum + 10,
                                       tc.post_ctrckinf.i_tcpseqnum,
                                       tc.client.ip_address)
    add_command(tc, req5, cmd_cookie, cmd, tc.server, False)

    #
    # Re-send Bad Data with TCP-RSVD-FLAGS-BIT-1 set from both Client and Server
    #
    cmd_cookie = "send bad data from Client TCP-RSVD-FLAGS-BIT-1: Re-use case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ymas --data 10 {}".format(tc.client_port, tc.server_port,
                                       tc.post_ctrckinf.r_tcpacknum + 20,
                                       tc.post_ctrckinf.r_tcpseqnum,
                                       tc.server.ip_address)
    add_command(tc, req5, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TCP-RSVD-FLAGS-BIT-1: Re-use case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ymas --data 10 {}".format(tc.server_port, tc.client_port,
                                       tc.post_ctrckinf.i_tcpacknum + 20,
                                       tc.post_ctrckinf.i_tcpseqnum,
                                       tc.client.ip_address)
    add_command(tc, req5, cmd_cookie, cmd, tc.server, False)

    tc.resp5 = api.Trigger(req5)
    for command in tc.resp5.commands:
        api.PrintCommandResults(command)

    #
    # Do "show session" command
    #
    req6 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cookie_idx_6 = tc.cookie_idx
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req6, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Re-use case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"

    add_naples_command(tc, req6, cmd_cookie, cmd, tc.naples)

    tc.resp6 = api.Trigger(req6)
    for command in tc.resp6.commands:
        api.PrintCommandResults(command)

    #
    # Do "show session" command after doing Sleep for 45secs
    #
    api.Trigger_TerminateAllCommands(resp_nc2)
    req7 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cookie_idx_7 = tc.cookie_idx
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session; sleep 45"
    add_naples_command(tc, req7, cmd_cookie, cmd, tc.naples)

    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req7, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Delete case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"

    add_naples_command(tc, req7, cmd_cookie, cmd, tc.naples)

    tc.resp7 = api.Trigger(req7)
    for command in tc.resp7.commands:
        api.PrintCommandResults(command)

    return api.types.status.SUCCESS
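The hping3 cases above splice crafted segments into a live session, so they depend on get_conntrackinfo() pulling the current seq/ack numbers out of the "halctl show session ... --yaml" output. That helper is not part of this excerpt; below is a minimal sketch of what it could look like, with the YAML field paths marked as assumptions (the real halctl schema may differ).

from collections import namedtuple
import yaml

ConnTrackInfo = namedtuple(
    "ConnTrackInfo",
    ["i_tcpseqnum", "i_tcpacknum", "r_tcpseqnum", "r_tcpacknum"])

def parse_conntrackinfo(stdout):
    # halctl may emit several YAML documents; keep the last non-empty one
    doc = [d for d in yaml.safe_load_all(stdout) if d][-1]
    i_flow = doc["status"]["initiatorflow"]["conntrackinfo"]  # assumed path
    r_flow = doc["status"]["responderflow"]["conntrackinfo"]  # assumed path
    return ConnTrackInfo(int(i_flow["tcpseqnum"]), int(i_flow["tcpacknum"]),
                         int(r_flow["tcpseqnum"]), int(r_flow["tcpacknum"]))

With those numbers in hand, the Create and Re-use cases above offset them (+10, +20) so that each bad packet lands at a distinct point in the window and the drop counters can be attributed per case.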
Example #24
0
def Trigger(tc):
    pairs = api.GetLocalWorkloadPairs()
    server = pairs[0][0]
    client = pairs[0][1]
    tc.cmd_cookies = []

    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
    api.Logger.info("Starting MSRPC test from %s" % (tc.cmd_descr))

    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "tcpdump -nni %s > out.txt" % (server.interface),
                           background=True)
    tc.cmd_cookies.append("tcpdump")

    api.Trigger_AddCommand(req,
                           client.node_name,
                           client.workload_name,
                           "tcpdump -nni %s > out.txt" % (client.interface),
                           background=True)
    tc.cmd_cookies.append("tcpdump")

    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "sudo nc -l %s" % (MSRPC_PORT),
                           background=True)
    tc.cmd_cookies.append("netcat start-server")

    client_port = api.AllocateTcpPort()
    api.Trigger_AddCommand(req,
                           client.node_name,
                           client.workload_name,
                           "sudo nc %s %s -p %s" %
                           (server.ip_address, MSRPC_PORT, client_port),
                           background=True)
    tc.cmd_cookies.append("netcat start-client")

    msrpcscript = dir_path + '/' + "msrpcscapy.py"
    resp = api.CopyToWorkload(server.node_name, server.workload_name,
                              [msrpcscript], 'msrpcdir')
    if resp is None:
        return api.types.status.SUCCESS

    resp = api.CopyToWorkload(client.node_name, client.workload_name,
                              [msrpcscript], 'msrpcdir')
    if resp is None:
        return api.types.status.SUCCESS

    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --dstport 135 --yaml")
    tc.cmd_cookies.append("show session yaml")
    sesssetup = api.Trigger(req)
    cmd = sesssetup.commands[-1]
    api.PrintCommandResults(cmd)
    tc.pre_ctrckinf = get_conntrackinfo(cmd)

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    msrpcpcap = dir_path + '/' + "msrpc_first_bind.pcap"
    pkts = rdpcap(msrpcpcap)
    clientidx = 0
    serveridx = 0
    client_ack = tc.pre_ctrckinf.i_tcpacknum
    server_ack = tc.pre_ctrckinf.r_tcpacknum
    filename = None
    for pkt in pkts:
        node = client.node_name
        workload = client.workload_name
        if pkt[IP].src == "172.31.9.1":
            filename = ("msrpcscapy" + "%s" + ".pcap") % (clientidx)
            msrpcscapy = dir_path + '/' + filename
            pkt[Ether].src = client.mac_address
            pkt[IP].src = client.ip_address
            pkt[Ether].dst = server.mac_address
            pkt[IP].dst = server.ip_address
            pkt[TCP].sport = client_port
            pkt[TCP].dport = 135
            if clientidx == 0:
                client_start_seq = pkt[TCP].seq
                client_start_ack = pkt[TCP].ack
            pkt[TCP].seq = tc.pre_ctrckinf.i_tcpseqnum + (pkt[TCP].seq -
                                                          client_start_seq)
            pkt[TCP].ack = client_ack
            server_ack = pkt[TCP].seq + len(pkt[TCP].payload) + 1
            clientidx += 1
        else:
            filename = ("msrpcscapy" + "%s" + ".pcap") % (serveridx)
            msrpcscapy = dir_path + '/' + filename
            pkt[Ether].src = server.mac_address
            pkt[IP].src = server.ip_address
            pkt[Ether].dst = client.mac_address
            pkt[IP].dst = client.ip_address
            node = server.node_name
            workload = server.workload_name
            pkt[TCP].dport = client_port
            pkt[TCP].sport = 135
            if serveridx == 0:
                server_start_seq = pkt[TCP].seq
                server_start_ack = pkt[TCP].ack
            pkt[TCP].seq = tc.pre_ctrckinf.r_tcpseqnum + (pkt[TCP].seq -
                                                          server_start_seq)
            pkt[TCP].ack = server_ack
            client_ack = pkt[TCP].seq + len(pkt[TCP].payload) + 1
            serveridx += 1
        del pkt[IP].chksum
        del pkt[TCP].chksum
        print(pkt.show(dump=True))
        wrpcap(msrpcscapy, pkt)
        resp = api.CopyToWorkload(node, workload, [msrpcscapy], 'msrpcdir')
        if resp is None:
            continue
        api.Trigger_AddCommand(
            req, node, workload,
            "sh -c 'cd msrpcdir && chmod +x msrpcscapy.py && ./msrpcscapy.py %s'"
            % (filename))
        tc.cmd_cookies.append("running #%s on node %s workload %s" %
                              (filename, node, workload))
        os.remove(msrpcscapy)

    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --dstport 135 --yaml")
    tc.cmd_cookies.append("show session yaml")
    firstbind = api.Trigger(req)
    cmd = firstbind.commands[-1]
    api.PrintCommandResults(cmd)
    tc.post_ctrckinf = get_conntrackinfo(cmd)

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    msrpcpcap = dir_path + '/' + "msrpc_second_bind.pcap"
    pkts = rdpcap(msrpcpcap)
    client_ack = tc.post_ctrckinf.i_tcpacknum
    server_ack = tc.post_ctrckinf.r_tcpacknum
    clientidx = 0
    serveridx = 0
    filename = None
    for pkt in pkts:
        node = client.node_name
        workload = client.workload_name
        if pkt[IP].src == "172.31.9.1":
            filename = ("msrpcscapy" + "%s" + ".pcap") % (clientidx)
            msrpcscapy = dir_path + '/' + filename
            pkt[Ether].src = client.mac_address
            pkt[IP].src = client.ip_address
            pkt[Ether].dst = server.mac_address
            pkt[IP].dst = server.ip_address
            pkt[TCP].sport = client_port
            pkt[TCP].dport = 135
            if clientidx == 0:
                client_start_seq = pkt[TCP].seq
                client_start_ack = pkt[TCP].ack
            pkt[TCP].seq = tc.post_ctrckinf.i_tcpseqnum + (pkt[TCP].seq -
                                                           client_start_seq)
            pkt[TCP].ack = client_ack
            server_ack = pkt[TCP].seq + len(pkt[TCP].payload) + 1
            clientidx += 1
        else:
            filename = ("msrpcscapy" + "%s" + ".pcap") % (serveridx)
            msrpcscapy = dir_path + '/' + filename
            pkt[Ether].src = server.mac_address
            pkt[IP].src = server.ip_address
            pkt[Ether].dst = client.mac_address
            pkt[IP].dst = client.ip_address
            node = server.node_name
            workload = server.workload_name
            pkt[TCP].dport = client_port
            pkt[TCP].sport = 135
            if serveridx == 0:
                server_start_seq = pkt[TCP].seq
                server_start_ack = pkt[TCP].ack
            pkt[TCP].seq = tc.post_ctrckinf.r_tcpseqnum + (pkt[TCP].seq -
                                                           server_start_seq)
            pkt[TCP].ack = server_ack
            client_ack = pkt[TCP].seq + len(pkt[TCP].payload) + 1
            serveridx += 1
        del pkt[IP].chksum
        del pkt[TCP].chksum
        print(pkt.show(dump=True))
        wrpcap(msrpcscapy, pkt)
        resp = api.CopyToWorkload(node, workload, [msrpcscapy], 'msrpcdir')
        if resp is None:
            continue
        api.Trigger_AddCommand(
            req, node, workload,
            "sh -c 'cd msrpcdir && chmod +x msrpcscapy.py && ./msrpcscapy.py %s'"
            % (filename))
        tc.cmd_cookies.append("running #%s on node %s workload %s" %
                              (filename, node, workload))
        os.remove(msrpcscapy)

    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "sudo nc -l 49134",
                           background=True)
    tc.cmd_cookies.append("msrpc start-server")

    api.Trigger_AddCommand(req,
                           client.node_name,
                           client.workload_name,
                           "sudo nc %s 49134 -p 59374" % (server.ip_address),
                           background=True)
    tc.cmd_cookies.append("msrpc start-client")

    # Add Naples command validation
    #api.Trigger_AddNaplesCommand(req, naples.node_name,
    #                       "/nic/bin/halctl show security flow-gate | grep MSRPC")
    #tc.cmd_cookies.append("show security flow-gate")

    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show session")
    tc.cmd_cookies.append("show session")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp2 = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    term_resp = api.Trigger_TerminateAllCommands(firstbind)
    tc.resp1 = api.Trigger_AggregateCommandsResponse(firstbind, term_resp)

    term_resp = api.Trigger_TerminateAllCommands(sesssetup)
    tc.resp = api.Trigger_AggregateCommandsResponse(sesssetup, term_resp)

    #GetTcpdumpData(client)
    #GetTcpdumpData(server)

    return api.types.status.SUCCESS
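The per-packet rewrite loops above rebase the recorded MSRPC pcap onto the live connection: every replayed segment keeps its offset from the first pcap segment but is shifted into the seq/ack space reported by conntrack. A distilled sketch of that step, same logic as the loops, assuming scapy:

from scapy.all import IP, TCP

def rebase_tcp(pkt, live_seq_base, pcap_start_seq, peer_ack):
    # shift the recorded seq into the live session's sequence space
    pkt[TCP].seq = live_seq_base + (pkt[TCP].seq - pcap_start_seq)
    pkt[TCP].ack = peer_ack
    # drop stale checksums so scapy recomputes them on write
    del pkt[IP].chksum
    del pkt[TCP].chksum
    # ack the peer is expected to send next, per the loop's convention
    return pkt[TCP].seq + len(pkt[TCP].payload) + 1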
Example #25
0
def runIperfTest(tc, srv, cli):
    if api.IsDryrun():
        return api.types.status.SUCCESS

    req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    proto = getattr(tc.iterators, "proto", 'tcp')

    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)

    pktsize = getattr(tc.iterators, "pktsize", 512)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')

    if ipproto == 'v6':
        srv_ip_address = srv.ipv6_address
        tc.cmd_descr = " Server: %s(%s) <--> Client: %s(%s)" %\
            (srv.interface, srv.ipv6_address, cli.interface, cli.ipv6_address)
    else:
        srv_ip_address = srv.ip_address
        tc.cmd_descr = " Server: %s(%s) <--> Client: %s(%s)" %\
            (srv.interface, srv.ip_address, cli.interface, cli.ip_address)

    api.Logger.info("Starting TSO test %s" % (tc.cmd_descr))

    tc.srv_bad_csum = ionic_stats.getNetstatBadCsum(srv, proto)
    tc.cli_bad_csum = ionic_stats.getNetstatBadCsum(cli, proto)

    for i in range(number_of_iperf_threads):
        if proto == 'tcp':
            port = api.AllocateTcpPort()
        else:
            port = api.AllocateUdpPort()

        file_name_suffix = "_instance" + \
            str(i) + proto + "_" + ipproto + "_" + str(pktsize)

        file_name = '/tmp/' + 'srv_' + srv.interface + file_name_suffix
        iperf_server_cmd = cmd_builder.iperf_server_cmd(port=port)
        api.Trigger_AddCommand(req1,
                               srv.node_name,
                               srv.workload_name,
                               iperf_server_cmd,
                               background=True)

        file_name = '/tmp/' + 'cli_' + cli.interface + file_name_suffix
        iperf_file_name = file_name + ".log"
        iperf_client_cmd = cmd_builder.iperf_client_cmd(
            server_ip=srv_ip_address,
            port=port,
            proto=proto,
            pktsize=pktsize,
            ipproto=ipproto)

        # Once iperf JSON parsing is available here, we won't need these hacks.
        api.Trigger_AddCommand(
            req2, cli.node_name, cli.workload_name,
            iperf_client_cmd + " -J | tee " + iperf_file_name)
        # Read the retransmission counter from the log
        api.Trigger_AddCommand(
            req2, cli.node_name, cli.workload_name, 'grep retrans ' +
            iperf_file_name + '| tail -1| cut -d ":" -f 2 | cut -d "," -f 1')
        # Read the bandwidth numbers.
        api.Trigger_AddCommand(
            req2, cli.node_name, cli.workload_name, 'cat ' + iperf_file_name +
            ' | grep bits_per_second | tail -1 |  cut -d ":" -f 2 | cut -d "," -f 1'
        )
        # Read the bytes transferred numbers.
        api.Trigger_AddCommand(
            req2, cli.node_name, cli.workload_name, 'cat ' + iperf_file_name +
            ' | grep bytes | tail -1 |  cut -d ":" -f 2 | cut -d "," -f 1')

    trig_resp1 = api.Trigger(req1)
    if trig_resp1 is None:
        api.Logger.error("Failed to run iperf server")
        return api.types.status.FAILURE

    tc.resp = api.Trigger(req2)
    if tc.resp is None:
        api.Logger.error("Failed to run iperf client")
        return api.types.status.FAILURE

    for cmd in tc.resp.commands:
        if cmd.exit_code != 0:
            api.Logger.error("Failed to start client iperf\n")
            api.PrintCommandResults(cmd)
            return api.types.status.FAILURE

    status, retran, bw = verifySingle(tc)
    vlan = getattr(tc.iterators, 'vlantag', 'off')
    vxlan = getattr(tc.iterators, 'vxlan', 'off')
    tso = getattr(tc.iterators, 'tso_offload', 'off')

    api.Logger.info(
        "Result TSO: %s VLAN: %s VXLAN: %s Proto: %s/%s Pkt size: %d Threads: %d"
        " Bandwidth: %d Mbps" % (tso, vlan, vxlan, proto, ipproto, pktsize,
                                 number_of_iperf_threads, bw))
    return status
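The client already runs iperf with -J, so the grep/cut post-processing above can be replaced by real JSON parsing once the harness supports it. A sketch, assuming iperf3's JSON layout where the TCP totals live under end.sum_sent and end.sum_received:

import json

def parse_iperf3_json(output):
    doc = json.loads(output)
    sent = doc["end"]["sum_sent"]          # totals for the sending side
    recv = doc["end"]["sum_received"]      # totals for the receiving side
    return {
        "retransmits": sent.get("retransmits", 0),
        "bits_per_second": recv["bits_per_second"],
        "bytes": recv["bytes"],
    }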
Example #26
0
def Trigger(tc):

    #==============================================================
    # trigger the commands
    #==============================================================
    krping_req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    dmesg_req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    ping_req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    # load krping on both nodes
    tc.nodes = api.GetNaplesHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])
    for n in tc.nodes:
        if tc.os == host.OS_TYPE_LINUX:
            api.Trigger_AddHostCommand(
                krping_req, n, "(lsmod | grep -w rdma_krping >/dev/null) || " +
                "insmod {path}/krping/rdma_krping.ko".format(
                    path=tc.iota_path))
        else:
            api.Trigger_AddHostCommand(
                krping_req, n, "(kldstat | grep -w krping >/dev/null) || " +
                "kldload {path}/krping/krping.ko".format(path=tc.iota_path))

    w1 = tc.w[0]
    w2 = tc.w[1]

    options = "port=9999,verbose,validate,size=65536,"

    # cmd for server
    if api.GetNodeOs(w1.node_name) == host.OS_TYPE_LINUX:
        krpfile = "/proc/krping"
    else:
        krpfile = "/dev/krping"

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" % \
                   (w1.workload_name, w1.ip_address,      \
                    w2.workload_name, w2.ip_address)

    api.Logger.info("Starting krping_rdma test from %s" % (tc.cmd_descr))

    # If the client fails to connect or otherwise misbehaves, IOTA
    # can get stuck with a server waiting forever. This causes
    # module unload to fail and cascading failures after that.
    # Tell the server to only wait this long before giving up.
    server_options = options + "wait=30,"

    cmd = "sudo echo -n 'server,addr={addr},{opstr}' > {kfile}".format(
        addr=w1.ip_address, opstr=server_options, kfile=krpfile)
    api.Trigger_AddCommand(krping_req,
                           w1.node_name,
                           w1.workload_name,
                           cmd,
                           background=True)

    # It takes a few seconds before the server starts listening.
    cmd = 'sleep 3'
    api.Trigger_AddCommand(krping_req, w1.node_name, w1.workload_name, cmd)

    cmd = "sudo echo -n 'client,addr={addr},{opstr}' > {kfile}".format(
        addr=w1.ip_address, opstr=options, kfile=krpfile)
    api.Trigger_AddCommand(krping_req,
                           w2.node_name,
                           w2.workload_name,
                           cmd,
                           background=True)

    krping_trig_resp = api.Trigger(krping_req)

    # Run RDMA perf traffic while krping is running
    rdma_perf.Trigger(tc)

    krping_term_resp = api.Trigger_TerminateAllCommands(krping_trig_resp)
    tc.krping_resp = api.Trigger_AggregateCommandsResponse(
        krping_trig_resp, krping_term_resp)

    # dmesg commands
    api.Trigger_AddCommand(dmesg_req, w1.node_name, w1.workload_name,
                           "dmesg | tail -20 | grep rdma-ping-")
    api.Trigger_AddCommand(dmesg_req, w2.node_name, w2.workload_name,
                           "dmesg | tail -20 | grep rdma-ping-")

    dmesg_trig_resp = api.Trigger(dmesg_req)
    dmesg_term_resp = api.Trigger_TerminateAllCommands(dmesg_trig_resp)
    tc.dmesg_resp = api.Trigger_AggregateCommandsResponse(
        dmesg_trig_resp, dmesg_term_resp)

    # ping to check if the pipeline is stuck
    api.Trigger_AddCommand(ping_req, w1.node_name, w1.workload_name,
                           "ping -c 10 -t 10 " + w2.ip_address)
    api.Trigger_AddCommand(ping_req, w2.node_name, w2.workload_name,
                           "ping -c 10 -t 10 " + w1.ip_address)

    ping_trig_resp = api.Trigger(ping_req)
    ping_term_resp = api.Trigger_TerminateAllCommands(ping_trig_resp)
    tc.ping_resp = api.Trigger_AggregateCommandsResponse(
        ping_trig_resp, ping_term_resp)
    return api.types.status.SUCCESS
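One caveat with the krping trigger commands: in "sudo echo -n '...' > /proc/krping" the redirection is performed by the invoking shell before sudo runs, so the write only succeeds if that shell is already privileged. If it is not, wrapping the whole pipeline keeps the redirect under sudo; a sketch using the same variables as above:

cmd = "sudo sh -c \"echo -n 'server,addr={addr},{opstr}' > {kfile}\"".format(
    addr=w1.ip_address, opstr=server_options, kfile=krpfile)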
Example #27
0
def Verify(tc):
    if tc.skip:
        return api.types.status.SUCCESS

    if tc.pktloss_verify:
        if tc.background and tc.bg_cmd_resp is None:
            api.Logger.error("Failed in background Ping cmd trigger")
            return api.types.status.FAILURE

    if tc.resp is None:
        api.Logger.error("Received empty response for config request")
        return api.types.status.FAILURE
    else:
        for cmd in tc.resp.commands:
            api.PrintCommandResults(cmd)
            if cmd.exit_code != 0:
                api.Logger.error("Rollout request failed")
                return api.types.status.FAILURE

    # Wait for the upgrade to complete; status is reported via /update/pds_upg_status.txt once it appears.
    api.Logger.info("Sleep for 70 secs before checking for /update/pds_upg_status.txt")
    misc_utils.Sleep(70)
    status_in_progress = True
    while status_in_progress:
        misc_utils.Sleep(1)
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        for node in tc.Nodes:
            api.Trigger_AddNaplesCommand(req, node, "grep -v in-progress /update/pds_upg_status.txt", timeout=2)
        api.Logger.info("Checking for status not in-progress in file /update/pds_upg_status.txt")
        resp = api.Trigger(req)

        status_in_progress = False
        for cmd_resp in resp.commands:
            #api.PrintCommandResults(cmd_resp)
            if cmd_resp.exit_code != 0:
                status_in_progress = True
                #api.Logger.info("File /update/pds_upg_status.txt not found")
            else:
                api.Logger.info("Status other than in-progress found in /update/pds_upg_status.txt")

#    # push interface config updates after upgrade completes
#    UpdateConfigAfterUpgrade(tc)

    for i in range(10):
        api.Logger.info("Sending ARPing, retry count %s"%i)
        # Send Grat Arp for learning
        arping.SendGratArp(tc.wloads)
        misc_utils.Sleep(1)

    result = CheckRolloutStatus(tc)

    # ensure connectivity after upgrade
    if VerifyConnectivity(tc) != api.types.status.SUCCESS:
        api.Logger.error("Failed in Connectivity Check Post Upgrade.")
        result = api.types.status.FAILURE

    error_str = None
    if tc.pktloss_verify:
        # If rollout status is failure, then no need to wait for traffic test
        if result == api.types.status.SUCCESS:
            api.Logger.info("Sleep for %s secs for traffic test to complete"%tc.sleep)
            misc_utils.Sleep(tc.sleep)

        pkt_loss_duration = 0
        # terminate background traffic and calculate packet loss duration
        if tc.background:
            if ping.TestTerminateBackgroundPing(tc, tc.pktsize,\
                  pktlossverif=tc.pktlossverif) != api.types.status.SUCCESS:
                api.Logger.error("Failed in Ping background command termination.")
                result = api.types.status.FAILURE
            # calculate max packet loss duration for background ping
            pkt_loss_duration = ping.GetMaxPktLossDuration(tc, interval=tc.interval)
            if pkt_loss_duration != 0:
                indent = "-" * 10
                if tc.pktlossverif:
                    result = api.types.status.FAILURE
                api.Logger.error(f"{indent} Packet Loss duration during UPGRADE of {tc.Nodes} is {pkt_loss_duration} secs {indent}")
                if tc.allowed_down_time and (pkt_loss_duration > tc.allowed_down_time):
                    api.Logger.error(f"{indent} Exceeded allowed Loss Duration {tc.allowed_down_time} secs {indent}")
                    result = api.types.status.FAILURE
            else:
                api.Logger.info("No Packet Loss Found during UPGRADE Test")

    if upgrade_utils.VerifyUpgLog(tc.Nodes, tc.GetLogsDir()):
        api.Logger.error("Failed to verify the upgrade logs")
        result = api.types.status.FAILURE
    return result
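The in-progress poll in Verify() has no upper bound: if the status file never reports anything other than in-progress, the loop spins forever. A minimal sketch of the same poll with a deadline, reusing the api and misc_utils helpers imported elsewhere in these examples:

import time

def wait_for_upgrade_status(tc, timeout_secs=600):
    deadline = time.time() + timeout_secs
    while time.time() < deadline:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        for node in tc.Nodes:
            api.Trigger_AddNaplesCommand(
                req, node,
                "grep -v in-progress /update/pds_upg_status.txt", timeout=2)
        resp = api.Trigger(req)
        # every node must show a status other than in-progress
        if all(cmd.exit_code == 0 for cmd in resp.commands):
            return api.types.status.SUCCESS
        misc_utils.Sleep(1)
    return api.types.status.FAILURE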
Example #28
0
def Trigger(tc):
    # check that there have been packets showing up after previous tests
    #       ip -s link show <pf>

    packet_count = 1000
    mode = tc.iterators.mode
    if mode == "RX":
        pkt_cnt_before = GetVFRxStats(tc.host1, tc.pf_1, tc.vfid)
    else:
        pkt_cnt_before = GetVFTxStats(tc.host1, tc.pf_1, tc.vfid)

    if pkt_cnt_before == -1:
        api.Logger.error("Getting VF %s stats failed" % mode)
        return api.types.status.ERROR

    reverse = (mode == "RX")
    servercmd = iperf.ServerCmd(server_ip=tc.remote_ip, port=7777)
    clientcmd = iperf.ClientCmd(tc.remote_ip,
                                packet_count=packet_count,
                                client_ip=tc.vf_ip,
                                jsonOut=True,
                                port=7777,
                                proto='udp',
                                reverse=reverse)

    sreq = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    creq = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    api.Trigger_AddHostCommand(sreq, tc.host2, servercmd, background=True)
    api.Trigger_AddHostCommand(creq, tc.host1, clientcmd, timeout=30)

    server_resp = api.Trigger(sreq)
    if not server_resp:
        api.Logger.error("Unable to execute server command")
        return api.types.status.ERROR
    time.sleep(5)

    client_resp = api.Trigger(creq)
    if not client_resp:
        api.Logger.error("Unable to execute client command")
        return api.types.status.ERROR
    resp = client_resp.commands.pop()
    if resp.exit_code != 0:
        api.Logger.error("Iperf client failed with exit code %d" %
                         resp.exit_code)
        api.PrintCommandResults(resp)
        return api.types.status.ERROR
    if not iperf.Success(resp.stdout):
        api.Logger.error("Iperf failed with error: %s" %
                         iperf.Error(resp.stdout))
        return api.types.status.ERROR

    if mode == "RX":
        pkt_cnt_after = GetVFRxStats(tc.host1, tc.pf_1, tc.vfid)
    else:
        pkt_cnt_after = GetVFTxStats(tc.host1, tc.pf_1, tc.vfid)

    delta_pkt_cnt = pkt_cnt_after - pkt_cnt_before
    if packet_count > delta_pkt_cnt:
        api.Logger.error("Incorrect %s stats, expected %d observed %d" %
                         (mode, packet_count, delta_pkt_cnt))
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
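GetVFRxStats/GetVFTxStats are not included in this excerpt. One plausible implementation reads the per-VF counters that recent iproute2 prints under "ip -s link show"; the parsing below is illustrative only, since the exact output layout varies between iproute2 versions:

def get_vf_rx_packets(host, pf, vfid):
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddHostCommand(req, host, "ip -s link show dev %s" % pf)
    resp = api.Trigger(req)
    if resp is None:
        return -1
    cmd = resp.commands[0]
    if cmd.exit_code != 0:
        return -1
    lines = cmd.stdout.splitlines()
    for i, line in enumerate(lines):
        if line.strip().startswith("vf %d " % vfid):
            # counters follow an "RX: bytes packets ..." header line
            for j in range(i + 1, min(i + 6, len(lines) - 1)):
                if lines[j].strip().startswith("RX:"):
                    return int(lines[j + 1].split()[1])
    return -1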
Example #29
0
def Trigger(tc):
    triplet = GetThreeWorkloads()
    server = triplet[0][0]
    client1 = triplet[0][1]
    client2 = triplet[0][2]
    tc.cmd_cookies = []

    naples = server
    if not server.IsNaples():
        naples = client1
        if not client1.IsNaples():
            naples = client2
            if not client2.IsNaples():
                return api.types.status.SUCCESS

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (server.workload_name, server.ip_address, client1.workload_name, client1.ip_address)
    api.Logger.info("Starting SUNRPC test from %s" % (tc.cmd_descr))

    SetupNFSServer(server, client1)

    api.Trigger_AddCommand(
        req, server.node_name, server.workload_name,
        "sh -c 'ls -al /home/sunrpcmntdir | sudo grep sunrpc_file.txt'")
    tc.cmd_cookies.append("Before rpc")

    api.Trigger_AddCommand(
        req, client1.node_name, client1.workload_name,
        "sudo sh -c 'mkdir -p /home/sunrpcdir && sudo mount %s:/home/sunrpcmntdir /home/sunrpcdir' "
        % (server.ip_address))
    tc.cmd_cookies.append("Create mount point")

    api.Trigger_AddCommand(req, client1.node_name, client1.workload_name,
                           "sudo chmod 777 /home/sunrpcdir")
    tc.cmd_cookies.append("add permission")

    api.Trigger_AddCommand(req, client1.node_name, client1.workload_name,
                           "mv sunrpcdir/sunrpc_file.txt /home/sunrpcdir/")
    tc.cmd_cookies.append("Create file")

    api.Trigger_AddCommand(req, client1.node_name, client1.workload_name,
                           "ls -al /home/sunrpcdir")
    tc.cmd_cookies.append("verify file")

    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "ls -al /home/sunrpcmntdir/")
    tc.cmd_cookies.append("After rpc")

    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "sh -c 'cat /home/sunrpcmntdir/sunrpc_file.txt'")
    tc.cmd_cookies.append("After rpc")

    # Add Naples command validation
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show session --alg sun_rpc")
    tc.cmd_cookies.append("show session")
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show nwsec flow-gate")
    tc.cmd_cookies.append("show security flow-gate")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    # Get it from flow gate
    dport = 2049
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    api.Trigger_AddCommand(
        req2, server.node_name, server.workload_name,
        "hping3 -c 1 -s 45535 -p {} -d 0 -S {}".format(dport,
                                                       client1.ip_address))
    tc.cmd_cookies.append("Hping from different direction")

    # Get the timeout from the config
    api.Trigger_AddNaplesCommand(
        req2, naples.node_name,
        "/nic/bin/halctl show session --srcip %s | grep SYN" %
        (client1.ip_address))
    tc.cmd_cookies.append("show session reverse direction")

    trig_resp2 = api.Trigger(req2)
    term_resp2 = api.Trigger_TerminateAllCommands(trig_resp2)
    tc.resp2 = api.Trigger_AggregateCommandsResponse(trig_resp2, term_resp2)

    CleanupNFSServer(server, client1)

    return api.types.status.SUCCESS
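SetupNFSServer/CleanupNFSServer are referenced but not shown. A sketch of what the setup half might do on a Linux workload; the exports line and the service name are assumptions about the test image:

def SetupNFSServerSketch(server, client):
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    export = "/home/sunrpcmntdir %s(rw,sync,no_root_squash)" % client.ip_address
    for cmd in [
            "sudo mkdir -p /home/sunrpcmntdir",
            "sudo sh -c 'echo \"%s\" >> /etc/exports'" % export,
            "sudo exportfs -ra",
            "sudo systemctl restart nfs-server",   # assumed service name
    ]:
        api.Trigger_AddCommand(req, server.node_name, server.workload_name, cmd)
    return api.Trigger(req)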
Example #30
0
def send_pkt(tc, node, pkt_gen, flow, pkt_cnt):

    # ==========================================
    # Send and Receive packets in H2S direction
    # ==========================================
    pkt_gen.set_dir_('h2s')
    pkt_gen.set_sip(flow.sip)
    pkt_gen.set_dip(flow.dip)
    pkt_gen.set_nat_flows_h2s_tx(nat_flows_h2s_tx)
    pkt_gen.set_nat_flows_h2s_rx(nat_flows_h2s_rx)

    if flow.proto == 'UDP' or flow.proto == 'TCP':
        pkt_gen.set_sport(flow.sport)
        pkt_gen.set_dport(flow.dport)

    h2s_req = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    # ==========
    # Rx Packet
    # ==========
    pkt_gen.set_encap(True)
    pkt_gen.set_Rx(True)
    pkt_gen.set_vlan(tc.up0_vlan)
    pkt_gen.set_smac(tc.up1_mac)
    pkt_gen.set_dmac(tc.up0_mac)

    pkt_gen.setup_pkt()
    recv_cmd = "./recv_pkt.py --intf_name %s --pcap_fname %s "\
                "--timeout %s --pkt_cnt %d" % (tc.up0_intf,
                pktgen.DEFAULT_H2S_RECV_PKT_FILENAME,
                str(SNIFF_TIMEOUT), pkt_cnt)

    api.Trigger_AddHostCommand(h2s_req, node.Name(), recv_cmd, background=True)

    # ==========
    # Tx Packet
    # ==========
    pkt_gen.set_encap(False)
    pkt_gen.set_Rx(False)
    pkt_gen.set_smac(tc.up1_mac)
    pkt_gen.set_dmac(tc.up0_mac)
    pkt_gen.set_vlan(tc.up1_vlan)

    pkt_gen.setup_pkt()
    send_cmd = "./send_pkt.py --intf_name %s --pcap_fname %s "\
                "--pkt_cnt %d" % (tc.up1_intf,
                pktgen.DEFAULT_H2S_GEN_PKT_FILENAME, pkt_cnt)

    api.Trigger_AddHostCommand(h2s_req, node.Name(), 'sleep 0.5')
    api.Trigger_AddHostCommand(h2s_req, node.Name(), send_cmd)

    trig_resp = api.Trigger(h2s_req)
    time.sleep(SNIFF_TIMEOUT)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    h2s_resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    tc.resp.append(h2s_resp)

    # ==========================================
    # Send and Receive packets in S2H direction
    # ==========================================
    pkt_gen.set_dir_('s2h')
    pkt_gen.set_sip(flow.dip)
    pkt_gen.set_dip(flow.sip)
    pkt_gen.set_nat_flows_s2h_tx(nat_flows_s2h_tx)
    pkt_gen.set_nat_flows_s2h_rx(nat_flows_s2h_rx)

    if flow.proto == 'UDP' or flow.proto == 'TCP':
        pkt_gen.set_sport(flow.dport)
        pkt_gen.set_dport(flow.sport)

    s2h_req = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    # ==========
    # Rx Packet
    # ==========
    pkt_gen.set_encap(False)
    pkt_gen.set_Rx(True)
    pkt_gen.set_smac(tc.up0_mac)
    pkt_gen.set_dmac(tc.up1_mac)
    pkt_gen.set_vlan(tc.up1_vlan)

    pkt_gen.setup_pkt()
    recv_cmd = "./recv_pkt.py --intf_name %s --pcap_fname %s "\
                "--timeout %s --pkt_cnt %d" % (tc.up1_intf,
                pktgen.DEFAULT_S2H_RECV_PKT_FILENAME,
                str(SNIFF_TIMEOUT), pkt_cnt)

    api.Trigger_AddHostCommand(s2h_req, node.Name(), recv_cmd, background=True)

    # ==========
    # Tx Packet
    # ==========
    pkt_gen.set_encap(True)
    pkt_gen.set_Rx(False)
    pkt_gen.set_smac(tc.up0_mac)
    pkt_gen.set_dmac(tc.up1_mac)
    pkt_gen.set_vlan(tc.up0_vlan)

    pkt_gen.setup_pkt()
    send_cmd = "./send_pkt.py --intf_name %s --pcap_fname %s "\
                "--pkt_cnt %d" % (tc.up0_intf,
                pktgen.DEFAULT_S2H_GEN_PKT_FILENAME, pkt_cnt)

    api.Trigger_AddHostCommand(s2h_req, node.Name(), 'sleep 0.5')
    api.Trigger_AddHostCommand(s2h_req, node.Name(), send_cmd)

    trig_resp = api.Trigger(s2h_req)
    time.sleep(SNIFF_TIMEOUT)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    s2h_resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    tc.resp.append(s2h_resp)
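The H2S and S2H halves of send_pkt() differ only in which uplink transmits, which receives, and which side carries the encap. A sketch of how both passes could be folded into one table-driven helper (MAC/IP/NAT setters omitted for brevity; names mirror the tc fields used above):

def run_direction(tc, node, pkt_gen, pkt_cnt, cfg):
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    # receiver side
    pkt_gen.set_encap(cfg["rx_encap"])
    pkt_gen.set_Rx(True)
    pkt_gen.set_vlan(cfg["rx_vlan"])
    pkt_gen.setup_pkt()
    recv_cmd = "./recv_pkt.py --intf_name %s --pcap_fname %s " \
               "--timeout %s --pkt_cnt %d" % (cfg["rx_intf"], cfg["recv_pcap"],
                                              str(SNIFF_TIMEOUT), pkt_cnt)
    api.Trigger_AddHostCommand(req, node.Name(), recv_cmd, background=True)
    # sender side
    pkt_gen.set_encap(cfg["tx_encap"])
    pkt_gen.set_Rx(False)
    pkt_gen.set_vlan(cfg["tx_vlan"])
    pkt_gen.setup_pkt()
    send_cmd = "./send_pkt.py --intf_name %s --pcap_fname %s --pkt_cnt %d" % (
        cfg["tx_intf"], cfg["gen_pcap"], pkt_cnt)
    api.Trigger_AddHostCommand(req, node.Name(), 'sleep 0.5')
    api.Trigger_AddHostCommand(req, node.Name(), send_cmd)
    trig_resp = api.Trigger(req)
    time.sleep(SNIFF_TIMEOUT)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    return api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)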