Exemplo n.º 1
0
def Setup(tc):
    """Prepare the UDP client/server testcase.

    Picks the first remote workload pair, arranges for the Naples node to be
    the client, allocates UDP ports for both ends, and (re)starts the server.

    Returns the status of startServer().
    """
    tc.cmd_cookies = []
    # Pick the first remote workload pair; roles may be swapped below so the
    # Naples node always ends up as the client (workaround for an fte bug).
    workload_pair = api.GetRemoteWorkloadPairs()[0]
    tc.server, tc.client = workload_pair
    if not tc.client.IsNaples():
        tc.client, tc.server = workload_pair
        assert (tc.client.IsNaples())
    tc.naples = tc.client

    # Create the ShmDumpHelper object
    global shmDumpHelper
    shmDumpHelper = ShmDumpHelper()

    # Allocate one UDP port for each side and record them module-wide.
    global server_port, client_port
    server_port = api.AllocateUdpPort()
    client_port = api.AllocateUdpPort()
    if server_port and client_port:
        api.Logger.info("Using %d as client port and %d as server port" %
                        (client_port, server_port))

    stopServer(tc)
    return startServer(tc)
Exemplo n.º 2
0
def Trigger(tc):
    """Validate UDP session aging for every workload pair.

    For each pair, fires single-packet UDP flows with hping3 from the client,
    then (when the server side is a Naples) checks via halctl that the session
    is present before the aging interval and absent after it.

    Returns:
        api.types.status.SUCCESS always; command output is left in tc.resp.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_cookies = []

    for pair in tc.workload_pairs:
        server = pair[0]
        client = pair[1]
        # Use whichever side is a Naples; skip the pair if neither is.
        naples = server
        if not server.IsNaples():
            naples = client
            if not client.IsNaples():
                continue

        cmd_cookie = "%s(%s) --> %s(%s)" %\
                     (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        api.Logger.info("Starting UDP Single flow test from %s" % (cmd_cookie))

        timeout = get_timeout('udp-timeout')
        server_port = api.AllocateUdpPort()
        client_port = api.AllocateUdpPort()

        # Send 5 single-packet UDP probes on the same 5-tuple (single flow).
        for idx in range(0, 5):
            api.Trigger_AddCommand(
                req, client.node_name, client.workload_name,
                "sudo hping3 -c 1 -s %s -p %s --udp %s -d 10" %
                (client_port, server_port, server.ip_address))
            # NOTE(review): the command runs on the client toward the server,
            # but the cookie names the opposite direction — confirm intent.
            tc.cmd_cookies.append("Send data from server to client")

        if server.IsNaples():
            cmd_cookie = "Before aging show session"
            api.Trigger_AddNaplesCommand(
                req, server.node_name,
                "/nic/bin/halctl show session --dstport {} --dstip {} --srcip {} | grep UDP"
                .format(server_port, server.ip_address, client.ip_address))
            tc.cmd_cookies.append(cmd_cookie)

            #Get it from the config
            cmd_cookie = "sleep"
            # NOTE(review): this sleeps (timeout mod 5) seconds, i.e. 0-4s,
            # regardless of the configured udp-timeout — '%' looks like it
            # may have been meant to be '+' or similar; confirm.
            api.Trigger_AddNaplesCommand(req,
                                         server.node_name,
                                         "sleep %s" % (int(timeout) % 5),
                                         timeout=300)
            tc.cmd_cookies.append(cmd_cookie)

            cmd_cookie = "After aging show session"
            api.Trigger_AddNaplesCommand(
                req, server.node_name,
                "/nic/bin/halctl show session --dstport {} --dstip {} --srcip {} | grep UDP"
                .format(server_port, server.ip_address, client.ip_address))
            tc.cmd_cookies.append(cmd_cookie)

    tc.resp = api.Trigger(req)
    return api.types.status.SUCCESS
Exemplo n.º 3
0
def Trigger(tc):
    """Stress 'halctl clear/show session' while iperf traffic is running.

    Uses the first local workload pair (preferring a Naples endpoint).
    Repeats tc.args.count times: start an iperf server in the background,
    run a 100-stream client, then show and clear the session table on the
    Naples node.

    Returns:
        api.types.status.SUCCESS; aggregated command output is in tc.resp.
    """
    pairs = api.GetLocalWorkloadPairs()
    tc.cmd_cookies = []
    server, client = pairs[0]
    # Prefer the server as the Naples node; swap roles when only the client
    # is a Naples, and skip the test when neither side is.
    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS
        else:
            client, server = pairs[0]

    cmd_cookie = "%s(%s) --> %s(%s)" %\
                (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
    api.Logger.info("Starting clear & show stress test from %s" % (cmd_cookie))

    # NOTE(review): proto and timeout are computed here but never used below.
    basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
    proto = 6
    timeout = 250
    #tc.secprof = sec_profile_obj.gl_securityprofile_json_template
    #timeout = int(tc.secprof['security-profiles'][0]['spec']['timeouts']['tcp']) + \
    #          int(tc.secprof['security-profiles'][0]['spec']['timeouts']['tcp-close'])
    if tc.iterators.proto == 'udp':
        basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()
        proto = 17
        timeout = 150
        #timeout = tc.security_profile['security-profiles'][0]['spec']['timeouts']['udp']

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    for cnt in range(tc.args.count):
        cmd_cookie = "iperf -s"
        # NOTE: basecmd ends with a trailing space, so "%s-s" still renders
        # as "... -s"; the format string below depends on that.
        api.Trigger_AddCommand(req,
                               server.node_name,
                               server.workload_name,
                               "%s-s -t 300" % basecmd,
                               background=True)
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "iperf -c "
        api.Trigger_AddCommand(
            req, client.node_name, client.workload_name,
            "%s -c %s -P 100" % (basecmd, server.ip_address))
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Show session"
        api.Trigger_AddNaplesCommand(req, naples.node_name,
                                     "/nic/bin/halctl show session")
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Clear session"
        api.Trigger_AddNaplesCommand(req, naples.node_name,
                                     "/nic/bin/halctl clear session")
        tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
Exemplo n.º 4
0
def Trigger(tc):
    """Run one iperf server/client pair across every tunnel, all queued in a
    single parallel request. The aggregated responses and cookies are stored
    on tc.context for Verify."""
    tc.contexts = []
    ctxt = IperfTestContext()
    ctxt.req = api.Trigger_CreateAllParallelCommandsRequest()
    ctxt.cmd_cookies = []

    for tunnel in tc.tunnels:
        server_wl = tunnel.ltep
        client_wl = tunnel.rtep

        cmd_cookie = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server_wl.workload_name, server_wl.ip_address, client_wl.workload_name, client_wl.ip_address)
        api.Logger.info("Starting Iperf test from %s" % (cmd_cookie))

        # A TCP port is always allocated; the UDP case allocates a second
        # (UDP) port and rebuilds the base command.
        basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
        if tc.iterators.proto == 'udp':
            basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()

        # Local tunnel endpoint serves; remote endpoint drives traffic.
        api.Trigger_AddCommand(ctxt.req, server_wl.node_name, server_wl.workload_name,
                               "%s -s -t 300" % basecmd, background = True)
        api.Trigger_AddCommand(ctxt.req, client_wl.node_name, client_wl.workload_name,
                               "%s -c %s" % (basecmd, server_wl.ip_address))

        # One cookie per queued command (server + client).
        ctxt.cmd_cookies.extend([cmd_cookie, cmd_cookie])

    trig_resp = api.Trigger(ctxt.req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    ctxt.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    tc.context = ctxt

    return api.types.status.SUCCESS
Exemplo n.º 5
0
def Trigger(tc):
    """Run a multi-threaded iperf test between tc.workloads[0] (server) and
    tc.workloads[1] (client).

    The test-type arg selects whether commands run on the host or on Naples;
    iterator knobs choose protocol, packet size and IP version. Client output
    is stored in tc.cli_resp for Verify; the background iperf servers are
    terminated before returning.

    Returns:
        api.types.status.SUCCESS always.
    """
    if api.IsDryrun():
        return api.types.status.SUCCESS

    srv = tc.workloads[0]
    cli = tc.workloads[1]

    # Determine where the commands will be run - host or Naples.
    test_type = getattr(tc.args, "test-type", INTF_TEST_TYPE_HOST)
    is_naples_cmd = (test_type != INTF_TEST_TYPE_HOST)

    srv_req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    cli_req = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    proto = getattr(tc.iterators, "proto", 'tcp')
    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)
    pktsize = getattr(tc.iterators, "pktsize", None)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')

    if ipproto == 'v4':
        server_ip = srv.ip_address
        client_ip = cli.ip_address
    else:
        server_ip = srv.ipv6_address
        client_ip = cli.ipv6_address

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (srv.interface, server_ip, cli.interface, client_ip)

    api.Logger.info("Starting Iperf(%s/%s) test from %s"
                    % (proto, ipproto, tc.cmd_descr))

    duration = 10
    for i in range(number_of_iperf_threads):
        # One port, server and client per iperf thread.
        if proto == 'tcp':
            port = api.AllocateTcpPort()
        else:
            port = api.AllocateUdpPort()

        iperf_server_cmd = iperf.ServerCmd(port, naples=is_naples_cmd)
        api.Trigger_AddCommand(srv_req, srv.node_name, srv.workload_name,
                               iperf_server_cmd, background=True)

        iperf_client_cmd = iperf.ClientCmd(server_ip, port, time=duration,
                                           proto=proto, jsonOut=True,
                                           ipproto=ipproto, pktsize=pktsize,
                                           client_ip=client_ip,
                                           naples=is_naples_cmd)
        api.Trigger_AddCommand(cli_req, cli.node_name, cli.workload_name,
                               iperf_client_cmd, timeout=60)

    srv_resp = api.Trigger(srv_req)
    # Wait for the background iperf servers to start before the clients run.
    time.sleep(10)
    tc.cli_resp = api.Trigger(cli_req)
    # Wait for iperf clients to finish.
    time.sleep(2 * duration)

    # Fix: the terminate response was previously bound to an unused local
    # (srv_resp1); only the side effect of stopping the servers is needed.
    api.Trigger_TerminateAllCommands(srv_resp)

    return api.types.status.SUCCESS
Exemplo n.º 6
0
def Trigger(tc):
    """Run iperf between every workload pair, one server/client pair each.

    Bug fix: the UDP branch previously allocated a TCP port and the TCP
    branch a UDP port; the allocation now matches tc.iterators.proto.

    Commands and descriptions are collected on tc for Verify; client output
    lands in tc.iperf_client_resp.

    Returns:
        api.types.status.SUCCESS always.
    """
    if tc.skip:
        return api.types.status.SUCCESS

    tc.serverCmds = []
    tc.clientCmds = []
    tc.cmd_descr = []

    # Hardware runs fan commands out fully in parallel; simulation falls
    # back to a plain non-serial execute request.
    if not api.IsSimulation():
        serverReq = api.Trigger_CreateAllParallelCommandsRequest()
        clientReq = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        serverReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        clientReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    for idx, pairs in enumerate(tc.workload_pairs):
        client = pairs[0]
        server = pairs[1]

        cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        tc.cmd_descr.append(cmd_descr)
        num_sessions = int(getattr(tc.args, "num_sessions", 1))
        api.Logger.info("Starting Iperf test from %s num-sessions %d" % (cmd_descr, num_sessions))

        if tc.iterators.proto == 'udp':
            # Fixed: allocate a UDP port for UDP traffic (was AllocateTcpPort).
            port = api.AllocateUdpPort()
            serverCmd = iperf.ServerCmd(port)
            clientCmd = iperf.ClientCmd(server.ip_address, port, proto='udp', jsonOut=True, num_of_streams=num_sessions)
        else:
            # Fixed: allocate a TCP port for TCP traffic (was AllocateUdpPort).
            port = api.AllocateTcpPort()
            serverCmd = iperf.ServerCmd(port)
            clientCmd = iperf.ClientCmd(server.ip_address, port, jsonOut=True, num_of_streams=num_sessions)

        tc.serverCmds.append(serverCmd)
        tc.clientCmds.append(clientCmd)

        api.Trigger_AddCommand(serverReq, server.node_name, server.workload_name,
                               serverCmd, background=True)

        api.Trigger_AddCommand(clientReq, client.node_name, client.workload_name,
                               clientCmd)

    server_resp = api.Trigger(serverReq)
    # Sleep for some time as the background servers may not have started yet.
    time.sleep(30)

    tc.iperf_client_resp = api.Trigger(clientReq)

    # Terminate the background iperf servers.
    api.Trigger_TerminateAllCommands(server_resp)

    return api.types.status.SUCCESS
Exemplo n.º 7
0
def Trigger(tc):
    """Multicast outbound iperf test: join the multicast MAC group and add a
    host route on both remote workloads, run an iperf server/client pair
    against the group address, then remove the memberships and routes.
    Aggregated command output is stored in tc.resp."""
    pairs = api.GetRemoteWorkloadPairs()
    w1 = pairs[0][1]
    w2 = pairs[0][0]

    group = "239.1.1.1"
    maddr = "01:00:5e:01:01:01"

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" % \
                   (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
    api.Logger.info("Starting Multicast outbound Iperf test from %s" % (tc.cmd_descr))

    def _queue_bg(wl, cmdline):
        # Queue a background command on the given workload.
        api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                               cmdline, background = True)

    # Setup: multicast MAC membership, then host routes, on both workloads.
    for wl in (w1, w2):
        _queue_bg(wl, "ip maddress add %s dev %s" % (maddr, wl.interface))
    for wl in (w1, w2):
        _queue_bg(wl, "ip route add %s/32 dev %s" % (group, wl.interface))

    # Server (w1) binds to the group address; client (w2) sends with TTL 32.
    basecmd = 'iperf -p  %d ' % api.AllocateTcpPort()
    if tc.iterators.proto == 'udp':
        basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()
    _queue_bg(w1, "%s -s -t 300 -B %s -i 1" % (basecmd, group))
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                           "%s -c %s -T 32 -t 3 -i 1" % (basecmd, group))

    # Teardown: remove the memberships and routes added above.
    for wl in (w1, w2):
        _queue_bg(wl, "ip maddress del %s dev %s" % (maddr, wl.interface))
    for wl in (w1, w2):
        _queue_bg(wl, "ip route del %s/32 dev %s" % (group, wl.interface))

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
Exemplo n.º 8
0
def PingCmdBuilder(src_wl,
                   dest_ip,
                   proto='icmp',
                   af='ipv4',
                   pktsize=64,
                   args=None,
                   count=3):
    """Build a probe command line (arping / ping / hping3) for a workload.

    Args:
        src_wl: source workload the command will run on.
        dest_ip: destination address of the probe.
        proto: 'arp', 'icmp', 'tcp' or 'udp'.
        af: address family; 'arp' requires IPv4.
        pktsize: ICMP payload size (unused for the other protocols).
        args: for 'arp', 'DAD' or 'update' selects the arping mode.
        count: probe count for 'arp' probes.

    Returns:
        The command string, or None for an unrecognized proto.
    """
    cmd = None
    dest_addr = " %s" % (dest_ip)
    if proto == 'arp':
        # ARP only makes sense over IPv4.
        if not __is_ipv4(af):
            assert (0)
        if args == 'DAD':
            arp_base_cmd = __get_arp_base_cmd(src_wl, False, True, count)
        elif args == 'update':
            arp_base_cmd = __get_arp_base_cmd(src_wl, True, False, count)
        else:
            arp_base_cmd = __get_arp_base_cmd(src_wl, False, False, count)

        addr = __get_workload_address(src_wl, "ipv4")
        if args == 'update':
            # Gratuitous-ARP update targets the workload's own address.
            dest_addr = " %s" % (addr)
        cmd = arp_base_cmd + dest_addr
    elif proto == 'icmp':
        ping_base_cmd = __get_ping_base_cmd(src_wl, af, pktsize, 3, 0.2, False)
        cmd = __ping_addr_substitution(ping_base_cmd, dest_addr)
    elif proto in ['tcp', 'udp']:
        if proto == 'udp':
            dest_port = api.AllocateUdpPort()
            # Robustness fix: skip over the 'geneve' reserved port 6081 —
            # loop, because a single re-allocation could return 6081 again.
            while dest_port == 6081:
                dest_port = api.AllocateUdpPort()
        else:
            dest_port = api.AllocateTcpPort()
        cmd = hping.GetHping3Cmd(proto, src_wl, dest_ip, dest_port)

    return cmd
Exemplo n.º 9
0
def Trigger(tc):
    """Start background iperf servers on workload 0 (serial request) and the
    matching clients on workload 1 (non-serial request), one pair per iperf
    thread, then aggregate all command responses into tc.resp."""
    if api.IsDryrun():
        return api.types.status.SUCCESS

    req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    w1 = tc.workloads[0]
    w2 = tc.workloads[1]
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (w1.interface, w1.ip_address, w2.interface, w2.ip_address)
    api.Logger.info("Starting Iperf test from %s" % (tc.cmd_descr))

    # Test knobs, with defaults when the iterator/args omit them.
    proto = getattr(tc.iterators, "proto", 'tcp')
    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)
    pktsize = getattr(tc.iterators, "pktsize", 512)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')

    for _ in range(number_of_iperf_threads):
        # Allocate a protocol-appropriate port per thread.
        port = api.AllocateTcpPort() if proto == 'tcp' else api.AllocateUdpPort()

        server_cmd = cmd_builder.iperf_server_cmd(port=port)
        api.Trigger_AddCommand(req1,
                               w1.node_name,
                               w1.workload_name,
                               server_cmd,
                               background=True)

        client_cmd = cmd_builder.iperf_client_cmd(server_ip=w1.ip_address,
                                                  port=port,
                                                  proto=proto,
                                                  pktsize=pktsize,
                                                  ipproto=ipproto)
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               client_cmd)

    trig_resp1 = api.Trigger(req1)
    trig_resp2 = api.Trigger(req2)
    term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)

    combined = api.Trigger_AggregateCommandsResponse(trig_resp1, term_resp1)
    tc.resp = api.Trigger_AggregateCommandsResponse(combined, trig_resp2)

    return api.types.status.SUCCESS
Exemplo n.º 10
0
def Trigger(tc):
    """Measure session aging for the configured protocol and drop timeout.

    Picks a workload pair (local or remote per tc.args.type, preferring a
    Naples endpoint), programs the drop timeout and security policy, verifies
    the HAL-programmed timeout matches the testcase iterator, then runs iperf
    and snapshots the session table before and after the aging interval.

    Returns:
        api.types.status.SUCCESS on completion, api.types.status.FAILURE when
        the HAL timeout does not match the testcase timeout.
    """
    if tc.args.type == 'local_only':
        pairs = api.GetLocalWorkloadPairs()
    else:
        pairs = api.GetRemoteWorkloadPairs()
    tc.cmd_cookies = []
    server, client = pairs[0]
    # Prefer the server as the Naples endpoint; swap roles when only the
    # client is a Naples, and skip the test when neither side is.
    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS
        else:
            client, server = pairs[0]

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
    api.Logger.info("Starting Iperf test from %s" % (cmd_cookie))

    cmd_cookie = "halctl clear session"
    api.Trigger_AddNaplesCommand(req, server.node_name,
                                 "/nic/bin/halctl clear session")
    tc.cmd_cookies.append(cmd_cookie)

    server_port = api.AllocateTcpPort()
    basecmd = 'iperf -p %d ' % server_port
    proto = 6
    timeout_str = 'tcp-drop'
    if tc.iterators.proto == 'udp':
        server_port = api.AllocateUdpPort()
        basecmd = 'iperf -u -p %d ' % server_port
        proto = 17
        timeout_str = 'udp-drop'
    #Step 0: Update the timeout in the config object
    update_timeout(timeout_str, tc.iterators.timeout)
    update_sgpolicy(client.ip_address, server.ip_address, tc.iterators.proto,
                    server_port)

    # Read back the nwsec profile so the programmed timeout can be checked.
    profilereq = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddNaplesCommand(profilereq, naples.node_name,
                                 "/nic/bin/halctl show nwsec profile --id 16")
    profcommandresp = api.Trigger(profilereq)
    cmd = profcommandresp.commands[-1]
    for command in profcommandresp.commands:
        api.PrintCommandResults(command)
    timeout = get_haltimeout(timeout_str, cmd)
    api.Logger.info("Hal timeout %s, tc timeout %s" %
                    (timeout, timetoseconds(tc.iterators.timeout)))
    if (timeout != timetoseconds(tc.iterators.timeout)):
        # Bug fix: 'Logger.errror' (typo) raised AttributeError here and
        # masked the intended failure log.
        api.Logger.error("Found mismatch in HAL and testcase timeout")
        tc.config_update_fail = 1
        return api.types.status.FAILURE

    timeout += GRACE_TIME
    cmd_cookie = "iperf -s"
    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "%s -s -t 300 " % basecmd,
                           background=True)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "iperf -c "
    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "%s -c %s " % (basecmd, server.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Before aging show session"
    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --ipproto %s | grep %s" %
        (proto, naples.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    #Get it from the config
    cmd_cookie = "sleep"
    api.Trigger_AddNaplesCommand(req,
                                 naples.node_name,
                                 "sleep %s" % timeout,
                                 timeout=300)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "After aging show session"
    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --ipproto %s | grep %s" %
        (proto, naples.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    return api.types.status.SUCCESS
Exemplo n.º 11
0
def iperfWorkloads(workload_pairs,
                   af="ipv4",
                   proto="tcp",
                   packet_size=64,
                   bandwidth="100G",
                   time=1,
                   num_of_streams=None,
                   sleep_time=30,
                   background=False):
    """Run iperf between each (client, server) workload pair.

    Args:
        workload_pairs: iterable of (client, server) workload tuples.
        af: address family used to pick each workload's address.
        proto: "tcp" or "udp".
        packet_size, bandwidth, time, num_of_streams: iperf client knobs.
        sleep_time: seconds to wait for the background servers to come up.
        background: run clients in the background and return both responses.

    Returns:
        ([cmdDesc, serverCmds, clientCmds], server_resp, client_resp) when
        background is True, otherwise ([cmdDesc, serverCmds, clientCmds],
        client_resp).
    """
    serverCmds = []
    clientCmds = []
    cmdDesc = []
    ipproto = __get_ipproto(af)

    # Hardware runs fan out fully in parallel; simulation uses a plain
    # non-serial execute request.
    if not api.IsSimulation():
        serverReq = api.Trigger_CreateAllParallelCommandsRequest()
        clientReq = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        serverReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        clientReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    for idx, pairs in enumerate(workload_pairs):
        client = pairs[0]
        server = pairs[1]
        server_addr = __get_workload_address(server, af)
        client_addr = __get_workload_address(client, af)
        if proto == 'udp':
            port = api.AllocateUdpPort()
            # Robustness fix: keep re-allocating while we land on the
            # geneve-reserved port 6081 (a single retry could hit it again).
            while port == 6081:
                port = api.AllocateUdpPort()
        else:
            port = api.AllocateTcpPort()

        serverCmd = iperf.ServerCmd(port, jsonOut=True)
        clientCmd = iperf.ClientCmd(server_addr,
                                    port,
                                    time,
                                    packet_size,
                                    proto,
                                    None,
                                    ipproto,
                                    bandwidth,
                                    num_of_streams,
                                    jsonOut=True)

        cmd_cookie = "Server: %s(%s:%s:%d) <--> Client: %s(%s)" %\
                     (server.workload_name, server_addr, proto, port,\
                      client.workload_name, client_addr)
        api.Logger.info("Starting Iperf test %s" % cmd_cookie)
        serverCmds.append(serverCmd)
        clientCmds.append(clientCmd)
        cmdDesc.append(cmd_cookie)

        api.Trigger_AddCommand(serverReq,
                               server.node_name,
                               server.workload_name,
                               serverCmd,
                               background=True)
        api.Trigger_AddCommand(clientReq,
                               client.node_name,
                               client.workload_name,
                               clientCmd,
                               background=background)

    server_resp = api.Trigger(serverReq)
    #Sleep for some time as bg may not have been started.
    api.Logger.info(
        f"Waiting {sleep_time} sec to start iperf server in background")
    __sleep(sleep_time)
    client_resp = api.Trigger(clientReq)
    __sleep(3)

    if background:
        # Caller collects/terminates the background clients itself.
        return [cmdDesc, serverCmds, clientCmds], server_resp, client_resp
    else:
        api.Trigger_TerminateAllCommands(server_resp)
        return [cmdDesc, serverCmds, clientCmds], client_resp
Exemplo n.º 12
0
def Trigger(tc):
    """Multiple-IPv4-UDP-Flow-Drops test.

    Phase 1 (Create): clear sessions, prime forwarding with a ping, send good
    and bad (TTL=0) UDP bursts in both directions with hping3, then dump the
    session table and IPv4FlowDropMetrics.
    Phase 2 (Re-use): repeat the same traffic against re-used flows.
    Phase 3 (Delete): clear sessions, wait 45s, and dump metrics once more.

    Responses are stored in tc.resp1 / tc.resp2 / tc.resp3 for Verify.

    Returns:
        api.types.status.FAILURE when tc.skip is set, otherwise
        api.types.status.SUCCESS.
    """
    if tc.skip == True:
        return api.types.status.FAILURE

    #
    # Set-up Test Environment
    #
    tc.cmd_cookies = []
    tc.cookie_idx = 0
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                 (tc.server.workload_name, tc.server.ip_address,
                  tc.client.workload_name, tc.client.ip_address)
    api.Logger.info("Starting Multiple-IPv4-UDP-Flow-Drops test from %s" %\
                   (cmd_cookie))

    #
    # Start TCPDUMP in background on Server/Client
    #
    req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    #cmd_cookie = "start tcpdump on Server"
    #cmd = "sudo tcpdump -nnSXi {} > out.txt".format(tc.server.interface)
    #add_command(tc, req1, cmd_cookie, cmd, tc.server, True)

    #cmd_cookie = "start tcpdump on Client"
    #cmd = "sudo tcpdump -nnSXi {} > out.txt".format(tc.client.interface)
    #add_command(tc, req1, cmd_cookie, cmd, tc.client, True)

    #
    # Start with a clean slate by clearing all sessions/flows
    #
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session"
    add_naples_command(tc, req1, cmd_cookie, cmd, tc.naples)

    #
    # Make sure that Client<=>Server Forwarding is set up
    #
    cmd_cookie = "trigger ping: Create case"
    cmd = "ping -c1 %s -I %s" %\
          (tc.server.ip_address, tc.client.interface)
    add_command(tc, req1, cmd_cookie, cmd, tc.client, False)

    #
    # Allocate UDP-portnum for Server and start the service on the Server
    #
    tc.server_port = api.AllocateUdpPort()
    #cmd_cookie = "start server"
    #cmd = "iperf --udp --port %s --server" % (tc.server_port)
    #add_command(tc, req1, cmd_cookie, cmd, tc.server, True)

    #
    # Allocate UDP-portnum for Client and start the service on the Client
    #
    tc.client_port = api.AllocateUdpPort()
    #cmd_cookie = "start client"
    #cmd = "iperf --udp --port %s --server" % (tc.client_port)
    #add_command(tc, req1, cmd_cookie, cmd, tc.client, True)

    #
    # Send Good-Data from Client and Server
    #
    cmd_cookie = "send good data from Client: Create case"
    cmd = "hping3 --udp --count {} --interval u{} --baseport {}\
           --destport {} {}"\
          .format(tc.iterators.sessions, tc.iterators.interval,
                  tc.client_port, tc.server_port, tc.server.ip_address)
    add_command(tc, req1, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send good data from Server: Create case"
    cmd = "hping3 --udp --count {} --interval  u{} --baseport {}\
           --destport {} {}"\
          .format(tc.iterators.sessions, tc.iterators.interval,
                  tc.server_port, tc.client_port, tc.client.ip_address)
    add_command(tc, req1, cmd_cookie, cmd, tc.server, False)

    #
    # Send Bad-Data (TTL=0) from Client and Server
    #
    cmd_cookie = "send bad data from Client TTL=0: Create case"
    cmd = "hping3 --udp --ttl 0 --count {} --interval u{} --baseport {}\
           --destport {} {}"\
          .format(tc.iterators.sessions, tc.iterators.interval,
                  tc.client_port, tc.server_port, tc.server.ip_address)
    add_command(tc, req1, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TTL=0: Create case"
    cmd = "hping3 --udp --ttl 0 --count {} --interval u{} --baseport {}\
           --destport {} {}"\
          .format(tc.iterators.sessions, tc.iterators.interval,
                  tc.server_port, tc.client_port, tc.client.ip_address)
    add_command(tc, req1, cmd_cookie, cmd, tc.server, False)

    #
    # Do "show session" command
    #
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req1, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Create case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"

    add_naples_command(tc, req1, cmd_cookie, cmd, tc.naples)

    tc.resp1 = api.Trigger(req1)
    for command in tc.resp1.commands:
        api.PrintCommandResults(command)

    #
    # Clearing all sessions/flows
    #
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session"
    add_naples_command(tc, req2, cmd_cookie, cmd, tc.naples)

    #
    # Make sure that Client<=>Server Forwarding is set up
    #
    cmd_cookie = "trigger ping: Re-use case"
    cmd = "ping -c1 %s -I %s" %\
          (tc.server.ip_address, tc.client.interface)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    #
    # Re-send Good-Data from Client and Server
    #
    cmd_cookie = "re-send good data from Client: Re-use case"
    cmd = "hping3 --udp --count {} --interval u{} --baseport {}\
           --destport {} {}"\
          .format(tc.iterators.sessions, tc.iterators.interval,
                  tc.client_port, tc.server_port, tc.server.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "re-send good data from Server: Re-use case"
    cmd = "hping3 --udp --count {} --interval u{} --baseport {}\
           --destport {} {}"\
          .format(tc.iterators.sessions, tc.iterators.interval,
                  tc.server_port, tc.client_port, tc.client.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

    #
    # Re-send Bad-Data (TTL=0) from Client and Server
    #
    cmd_cookie = "re-send bad data from Client TTL=0: Re-use case"
    cmd = "hping3 --udp --ttl 0 --count {} --interval u{} --baseport {}\
           --destport {} {}"\
          .format(tc.iterators.sessions, tc.iterators.interval,
                  tc.client_port, tc.server_port, tc.server.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "re-send bad data from Server TTL=0: Re-use case"
    cmd = "hping3 --udp --ttl 0 --count {} --interval u{} --baseport {}\
           --destport {} {}"\
          .format(tc.iterators.sessions, tc.iterators.interval,
                  tc.server_port, tc.client_port, tc.client.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

    #
    # Do "show session" command
    #
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req2, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Re-use case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"

    add_naples_command(tc, req2, cmd_cookie, cmd, tc.naples)

    tc.resp2 = api.Trigger(req2)
    for command in tc.resp2.commands:
        api.PrintCommandResults(command)

    #
    # Clearing all sessions/flows and Sleep for 45secs
    #
    req3 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session; sleep 45"
    add_naples_command(tc, req3, cmd_cookie, cmd, tc.naples)

    #
    # Do "show session" command
    #
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req3, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Delete case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"

    add_naples_command(tc, req3, cmd_cookie, cmd, tc.naples)

    tc.resp3 = api.Trigger(req3)
    for command in tc.resp3.commands:
        api.PrintCommandResults(command)

    return api.types.status.SUCCESS
Exemplo n.º 13
0
def Trigger(tc):
    """Run a background UDP iperf session between a workload pair and
    vMotion the server workload to another Naples node mid-traffic.

    Returns api.types.status.SUCCESS (also when the server side is not a
    Naples node, in which case the test is a no-op).
    """
    # BUG FIX: the original bound the pair list to two different names
    # (local_pairs / remote_pairs) depending on the branch, then always
    # read `local_pairs` — a NameError for the non-local case.  Bind one
    # name in both branches.
    if tc.args.type == 'local_only':
        pairs = api.GetLocalWorkloadPairs()
    else:
        pairs = api.GetRemoteWorkloadPairs()
    tc.skip_security_prof = getattr(tc.args, "skip_security_prof", False)
    tc.cmd_cookies       = []
    tc.cmd_cookies_after = []
    server, client = pairs[0]
    wl1 = server
    tc.wl       = wl1
    tc.old_node = server.node_name

    # The move only makes sense when the server workload sits on Naples.
    if not server.IsNaples():
        return api.types.status.SUCCESS

    # Remove the node currently hosting the workload and pick the next
    # Naples node as the vMotion destination.
    naples_nodes = tc.Nodes[:]
    naples_nodes.remove(server.node_name)
    assert(len(naples_nodes) >= 1)
    new_node     = naples_nodes[0]
    tc.new_node  = new_node
    move_info    = vm_utils.MoveInfo()
    move_info.new_node = new_node
    move_info.wl       = wl1
    move_info.old_node = wl1.node_name
    tc.move_info.append(move_info)

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
    api.Logger.info("Starting Iperf test from %s" % (cmd_cookie))

    cmd_cookie = "halctl clear session"
    api.Trigger_AddNaplesCommand(req, server.node_name, "/nic/bin/halctl clear session")
    tc.cmd_cookies.append(cmd_cookie)

    server_port = api.AllocateUdpPort()
    basecmd = 'iperf -u '
    timeout_str = 'udp-timeout'
    # Effective UDP session timeout: from the security profile unless the
    # test explicitly skips profile handling.
    if not tc.skip_security_prof:
        timeout = get_timeout(timeout_str)
    else:
        timeout = VMOTION_UDP_TIMEOUT

    # Step 0: push the iterator's timeout into the config object.
    if not tc.skip_security_prof:
        update_timeout(timeout_str, tc.iterators.timeout)

    cmd_cookie = "start server"
    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "%s -p %s -s -t 180" % (basecmd, server_port), background = True)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "start client"
    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "%s -p %s -c %s -t 180" % (basecmd, server_port, server.ip_address), background = True)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Before move show session"
    api.Trigger_AddNaplesCommand(req, wl1.node_name, "/nic/bin/halctl show session --dstip %s | grep UDP" % (server.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)

    # BUG FIX: this statement carried one extra leading space in the
    # original, an IndentationError at import time.
    vm_utils.do_vmotion(tc, True)
    return api.types.status.SUCCESS
Exemplo n.º 14
0
def runIperfTest(tc, srv, cli):
    """Run one or more parallel iperf streams from *cli* to *srv* and
    scrape retransmission/bandwidth/bytes numbers from the client logs.

    srv/cli are workload objects; iterators select proto (tcp/udp),
    ipproto (v4/v6) and packet size.  Returns an api.types.status code.
    """
    if api.IsDryrun():
        return api.types.status.SUCCESS

    # req1 starts the background iperf servers; req2 runs the clients and
    # the log-scraping commands in parallel.
    req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    proto = getattr(tc.iterators, "proto", 'tcp')
    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)
    pktsize = getattr(tc.iterators, "pktsize", 512)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')

    if ipproto == 'v6':
        srv_ip_address = srv.ipv6_address
        tc.cmd_descr = " Server: %s(%s) <--> Client: %s(%s)" %\
            (srv.interface, srv.ipv6_address, cli.interface, cli.ipv6_address)
    else:
        srv_ip_address = srv.ip_address
        tc.cmd_descr = " Server: %s(%s) <--> Client: %s(%s)" %\
            (srv.interface, srv.ip_address, cli.interface, cli.ip_address)

    api.Logger.info("Starting TSO test %s" % (tc.cmd_descr))

    # Snapshot bad-checksum counters so a later verify step can diff them.
    tc.srv_bad_csum = ionic_stats.getNetstatBadCsum(srv, proto)
    tc.cli_bad_csum = ionic_stats.getNetstatBadCsum(cli, proto)

    for i in range(number_of_iperf_threads):
        if proto == 'tcp':
            port = api.AllocateTcpPort()
        else:
            port = api.AllocateUdpPort()

        file_name_suffix = "_instance" + \
            str(i) + proto + "_" + ipproto + "_" + str(pktsize)

        # BUG FIX: the original also built a server-side file_name here
        # that was never used; dropped.
        iperf_server_cmd = cmd_builder.iperf_server_cmd(port=port)
        api.Trigger_AddCommand(req1,
                               srv.node_name,
                               srv.workload_name,
                               iperf_server_cmd,
                               background=True)

        file_name = '/tmp/' + 'cli_' + cli.interface + file_name_suffix
        iperf_file_name = file_name + ".log"
        iperf_client_cmd = cmd_builder.iperf_client_cmd(
            server_ip=srv_ip_address,
            port=port,
            proto=proto,
            pktsize=pktsize,
            ipproto=ipproto)

        # Once iperf JSON support is available, we don't need these hacks:
        # run the client teeing JSON to a log, then grep the log for the
        # retransmission, bandwidth and bytes-transferred fields.
        api.Trigger_AddCommand(
            req2, cli.node_name, cli.workload_name,
            iperf_client_cmd + " -J | tee " + iperf_file_name)
        # Read the retransmission counter from the log
        api.Trigger_AddCommand(
            req2, cli.node_name, cli.workload_name, 'grep retrans ' +
            iperf_file_name + '| tail -1| cut -d ":" -f 2 | cut -d "," -f 1')
        # Read the bandwidth numbers.
        api.Trigger_AddCommand(
            req2, cli.node_name, cli.workload_name, 'cat ' + iperf_file_name +
            ' | grep bits_per_second | tail -1 |  cut -d ":" -f 2 | cut -d "," -f 1'
        )
        # Read the bytes transferred numbers.
        api.Trigger_AddCommand(
            req2, cli.node_name, cli.workload_name, 'cat ' + iperf_file_name +
            ' | grep bytes | tail -1 |  cut -d ":" -f 2 | cut -d "," -f 1')

    trig_resp1 = api.Trigger(req1)
    if trig_resp1 is None:
        api.Logger.error("Failed to run iperf server")
        return api.types.status.FAILURE

    tc.resp = api.Trigger(req2)
    if tc.resp is None:
        api.Logger.error("Failed to run iperf client")
        return api.types.status.FAILURE

    for cmd in tc.resp.commands:
        if cmd.exit_code != 0:
            api.Logger.error("Failed to start client iperf\n")
            api.PrintCommandResults(cmd)
            return api.types.status.FAILURE

    # BUG FIX: tear down the background iperf servers started via req1;
    # the original leaked them (cf. the terminate call in iperf_test).
    api.Trigger_TerminateAllCommands(trig_resp1)

    status, retran, bw = verifySingle(tc)
    vlan = getattr(tc.iterators, 'vlantag', 'off')
    vxlan = getattr(tc.iterators, 'vxlan', 'off')
    tso = getattr(tc.iterators, 'tso_offload', 'off')

    api.Logger.info(
        "Result TSO: %s VLAN: %s VXLAN: %s Proto: %s/%s Pkt size: %d Threads: %d"
        " Bandwidth: %d Mbps" % (tso, vlan, vxlan, proto, ipproto, pktsize,
                                 number_of_iperf_threads, bw))
    return status
Exemplo n.º 15
0
def Trigger(tc):
    """Start a long-lived background iperf session between the workload
    pair, then drive virtual-media traffic against the node.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    serverCmd = None
    clientCmd = None
    # Long timeout so the background iperf session outlives the vmedia run.
    IPERF_TIMEOUT = 86400
    try:
        serverReq = api.Trigger_CreateExecuteCommandsRequest()
        clientReq = api.Trigger_CreateExecuteCommandsRequest()

        client = tc.wl_pair[0]
        server = tc.wl_pair[1]

        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address,
                        client.workload_name, client.ip_address)
        num_streams = int(getattr(tc.args, "num_streams", 2))
        api.Logger.info("Starting Iperf test from %s num-sessions %d" %
                        (tc.cmd_descr, num_streams))

        if tc.iterators.proto == 'tcp':
            port = api.AllocateTcpPort()
            serverCmd = iperf.ServerCmd(port, jsonOut=True, run_core=3)
            clientCmd = iperf.ClientCmd(server.ip_address,
                                        port,
                                        time=IPERF_TIMEOUT,
                                        jsonOut=True,
                                        num_of_streams=num_streams,
                                        run_core=3)
        else:
            port = api.AllocateUdpPort()
            serverCmd = iperf.ServerCmd(port, jsonOut=True, run_core=3)
            clientCmd = iperf.ClientCmd(server.ip_address,
                                        port,
                                        proto='udp',
                                        jsonOut=True,
                                        num_of_streams=num_streams,
                                        run_core=3)

        tc.serverCmd = serverCmd
        tc.clientCmd = clientCmd

        api.Trigger_AddCommand(serverReq,
                               server.node_name,
                               server.workload_name,
                               serverCmd,
                               background=True,
                               timeout=IPERF_TIMEOUT)

        api.Trigger_AddCommand(clientReq,
                               client.node_name,
                               client.workload_name,
                               clientCmd,
                               background=True,
                               timeout=IPERF_TIMEOUT)

        tc.server_resp = api.Trigger(serverReq)
        # Give the iperf server a moment to come up before the client starts.
        time.sleep(5)
        tc.iperf_client_resp = api.Trigger(clientReq)

        _run_vmedia_traffic(tc.node_name)
    except Exception:
        # BUG FIX: narrowed from a bare 'except:' so KeyboardInterrupt /
        # SystemExit still propagate; real errors are logged and reported.
        api.Logger.error(traceback.format_exc())
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Exemplo n.º 16
0
def Trigger(tc):
    """Run background iperf traffic while repeatedly switching the NIC
    management mode between 'dedicated' and 'ncsi' via Redfish, pinging
    the ILO after each switch to confirm reachability.

    Returns api.types.status.SUCCESS/FAILURE.
    """
    max_pings = int(getattr(tc.args, "max_pings", 60))
    num_runs = int(getattr(tc.args, "num_runs", 1))
    serverCmd = None
    clientCmd = None
    mode = tc.initial_mode
    # Long timeout so the background iperf session outlives all mode switches.
    IPERF_TIMEOUT = 86400
    try:
        serverReq = api.Trigger_CreateExecuteCommandsRequest()
        clientReq = api.Trigger_CreateExecuteCommandsRequest()

        client = tc.wl_pair[0]
        server = tc.wl_pair[1]

        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address,
                        client.workload_name, client.ip_address)
        num_streams = int(getattr(tc.args, "num_streams", 2))
        api.Logger.info("Starting Iperf test from %s num-sessions %d"
                        % (tc.cmd_descr, num_streams))

        if tc.iterators.proto == 'tcp':
            port = api.AllocateTcpPort()
            serverCmd = iperf.ServerCmd(port, jsonOut=True, run_core=3)
            clientCmd = iperf.ClientCmd(server.ip_address, port, time=IPERF_TIMEOUT,
                                        jsonOut=True, num_of_streams=num_streams,
                                        run_core=3)
        else:
            port = api.AllocateUdpPort()
            serverCmd = iperf.ServerCmd(port, jsonOut=True, run_core=3)
            clientCmd = iperf.ClientCmd(server.ip_address, port, proto='udp',
                                        jsonOut=True, num_of_streams=num_streams,
                                        run_core=3)

        tc.serverCmd = serverCmd
        tc.clientCmd = clientCmd

        api.Trigger_AddCommand(serverReq, server.node_name, server.workload_name,
                               serverCmd, background=True, timeout=IPERF_TIMEOUT)

        api.Trigger_AddCommand(clientReq, client.node_name, client.workload_name,
                               clientCmd, background=True, timeout=IPERF_TIMEOUT)

        tc.server_resp = api.Trigger(serverReq)
        # Give the iperf server a moment to come up before the client starts.
        time.sleep(5)
        tc.iperf_client_resp = api.Trigger(clientReq)

        for _i in range(num_runs):
            RF = get_redfish_obj(tc.cimc_info, mode=mode)
            obs_mode = get_nic_mode(RF)
            api.Logger.info("Iteration %d: curr_mode %s" % (_i, obs_mode))
            if mode != obs_mode:
                raise RuntimeError("Expected NIC mode %s, observed %s" % (mode, obs_mode))

            # Flip to the other management mode.
            next_mode = "dedicated" if mode == "ncsi" else "ncsi"
            if next_mode == "ncsi":
                ret = set_ncsi_mode(RF, mode="dhcp")
            else:
                ret = set_dedicated_mode(RF, mode="dhcp")
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Mode switch from %s -> %s failed" %(mode, next_mode))
                return api.types.status.FAILURE

            api.Logger.info("Switched mode to %s" % (next_mode))
            time.sleep(5)
            # BUG FIX: ret is provably SUCCESS here (the != SUCCESS case
            # returned above), so the original 'else: raise' was dead code.
            # The ILO address differs per mode; verify it answers pings.
            curr_ilo_ip = tc.ilo_ip if next_mode == "dedicated" else tc.ilo_ncsi_ip
            ret = ping(curr_ilo_ip, max_pings)
            if ret != api.types.status.SUCCESS:
                RF.logout()
                raise RuntimeError('Unable to ping ILO, Port Switch fail from'
                                  ' %s -> %s' % (mode, next_mode))
            api.Logger.info("Mode switch from %s -> %s successful" % (mode, next_mode))
            mode = next_mode
    except Exception:
        # BUG FIX: narrowed from a bare 'except:' so KeyboardInterrupt /
        # SystemExit still propagate; real errors are logged and reported.
        api.Logger.error(traceback.format_exc())
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Exemplo n.º 17
0
def Trigger(tc):
    """UDP session-aging test: open a single UDP session, sleep past the
    configured UDP idle timeout, and capture session state before and
    after so Verify() can confirm the session aged out.

    Returns api.types.status.SUCCESS (a no-op when neither workload of
    the chosen pair is on Naples).
    """
    if tc.args.type == 'local_only':
        pairs = api.GetLocalWorkloadPairs()
    else:
        pairs = api.GetRemoteWorkloadPairs()
    tc.cmd_cookies = []
    server, client = pairs[0]
    naples = server
    # Prefer running the session commands on whichever side is Naples;
    # if only the client is, swap roles so the server ends up on Naples.
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS
        else:
            client, server = pairs[0]

    tc.client = client
    tc.server = server

    # Install packet-filter rules on the endpoint for this test.
    addPktFltrRuleOnEp(tc, True)
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
    api.Logger.info("Starting UDP Aging test from %s" % (cmd_cookie))

    cmd_cookie = "halctl clear session"
    api.Trigger_AddNaplesCommand(req, server.node_name,
                                 "/nic/bin/halctl clear session")
    tc.cmd_cookies.append(cmd_cookie)

    server_port = api.AllocateUdpPort()
    timeout_str = 'udp-timeout'

    # Step 0: push the iterator's timeout into the security profile, then
    # read back the effective value.  (The original computed the timeout a
    # first time before the update and immediately overwrote it after —
    # the pre-update read was redundant and has been removed.)
    if tc.args.skip_security_prof == False:
        update_timeout(timeout_str, tc.iterators.timeout)
        timeout = get_timeout(timeout_str)
    else:
        timeout = DEFAULT_UDP_TIMEOUT

    # hping3 -9 puts the server in UDP listen mode on server_port.
    cmd_cookie = "start server"
    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "sudo hping3 -9 %s" % (server_port),
                           background=True)
    tc.cmd_cookies.append(cmd_cookie)

    # hping3 -2 sends a single UDP packet to create exactly one session.
    cmd_cookie = "start client"
    api.Trigger_AddCommand(
        req, client.node_name, client.workload_name,
        "sudo hping3 -2 %s -p %s -c 1" % (server.ip_address, server_port))
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Before aging show session"
    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --dstip %s | grep UDP" %
        (server.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    # Sleep past the aging timeout (plus grace) so the session can expire.
    cmd_cookie = "sleep"
    api.Trigger_AddNaplesCommand(req,
                                 naples.node_name,
                                 "sleep %s" % (timeout + GRACE_TIME),
                                 timeout=300)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "After aging show session"
    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --dstip %s | grep UDP" %
        (server.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
Exemplo n.º 18
0
def iperf_test(tc):
    """Run iperf between the two workload nodes through Athena NICs,
    checking flow-table entries on both NICs before and after traffic.

    Each node in turn plays the client role; results and flow-match data
    are accumulated on *tc* for a later verify step.  Returns
    api.types.status.SUCCESS, or the failing status from flow matching.
    """
    # Per-client-node results, keyed by the client node name.
    tc.flow_hit_cnt_before = {}
    tc.flow_hit_cnt_after = {}
    tc.flow_match_before = {}
    tc.flow_match_after = {}
    tc.conntrack_state = {}

    # A stateful run with a duration becomes a connection-tracking test.
    tc.is_ct_test = False
    if tc.duration is not None and tc.stateful:
        tc.is_ct_test = True
        api.Logger.info("Start Connection Tracking e2e iPerf Test")

    wl_nodes = [pair[0] for pair in tc.wl_node_nic_pairs]
    node1, node2 = wl_nodes[0], wl_nodes[1]
    node1_ath_nic = tc.athena_node_nic_pairs[0][1]
    node2_ath_nic = tc.athena_node_nic_pairs[1][1]
    tc.node_info = {'node1': node1, 'node2': node2,
                    'node1_ath_nic': node1_ath_nic,
                    'node2_ath_nic': node2_ath_nic}

    # cl_node => client node
    for cl_node in wl_nodes:
        tc.flow_hit_cnt_before[cl_node] = []
        tc.flow_hit_cnt_after[cl_node] = []
        tc.flow_match_before[cl_node] = []
        tc.flow_match_after[cl_node] = []
        tc.conntrack_state[cl_node] = []

        # Resolve interfaces/MACs/IPs for this client/server orientation.
        sintf, dintf, smac, dmac, sip, dip = _get_client_server_info(
                                                            cl_node, tc)
        if tc.proto == 'UDP':
            sport = api.AllocateUdpPort()
            dport = api.AllocateUdpPort()
        else:
            sport = api.AllocateTcpPort()
            dport = api.AllocateTcpPort()

        # Expected flow entries on each Athena NIC for this 5-tuple.
        flow_n1, flow_n2 = _get_bitw_flows(cl_node, tc.proto, sip, dip, 
                                            sport = sport, dport = dport)

        def _get_flow_match_info(flow_match_dict):
            # Record (flow, match-count) for both Athena NICs into the
            # given dict under the current client node; closes over
            # cl_node, flow_n1 and flow_n2 from the enclosing loop.
            for node, nic, vnic_id, flow in [
                    (node1, node1_ath_nic, tc.node1_vnic_id, flow_n1),
                    (node2, node2_ath_nic, tc.node2_vnic_id, flow_n2)]:

                rc, num_match_ent = utils.match_dynamic_flows(node, vnic_id, 
                                                                flow, nic)
                if rc != api.types.status.SUCCESS:
                    return rc

                flow_match_dict[cl_node].append((flow, num_match_ent))

            return (api.types.status.SUCCESS)

        # check if flow installed on both athena nics before sending traffic
        rc = _get_flow_match_info(tc.flow_match_before)
        if rc != api.types.status.SUCCESS:
            return rc

        # Send iperf traffic 
        if cl_node == 'node1':
            client_wl = tc.wl[0]
            server_wl = tc.wl[1]
        else:
            client_wl = tc.wl[1]
            server_wl = tc.wl[0]

        cmd_descr = "Client %s(%s) <--> Server %s(%s)" % (sip, sintf, 
                                                        dip, dintf)
        tc.cmd_descr.append(cmd_descr)
        api.Logger.info("Starting Iperf test: %s" % cmd_descr)

        serverCmd = iperf.ServerCmd(dport, server_ip = dip)

        # UDP sends a fixed packet count; TCP runs either for a fixed
        # duration (connection-tracking test) or a fixed packet count.
        if tc.proto == 'UDP':
            clientCmd = iperf.ClientCmd(dip, dport, proto = 'udp',
                                        jsonOut = True, client_ip = sip,
                                        client_port = sport, 
                                        pktsize = tc.pyld_size,
                                        packet_count = tc.pkt_cnt)
        else:
            if tc.is_ct_test:
                clientCmd = iperf.ClientCmd(dip, dport, jsonOut = True,
                                            client_ip = sip, client_port = sport,
                                            pktsize = tc.pyld_size,
                                            time = tc.duration)
            else:
                clientCmd = iperf.ClientCmd(dip, dport, jsonOut = True,
                                            client_ip = sip, client_port = sport,
                                            pktsize = tc.pyld_size,
                                            packet_count = tc.pkt_cnt)

        tc.serverCmds.append(serverCmd)
        tc.clientCmds.append(clientCmd)

        serverReq = api.Trigger_CreateExecuteCommandsRequest()
        clientReq = api.Trigger_CreateExecuteCommandsRequest()

        api.Trigger_AddCommand(serverReq, server_wl.node_name, 
                                server_wl.workload_name, serverCmd, 
                                background = True)

        api.Trigger_AddCommand(clientReq, client_wl.node_name, 
                                client_wl.workload_name, clientCmd,
                                background = True)

        server_resp = api.Trigger(serverReq)
        # sleep for bg iperf servers to be started
        time.sleep(3)

        tc.iperf_client_resp.append(api.Trigger(clientReq))

        # For connection-tracking runs, probe conntrack state mid-flow.
        if tc.is_ct_test:
            iperf_ct_test(tc, cl_node, flow_n1, flow_n2)

        # Tear down the background iperf server before the next iteration.
        api.Trigger_TerminateAllCommands(server_resp)

        # check if flow installed on both athena nics after sending traffic
        rc = _get_flow_match_info(tc.flow_match_after)
        if rc != api.types.status.SUCCESS:
            return rc

    return (api.types.status.SUCCESS)