Example 1
def GetHping3Cmd(protocol,
                 src_wl,
                 destination_ip,
                 destination_port,
                 src_port=0,
                 count=3,
                 options=''):
    if protocol == 'tcp':
        if src_port == 0:
            src_port = api.AllocateTcpPort()
        if options == '':
            cmd = (
                f"hping3 -S -s {int(src_port)} -k -p {int(destination_port)} -c {count} {destination_ip} -I {src_wl.interface}"
            )
        else:
            cmd = (
                f"hping3 -s {int(src_port)} -k -p {int(destination_port)} -c {count} {destination_ip} -I {src_wl.interface} {options}"
            )
    elif protocol == 'udp':
        if src_port == 0:
            src_port = api.AllocateUdpPort()
        cmd = (
            f"hping3 --{protocol.lower()} -s {int(src_port)} -k -p {int(destination_port)} -c {count} {destination_ip} -I {src_wl.interface}"
        )
    else:
        cmd = (
            f"hping3 --{protocol.lower()} -c {count} {destination_ip} -I {src_wl.interface}"
        )

    return cmd
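A minimal usage sketch for this builder, assuming it is exposed as hping.GetHping3Cmd (as in Example 11 below) and that client and server are workload objects with the node_name, workload_name, interface and ip_address attributes used throughout these examples:

req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
dest_port = api.AllocateTcpPort()
# Build the hping3 command line and run it from the client workload.
cmd = hping.GetHping3Cmd('tcp', client, server.ip_address, dest_port)
api.Trigger_AddCommand(req, client.node_name, client.workload_name, cmd)
resp = api.Trigger(req)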
Example 2
def Trigger(tc):
    tc.contexts = []
    ctxt = IperfTestContext()
    ctxt.req = api.Trigger_CreateAllParallelCommandsRequest()
    ctxt.cmd_cookies = []
    for tunnel in tc.tunnels:
        w1 = tunnel.ltep
        w2 = tunnel.rtep

        cmd_cookie = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
        api.Logger.info("Starting Iperf test from %s" % (cmd_cookie))

        basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
        if tc.iterators.proto == 'udp':
            basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()
        api.Trigger_AddCommand(ctxt.req, w1.node_name, w1.workload_name,
                               "%s -s -t 300" % basecmd, background = True)
        api.Trigger_AddCommand(ctxt.req, w2.node_name, w2.workload_name,
                               "%s -c %s" % (basecmd, w1.ip_address))

        ctxt.cmd_cookies.append(cmd_cookie)
        ctxt.cmd_cookies.append(cmd_cookie)
    trig_resp = api.Trigger(ctxt.req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    ctxt.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    tc.context = ctxt

    return api.types.status.SUCCESS
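A matching Verify sketch for this Trigger, under the assumption that the aggregated commands in ctxt.resp come back in the same order as the cookies appended above (two per tunnel):

def Verify(tc):
    ctxt = tc.context
    for cookie, cmd in zip(ctxt.cmd_cookies, ctxt.resp.commands):
        # Log which tunnel pair this result belongs to, then dump the output.
        api.Logger.info("Results for %s" % cookie)
        api.PrintCommandResults(cmd)
    return api.types.status.SUCCESS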
Example 3
def Trigger(tc):
    pairs = api.GetLocalWorkloadPairs()
    tc.cmd_cookies = []
    server, client = pairs[0]
    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS
        else:
            client, server = pairs[0]

    cmd_cookie = "%s(%s) --> %s(%s)" %\
                (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
    api.Logger.info("Starting clear & show stress test from %s" % (cmd_cookie))

    basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
    proto = 6
    timeout = 250
    #tc.secprof = sec_profile_obj.gl_securityprofile_json_template
    #timeout = int(tc.secprof['security-profiles'][0]['spec']['timeouts']['tcp']) + \
    #          int(tc.secprof['security-profiles'][0]['spec']['timeouts']['tcp-close'])
    if tc.iterators.proto == 'udp':
        basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()
        proto = 17
        timeout = 150
        #timeout = tc.security_profile['security-profiles'][0]['spec']['timeouts']['udp']

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    for cnt in range(tc.args.count):
        cmd_cookie = "iperf -s"
        api.Trigger_AddCommand(req,
                               server.node_name,
                               server.workload_name,
                               "%s-s -t 300" % basecmd,
                               background=True)
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "iperf -c "
        api.Trigger_AddCommand(
            req, client.node_name, client.workload_name,
            "%s -c %s -P 100" % (basecmd, server.ip_address))
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Show session"
        api.Trigger_AddNaplesCommand(req, naples.node_name,
                                     "/nic/bin/halctl show session")
        tc.cmd_cookies.append(cmd_cookie)

        cmd_cookie = "Clear session"
        api.Trigger_AddNaplesCommand(req, naples.node_name,
                                     "/nic/bin/halctl clear session")
        tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
Example 4
def Trigger(tc):
    if api.IsDryrun(): return api.types.status.SUCCESS

    srv = tc.workloads[0]
    cli = tc.workloads[1]
    
    # Determine where the commands will be run - host or Naples.
    test_type = getattr(tc.args, "test-type", INTF_TEST_TYPE_HOST)
    is_naples_cmd = True
    if test_type == INTF_TEST_TYPE_HOST:
        is_naples_cmd = False

    srv_req = api.Trigger_CreateExecuteCommandsRequest(serial = False)
    cli_req = api.Trigger_CreateExecuteCommandsRequest(serial = False)

    proto = getattr(tc.iterators, "proto", 'tcp')
    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)
    pktsize = getattr(tc.iterators, "pktsize", None)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')

    if ipproto == 'v4':
        server_ip = srv.ip_address
        client_ip = cli.ip_address
    else:
        server_ip = srv.ipv6_address
        client_ip = cli.ipv6_address
        
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (srv.interface, server_ip, cli.interface, client_ip)

    api.Logger.info("Starting Iperf(%s/%s) test from %s"
                    % (proto, ipproto, tc.cmd_descr))

    duration =  10
    for i in range(number_of_iperf_threads):
        if proto == 'tcp':
            port = api.AllocateTcpPort()
        else:
            port = api.AllocateUdpPort()
 
        iperf_server_cmd = iperf.ServerCmd(port, naples = is_naples_cmd)
        api.Trigger_AddCommand(srv_req, srv.node_name, srv.workload_name, iperf_server_cmd, background = True)

        iperf_client_cmd = iperf.ClientCmd(server_ip, port, time=duration,
                                 proto=proto, jsonOut=True, ipproto=ipproto,
                                 pktsize=pktsize, client_ip=client_ip, naples = is_naples_cmd)
        api.Trigger_AddCommand(cli_req, cli.node_name, cli.workload_name, iperf_client_cmd, timeout = 60)

    srv_resp = api.Trigger(srv_req)
    # Wait for iperf server to start.
    time.sleep(10)
    tc.cli_resp = api.Trigger(cli_req)
    # Wait for iperf clients to finish.
    time.sleep(2*duration)

    srv_resp1 = api.Trigger_TerminateAllCommands(srv_resp)

    return api.types.status.SUCCESS
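Note that this Trigger discards the terminate response. A hedged sketch of what the last two lines could look like if a companion Verify step needs both responses (tc.srv_resp is a made-up attribute name; the aggregation call follows the pattern used elsewhere in this collection):

    srv_resp1 = api.Trigger_TerminateAllCommands(srv_resp)
    # Keep the aggregated server response and print the client results so a
    # later Verify step can inspect them.
    tc.srv_resp = api.Trigger_AggregateCommandsResponse(srv_resp, srv_resp1)
    for cmd in tc.cli_resp.commands:
        api.PrintCommandResults(cmd)

    return api.types.status.SUCCESS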
Example 5
def Setup(tc):
    tc.server_port = api.AllocateTcpPort()
    tc.client_port = api.AllocateTcpPort()
    api.Logger.info("Setup.")
    if tc.iterators.kind == "remote":
        pairs = api.GetRemoteWorkloadPairs()
        if not pairs:
            api.Logger.info("no remtote eps")
            return api.types.status.SUCCESS
    else:
        pairs = api.GetLocalWorkloadPairs()

    tc.resp_flow = getattr(tc.args, "resp_flow", 0)
    tc.cmd_cookies = {}
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)

    #for w1,w2 in pairs:
    if pairs[0][0].IsNaples():
        tc.client,tc.server = pairs[0]
    else:
        tc.server,tc.client = pairs[0]
    
    cmd_cookie = start_nc_server(tc.server, tc.server_port)
    add_command(req, tc, 'server', tc.server, cmd_cookie, True) 


    cmd_cookie = start_nc_client(tc.server, tc.client_port, tc.server_port)
    add_command(req, tc, 'client', tc.client, cmd_cookie, True)
       
    cmd_cookie = "/nic/bin/halctl show session --dstport {} --dstip {} --yaml".format(tc.server_port, tc.server.ip_address)
    add_command(req, tc, 'show before', tc.client, cmd_cookie, naples=True)

    
    tc.setup_cmd_resp = api.Trigger(req)
    cmd = tc.setup_cmd_resp.commands[-1] 
    api.PrintCommandResults(cmd)
    tc.pre_ctrckinf = get_conntrackinfo(cmd)
    if getattr(tc.args, 'vmotion_enable', False):
        vmotion_utils.PrepareWorkloadVMotion(tc, [tc.client])

    return api.types.status.SUCCESS
Example 6
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    tc.serverCmds = []
    tc.clientCmds = []
    tc.cmd_descr = []

    if not api.IsSimulation():
        serverReq = api.Trigger_CreateAllParallelCommandsRequest()
        clientReq = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        serverReq = api.Trigger_CreateExecuteCommandsRequest(serial = False)
        clientReq = api.Trigger_CreateExecuteCommandsRequest(serial = False)

    for idx, pairs in enumerate(tc.workload_pairs):
        client = pairs[0]
        server = pairs[1]

        cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        tc.cmd_descr.append(cmd_descr)
        num_sessions = int(getattr(tc.args, "num_sessions", 1))
        api.Logger.info("Starting Iperf test from %s num-sessions %d" % (cmd_descr, num_sessions))

        if tc.iterators.proto == 'udp':
            port = api.AllocateUdpPort()
            serverCmd = iperf.ServerCmd(port)
            clientCmd = iperf.ClientCmd(server.ip_address, port, proto='udp', jsonOut=True, num_of_streams = num_sessions)
        else:
            port = api.AllocateTcpPort()
            serverCmd = iperf.ServerCmd(port)
            clientCmd = iperf.ClientCmd(server.ip_address, port, jsonOut=True, num_of_streams = num_sessions)

        tc.serverCmds.append(serverCmd)
        tc.clientCmds.append(clientCmd)

        api.Trigger_AddCommand(serverReq, server.node_name, server.workload_name,
                               serverCmd, background = True)

        api.Trigger_AddCommand(clientReq, client.node_name, client.workload_name,
                               clientCmd)

    server_resp = api.Trigger(serverReq)
    #Sleep for some time as bg may not have been started.
    time.sleep(30)

    tc.iperf_client_resp = api.Trigger(clientReq)
    #It would be faster to just kill the iperf servers,
    #but still call terminate on all of them.
    api.Trigger_TerminateAllCommands(server_resp)

    return api.types.status.SUCCESS
Example 7
def Trigger(tc):
    #Run all commands in parallel.
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    #Start traffic commands in background
    wloads = []
    for pair in tc.workload_pairs:
        w1 = pair[0]
        w2 = pair[1]
        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
        api.Logger.info("Starting Iperf test from %s" % (tc.cmd_descr))

        basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
        api.Trigger_AddCommand(req,
                               w1.node_name,
                               w1.workload_name,
                               "%s -s -t 300" % basecmd,
                               background=True)
        api.Trigger_AddCommand(req,
                               w2.node_name,
                               w2.workload_name,
                               "%s -c %s" % (basecmd, w1.ip_address),
                               background=True)

        wloads.append(w1)
        wloads.append(w2)

    #now bring up workloads
    ret = api.BringUpWorkloads(wloads)
    if ret != api.types.status.SUCCESS:
        return api.types.status.FAILURE

    #Now Send all the commands
    trig_resp = api.Trigger(req)

    #Sleep for some time for traffic to stabilize
    time.sleep(10)
    #Teardown workloads
    ret = api.TeardownWorkloads(wloads)
    if ret != api.types.status.SUCCESS:
        api.Trigger_TerminateAllCommands(trig_resp)
        return api.types.status.FAILURE

    #this will fail as the workloads are already deleted and all commands are stopped
    tc.resp = api.Trigger_TerminateAllCommands(trig_resp)

    #Bring up the same workloads again
    ret = api.BringUpWorkloads(wloads)
    if ret != api.types.status.SUCCESS:
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Example 8
def Trigger(tc):
    pairs = api.GetRemoteWorkloadPairs()
    w1 = pairs[0][1]
    w2 = pairs[0][0]

    group = "239.1.1.1"
    maddr = "01:00:5e:01:01:01"

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" % \
                   (w1.workload_name, w1.ip_address, w2.workload_name, w2.ip_address)
    api.Logger.info("Starting Multicast outbound Iperf test from %s" % (tc.cmd_descr))

    basecmd = "ip maddress add %s dev %s" % (maddr, w1.interface)
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                           basecmd, background = True)
    basecmd = "ip maddress add %s dev %s" % (maddr, w2.interface)
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                           basecmd, background = True)
    basecmd = "ip route add %s/32 dev %s" % (group, w1.interface)
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                           basecmd, background = True)
    basecmd = "ip route add %s/32 dev %s" % (group, w2.interface)
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                           basecmd, background = True)

    basecmd = 'iperf -p %d ' % api.AllocateTcpPort()
    if tc.iterators.proto == 'udp':
        basecmd = 'iperf -u -p %d ' % api.AllocateUdpPort()
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                           "%s -s -t 300 -B %s -i 1" % (basecmd, group), background = True)
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                           "%s -c %s -T 32 -t 3 -i 1" % (basecmd, group))

    basecmd = "ip maddress del %s dev %s" % (maddr, w1.interface)
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                           basecmd, background = True)
    basecmd = "ip maddress del %s dev %s" % (maddr, w2.interface)
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                           basecmd, background = True)
    basecmd = "ip route del %s/32 dev %s" % (group, w1.interface)
    api.Trigger_AddCommand(req, w1.node_name, w1.workload_name,
                           basecmd, background = True)
    basecmd = "ip route del %s/32 dev %s" % (group, w2.interface)
    api.Trigger_AddCommand(req, w2.node_name, w2.workload_name,
                           basecmd, background = True)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
Example 9
def Trigger(tc):
    if api.IsDryrun(): return api.types.status.SUCCESS

    req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    w1 = tc.workloads[0]
    w2 = tc.workloads[1]
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (w1.interface, w1.ip_address, w2.interface, w2.ip_address)
    api.Logger.info("Starting Iperf test from %s" % (tc.cmd_descr))

    proto = getattr(tc.iterators, "proto", 'tcp')

    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)

    pktsize = getattr(tc.iterators, "pktsize", 512)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')

    for i in range(number_of_iperf_threads):
        if proto == 'tcp':
            port = api.AllocateTcpPort()
        else:
            port = api.AllocateUdpPort()

        iperf_server_cmd = cmd_builder.iperf_server_cmd(port=port)
        api.Trigger_AddCommand(req1,
                               w1.node_name,
                               w1.workload_name,
                               iperf_server_cmd,
                               background=True)

        iperf_client_cmd = cmd_builder.iperf_client_cmd(
            server_ip=w1.ip_address,
            port=port,
            proto=proto,
            pktsize=pktsize,
            ipproto=ipproto)
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               iperf_client_cmd)

    trig_resp1 = api.Trigger(req1)
    trig_resp2 = api.Trigger(req2)
    term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)

    response = api.Trigger_AggregateCommandsResponse(trig_resp1, term_resp1)
    tc.resp = api.Trigger_AggregateCommandsResponse(response, trig_resp2)

    return api.types.status.SUCCESS
Example 10
def SendTraffic(tc):
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" % (
        tc.intf1.Name(), tc.intf1.GetIP(), tc.intf2.Name(), tc.intf2.GetIP())
    api.Logger.info("Starting Iperf  from %s" % (tc.cmd_descr))

    port = api.AllocateTcpPort()
    iperf_server_cmd = iperf.ServerCmd(port=port)
    tc.intf1.AddCommand(req, iperf_server_cmd, background=True)
    iperf_client_cmd = iperf.ClientCmd(server_ip=tc.intf1.GetIP(), port=port,
                                       proto='tcp', pktsize=512, ipproto='v4')
    tc.intf2.AddCommand(req, iperf_client_cmd)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)
    return api.types.status.SUCCESS
Example 11
def PingCmdBuilder(src_wl,
                   dest_ip,
                   proto='icmp',
                   af='ipv4',
                   pktsize=64,
                   args=None,
                   count=3):

    cmd = None
    dest_addr = " %s" % (dest_ip)
    if proto == 'arp':
        if not __is_ipv4(af):
            assert (0)
        if args == 'DAD':
            arp_base_cmd = __get_arp_base_cmd(src_wl, False, True, count)
        elif args == 'update':
            arp_base_cmd = __get_arp_base_cmd(src_wl, True, False, count)
        else:
            arp_base_cmd = __get_arp_base_cmd(src_wl, False, False, count)

        addr = __get_workload_address(src_wl, "ipv4")
        if args == 'update':
            dest_addr = " %s" % (addr)
        cmd = arp_base_cmd + dest_addr
    elif proto == 'icmp':
        ping_base_cmd = __get_ping_base_cmd(src_wl, af, pktsize, 3, 0.2, False)
        cmd = __ping_addr_substitution(ping_base_cmd, dest_addr)
    elif proto in ['tcp', 'udp']:
        if proto == 'udp':
            dest_port = api.AllocateUdpPort()
            # Skip over 'geneve' reserved port 6081
            if dest_port == 6081:
                dest_port = api.AllocateUdpPort()
        else:
            dest_port = api.AllocateTcpPort()
        cmd = hping.GetHping3Cmd(proto, src_wl, dest_ip, dest_port)

    return cmd
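A short usage sketch, assuming src_wl and dst_wl are workload objects like those in the surrounding examples and that this builder is importable next to the api helpers:

req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
# Build a TCP probe from src_wl towards dst_wl and execute it.
cmd = PingCmdBuilder(src_wl, dst_wl.ip_address, proto='tcp')
api.Trigger_AddCommand(req, src_wl.node_name, src_wl.workload_name, cmd)
resp = api.Trigger(req)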
Example 12
def Trigger(tc):
    max_pings = int(getattr(tc.args, "max_pings", 60))
    num_runs = int(getattr(tc.args, "num_runs", 1))
    serverCmd = None
    clientCmd = None
    mode = tc.initial_mode
    IPERF_TIMEOUT = 86400
    try:
        serverReq = api.Trigger_CreateExecuteCommandsRequest()
        clientReq = api.Trigger_CreateExecuteCommandsRequest()
        
        client = tc.wl_pair[0]
        server = tc.wl_pair[1]

        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address,
                        client.workload_name, client.ip_address)
        num_streams = int(getattr(tc.args, "num_streams", 2))
        api.Logger.info("Starting Iperf test from %s num-sessions %d"
                        % (tc.cmd_descr, num_streams))

        if tc.iterators.proto == 'tcp':
            port = api.AllocateTcpPort()
            serverCmd = iperf.ServerCmd(port, jsonOut=True, run_core=3)
            clientCmd = iperf.ClientCmd(server.ip_address, port, time=IPERF_TIMEOUT,
                                        jsonOut=True, num_of_streams=num_streams,
                                        run_core=3)
        else:
            port = api.AllocateUdpPort()
            serverCmd = iperf.ServerCmd(port, jsonOut=True, run_core=3)
            clientCmd = iperf.ClientCmd(server.ip_address, port, proto='udp',
                                        jsonOut=True, num_of_streams=num_streams,
                                        run_core=3)

        tc.serverCmd = serverCmd
        tc.clientCmd = clientCmd

        api.Trigger_AddCommand(serverReq, server.node_name, server.workload_name,
                               serverCmd, background=True, timeout=IPERF_TIMEOUT)

        api.Trigger_AddCommand(clientReq, client.node_name, client.workload_name,
                               clientCmd, background=True, timeout=IPERF_TIMEOUT)
        
        tc.server_resp = api.Trigger(serverReq)
        time.sleep(5)
        tc.iperf_client_resp = api.Trigger(clientReq)

        for _i in range(num_runs):
            RF = get_redfish_obj(tc.cimc_info, mode=mode)
            obs_mode = get_nic_mode(RF)
            api.Logger.info("Iteration %d: curr_mode %s" % (_i, obs_mode))
            if mode != obs_mode:
                raise RuntimeError("Expected NIC mode %s, observed %s" % (mode, obs_mode))

            next_mode = "dedicated" if mode == "ncsi" else "ncsi"
            if next_mode == "ncsi":
                ret = set_ncsi_mode(RF, mode="dhcp")
            else:
                ret = set_dedicated_mode(RF, mode="dhcp")
            if ret != api.types.status.SUCCESS:
                api.Logger.error("Mode switch from %s -> %s failed" %(mode, next_mode))
                return api.types.status.FAILURE

            api.Logger.info("Switched mode to %s" % (next_mode))
            time.sleep(5)
            if ret == api.types.status.SUCCESS:
                curr_ilo_ip = tc.ilo_ip if next_mode == "dedicated" else tc.ilo_ncsi_ip
                ret = ping(curr_ilo_ip, max_pings)
                if ret != api.types.status.SUCCESS:
                    RF.logout()
                    raise RuntimeError('Unable to ping ILO, Port Switch fail from'
                                      ' %s -> %s' % (mode, next_mode))
                api.Logger.info("Mode switch from %s -> %s successful" % (mode, next_mode))
            else:
                raise RuntimeError('Mode switch config failed')
            mode = next_mode
    except:
        api.Logger.error(traceback.format_exc())
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Example 13
def start_fuz(tc):
    ret = copy_fuz(tc)
    if ret != api.types.status.SUCCESS:
        return api.types.status.FAILURE

    tc.serverCmds = []
    tc.clientCmds = []
    tc.cmd_descr = []

    serverReq = None
    clientReq = None

    serverReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    clientReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    # ping test above sets the workload pairs to remote only
    # setting wl_pairs as per arg selected in testbundle

    if tc.args.type == 'local_only':
        api.Logger.info("local_only test")
        tc.workload_pairs = api.GetLocalWorkloadPairs()
    elif tc.args.type == 'both':
        api.Logger.info(" both local and remote test")
        tc.workload_pairs = api.GetLocalWorkloadPairs()
        tc.workload_pairs.extend(api.GetRemoteWorkloadPairs())
    else:
        api.Logger.info("remote_only test")
        tc.workload_pairs = api.GetRemoteWorkloadPairs()

    for idx, pairs in enumerate(tc.workload_pairs):
        client = pairs[0]
        server = pairs[1]
        cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        tc.cmd_descr.append(cmd_descr)
        num_sessions = int(getattr(tc.args, "num_sessions", 1))
        api.Logger.info("Starting Fuz test from %s num-sessions %d" %
                        (cmd_descr, num_sessions))

        serverCmd = None
        clientCmd = None
        port = api.AllocateTcpPort()

        serverCmd = tc.fuz_exec[server.workload_name] + " -port " + str(port)
        clientCmd = tc.fuz_exec[client.workload_name] + " -conns " + str(
            num_sessions
        ) + " -duration " + str(
            __fuz_run_time
        ) + " -attempts 1 -read-timeout 100 -talk " + server.ip_address + ":" + str(
            port)

        tc.serverCmds.append(serverCmd)
        tc.clientCmds.append(clientCmd)

        api.Trigger_AddCommand(serverReq,
                               server.node_name,
                               server.workload_name,
                               serverCmd,
                               background=True)

        api.Trigger_AddCommand(clientReq,
                               client.node_name,
                               client.workload_name,
                               clientCmd,
                               background=True)

    tc.server_resp = api.Trigger(serverReq)
    #Sleep for some time as bg may not have been started.
    time.sleep(5)
    tc.fuz_client_resp = api.Trigger(clientReq)
    return api.types.status.SUCCESS
Example 14
def iperfWorkloads(workload_pairs,
                   af="ipv4",
                   proto="tcp",
                   packet_size=64,
                   bandwidth="100G",
                   time=1,
                   num_of_streams=None,
                   sleep_time=30,
                   background=False):
    serverCmds = []
    clientCmds = []
    cmdDesc = []
    ipproto = __get_ipproto(af)

    if not api.IsSimulation():
        serverReq = api.Trigger_CreateAllParallelCommandsRequest()
        clientReq = api.Trigger_CreateAllParallelCommandsRequest()
    else:
        serverReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        clientReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    for idx, pairs in enumerate(workload_pairs):
        client = pairs[0]
        server = pairs[1]
        server_addr = __get_workload_address(server, af)
        client_addr = __get_workload_address(client, af)
        if proto == 'udp':
            port = api.AllocateUdpPort()
            if port == 6081:
                port = api.AllocateUdpPort()
        else:
            port = api.AllocateTcpPort()

        serverCmd = iperf.ServerCmd(port, jsonOut=True)
        clientCmd = iperf.ClientCmd(server_addr,
                                    port,
                                    time,
                                    packet_size,
                                    proto,
                                    None,
                                    ipproto,
                                    bandwidth,
                                    num_of_streams,
                                    jsonOut=True)

        cmd_cookie = "Server: %s(%s:%s:%d) <--> Client: %s(%s)" %\
                     (server.workload_name, server_addr, proto, port,\
                      client.workload_name, client_addr)
        api.Logger.info("Starting Iperf test %s" % cmd_cookie)
        serverCmds.append(serverCmd)
        clientCmds.append(clientCmd)
        cmdDesc.append(cmd_cookie)

        api.Trigger_AddCommand(serverReq,
                               server.node_name,
                               server.workload_name,
                               serverCmd,
                               background=True)
        api.Trigger_AddCommand(clientReq,
                               client.node_name,
                               client.workload_name,
                               clientCmd,
                               background=background)

    server_resp = api.Trigger(serverReq)
    #Sleep for some time as bg may not have been started.
    api.Logger.info(
        f"Waiting {sleep_time} sec to start iperf server in background")
    __sleep(sleep_time)
    client_resp = api.Trigger(clientReq)
    __sleep(3)

    if background:
        return [cmdDesc, serverCmds, clientCmds], server_resp, client_resp
    else:
        api.Trigger_TerminateAllCommands(server_resp)
        return [cmdDesc, serverCmds, clientCmds], client_resp
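A minimal caller sketch for the foreground case, based only on the return shape above and assuming the client responses come back in request order (workload pairs come from the same api helpers used elsewhere in these examples):

pairs = api.GetRemoteWorkloadPairs()
info, client_resp = iperfWorkloads(pairs, af="ipv4", proto="tcp", time=5)
cmd_descs, server_cmds, client_cmds = info
for desc, cmd in zip(cmd_descs, client_resp.commands):
    # Print each client result next to its "Server <--> Client" description.
    api.Logger.info("Iperf result for %s" % desc)
    api.PrintCommandResults(cmd)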
Example 15
def Trigger(tc):
    api.Logger.info("Trigger.")
    pairs = api.GetLocalWorkloadPairs(naples=True)
    tc.cmd_cookies1 = []
    tc.cmd_cookies2 = []
    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)

    #for w1,w2 in pairs:
    server, client = pairs[0]
    naples = server
    if not server.IsNaples():
       naples = client
       if not client.IsNaples():
          return api.types.status.SUCCESS
       else:
          client, server = pairs[0]

    #TBD remove this once agent side profile update is implemented
    timeout = get_timeout("tcp-close")

    #Step 0: Update the timeout in the config object
    update_timeout("tcp-close", tc.iterators.timeout)
    
    #profilereq = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    #api.Trigger_AddNaplesCommand(profilereq, naples.node_name, "/nic/bin/halctl show nwsec profile --id 11")
    #profcommandresp = api.Trigger(profilereq)
    #cmd = profcommandresp.commands[-1]
    #for command in profcommandresp.commands:
    #    api.PrintCommandResults(command)
    #timeout = get_haltimeout("tcp-close", cmd)
    #tc.config_update_fail = 0
    #if (timeout != timetoseconds(tc.iterators.timeout)):
    #    tc.config_update_fail = 1
    timeout = timetoseconds(tc.iterators.timeout)

    #Step 1: Start TCP Server
    server_port = api.AllocateTcpPort()
    api.Trigger_AddCommand(req, server.node_name, server.workload_name, "nc -l %s"%(server_port), background=True)
    tc.cmd_cookies1.append("start server")

    #Step 2: Start TCP Client
    client_port = api.AllocateTcpPort()
    api.Trigger_AddCommand(req, client.node_name, client.workload_name, 
                        "nc {} {} -p {}".format(server.ip_address, server_port, client_port), background=True)
    tc.cmd_cookies1.append("start client")

    #Step 3: Get the session out from naples
    api.Trigger_AddNaplesCommand(req, naples.node_name, 
                "/nic/bin/halctl show session --dstport {} --dstip {} --yaml".format(server_port, server.ip_address))
    trig_resp1 = api.Trigger(req)
    cmd = trig_resp1.commands[-1]
    for command in trig_resp1.commands:
        api.PrintCommandResults(command)
    tc.ctrckinf = get_conntrackinfo(cmd)
    tc.cmd_cookies1.append("show session detail")

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)

    #Step 4: Start TCPDUMP in background at the client
    api.Trigger_AddCommand(req, client.node_name, client.workload_name, "tcpdump -i {} > out.txt".format(client.interface), background=True)
    tc.cmd_cookies2.append("tcpdump client");

    api.Trigger_AddCommand(req, server.node_name, server.workload_name, "tcpdump -i {} > out.txt".format(server.interface), background=True)
    tc.cmd_cookies2.append("tcpdump server");

    #Step 5: Cook up a FIN and send
    api.Trigger_AddCommand(req, client.node_name, client.workload_name, 
               "hping3 -c 1 -s {} -p {} -M {}  -L {} --ack --tcp-timestamp {} -d 0 -F".format(client_port, server_port, tc.ctrckinf.i_tcpseqnum+1, 
                                                                                              tc.ctrckinf.i_tcpacknum, server.ip_address))
    tc.cmd_cookies2.append("Send FIN")

    #Step 6: Send FIN ACK now from other side after timeout
    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
               "hping3 -c 1 -s {} -p {} -M {}  -L {} --ack --tcp-timestamp {} -d 0 -F".format(server_port, client_port, tc.ctrckinf.r_tcpseqnum+1, 
                                                                                              tc.ctrckinf.r_tcpacknum, client.ip_address))
    tc.cmd_cookies2.append("Send FIN ACK")

    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
               "hping3 -c 1 -s {} -p {} -M {}  -L {} --ack --tcp-timestamp {} -d 0".format(server_port, client_port, tc.ctrckinf.i_tcpseqnum, 
                                                                                           tc.ctrckinf.i_tcpacknum+1, server.ip_address))
    tc.cmd_cookies2.append("Send ACK")

    #Step 7: Check if the session is still up in FIN_RCVD state
    api.Trigger_AddNaplesCommand(req, naples.node_name, "/nic/bin/halctl show session --dstport {} --dstip {} --srcip {} | grep BIDIR_FIN".format(server_port, server.ip_address, client.ip_address))
    tc.cmd_cookies2.append("Before timeout")

    #Sleep for connection setup timeout
    ######TBD -- uncomment this once agent update fix is in!!!
    #timeout = timetoseconds(tc.iterators.timeout)
    cmd_cookie = "sleep"
    api.Trigger_AddNaplesCommand(req, naples.node_name, "sleep %s" % timeout, timeout=300)
    tc.cmd_cookies2.append("sleep")

    #Step 8: Validate that the session is gone. Note that we could have a session in INIT state in this case, as the server would retransmit.
    #The idea is to make sure we have removed the session that was in FIN_RCVD state.
    api.Trigger_AddNaplesCommand(req, naples.node_name, "/nic/bin/halctl show session --dstport {} --dstip {} --srcip {} | grep BIDIR_FIN".format(server_port, server.ip_address, client.ip_address))
    tc.cmd_cookies2.append("After timeout")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    return api.types.status.SUCCESS
Example 16
def Trigger(tc):
    if tc.skip:
        return api.types.status.FAILURE

    #
    # Set-up Test Environment
    #
    tc.cmd_cookies = []
    tc.cookie_idx = 0
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                 (tc.server.workload_name, tc.server.ip_address,
                  tc.client.workload_name, tc.client.ip_address)
    api.Logger.info("Starting Multiple-IPv4-TCP-Flow-Drops test from %s" %\
                   (cmd_cookie))

    #
    # Start TCPDUMP in background on Server/Client
    #
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    #cmd_cookie = "start tcpdump on Server"
    #cmd = "sudo tcpdump -nnSXi {} > out.txt".format(tc.server.interface)
    #add_command(tc, req, cmd_cookie, cmd, tc.server, True)

    #cmd_cookie = "start tcpdump on Client"
    #cmd = "sudo tcpdump -nnSXi {} > out.txt".format(tc.client.interface)
    #add_command(tc, req, cmd_cookie, cmd, tc.client, True)

    #
    # Start with a clean slate by clearing all sessions/flows
    #
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session"
    add_naples_command(tc, req, cmd_cookie, cmd, tc.naples)

    #
    # Make sure that Client<=>Server Forwarding is set up
    #
    cmd_cookie = "trigger ping: Create case"
    cmd = "ping -c1 %s -I %s" %\
          (tc.server.ip_address, tc.client.interface)
    add_command(tc, req, cmd_cookie, cmd, tc.client, False)

    tc.resp = api.Trigger(req)
    for command in tc.resp.commands:
        api.PrintCommandResults(command)

    #
    # Allocate TCP-portnum for Server/Client and start the service on the Server
    #
    tc.server_port = api.AllocateTcpPort()
    tc.client_port = api.AllocateTcpPort()
    req_nc1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    iseq_num = 0
    iack_num = 0
    rseq_num = 0
    rack_num = 0
    idx = 0
    while (idx < tc.iterators.sessions):
        #
        # Establish TCP-connection
        #
        cmd_cookie = "start server"
        cmd = "nc --listen {}".format(tc.server_port + idx)
        add_command(tc, req_nc1, cmd_cookie, cmd, tc.server, True)

        cmd_cookie = "start client"
        cmd = "nc {} {} --source-port {} "\
        .format(tc.server.ip_address, tc.server_port+idx, tc.client_port)
        add_command(tc, req_nc1, cmd_cookie, cmd, tc.client, True)
        idx += 1

    resp_nc1 = api.Trigger(req_nc1)
    for command in resp_nc1.commands:
        api.PrintCommandResults(command)

    idx = 0
    while (idx < tc.iterators.sessions):
        #
        # Do applicable "show session" commands and
        # retrieve Seq-num and Ack-num associated with the session
        #
        #       req1 = api.Trigger_CreateExecuteCommandsRequest(serial = True)
        #       cmd_cookie = "show session"
        #       cmd = "/nic/bin/halctl show session"
        #       add_naples_command(tc, req1, cmd_cookie, cmd, tc.naples)

        #       cmd_cookie = "show session detail"
        #       cmd = "/nic/bin/halctl show session --dstport {} --dstip {} --yaml"\
        #       .format(tc.server_port+idx, tc.server.ip_address)
        #       add_naples_command(tc, req1, cmd_cookie, cmd, tc.naples)

        #       tc.resp1 = api.Trigger(req1)
        #       cmd = tc.resp1.commands[-1]
        #       for command in tc.resp1.commands:
        #           api.PrintCommandResults(command)
        #       iseq_num, iack_num, iwindosz, iwinscale, rseq_num, rack_num, rwindosz, rwinscale = get_conntrackinfo(cmd)

        #
        # Send Bad Data with TTL=0 from both Client and Server
        #
        req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        cmd_cookie = "send bad data from Client TTL=0: Create case"
        cmd = "hping3 --count 1 --baseport {} --destport {}\
              --setseq {} --setack {} --ttl 0 --data 10 {}"\
              .format(tc.client_port, tc.server_port+idx, rack_num, rseq_num,
                      tc.server.ip_address)
        add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

        cmd_cookie = "send bad data from Server TTL=0: Create case"
        cmd = "hping3 --count 1 --baseport {} --destport {}\
              --setseq {} --setack {} --ttl 0 --data 10 {}"\
              .format(tc.server_port+idx, tc.client_port, iack_num, iseq_num,
                      tc.client.ip_address)
        add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

        #
        # Send Bad Data with TCP-RSVD-FLAGS-BIT-0 set from Client and Server
        #
        cmd_cookie = "send bad data from Client TCP-RSVD-FLAGS-BIT-0: Create"
        cmd = "hping3 --count 1 --baseport {} --destport {}\
              --setseq {} --setack {} --xmas --data 10 {}"\
              .format(tc.client_port, tc.server_port+idx, rack_num+10, rseq_num,
                      tc.server.ip_address)
        add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

        cmd_cookie = "send bad data from Server TCP-RSVD-FLAGS-BIT-0: Create"
        cmd = "hping3 --count 1 --baseport {} --destport {}\
              --setseq {} --setack {} --xmas --data 10 {}"\
              .format(tc.server_port+idx, tc.client_port, iack_num+10, iseq_num,
                      tc.client.ip_address)
        add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

        #
        # Send Bad Data with TCP-RSVD-FLAGS-BIT-1 set from Client and Server
        #
        cmd_cookie = "send bad data from Client TCP-RSVD-FLAGS-BIT-1: Create"
        cmd = "hping3 --count 1 --baseport {} --destport {}\
              --setseq {} --setack {} --ymas --data 10 {}"\
              .format(tc.client_port, tc.server_port+idx, rack_num+20, rseq_num,
                      tc.server.ip_address)
        add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

        cmd_cookie = "send bad data from Server TCP-RSVD-FLAGS-BIT-1: Create"
        cmd = "hping3 --count 1 --baseport {} --destport {}\
              --setseq {} --setack {} --ymas --data 10 {}"\
              .format(tc.server_port+idx, tc.client_port, iack_num+20, iseq_num,
                      tc.client.ip_address)
        add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

        tc.resp2 = api.Trigger(req2)
        for command in tc.resp2.commands:
            api.PrintCommandResults(command)


#       api.Trigger_TerminateAllCommands(tc.resp1)
        api.Trigger_TerminateAllCommands(tc.resp2)
        idx += 1

    #
    # Do "show session" command
    #
    req3 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cookie_idx_3 = tc.cookie_idx
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req3, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Create case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"

    add_naples_command(tc, req3, cmd_cookie, cmd, tc.naples)

    tc.resp3 = api.Trigger(req3)
    for command in tc.resp3.commands:
        api.PrintCommandResults(command)

    #
    # Re-establish TCP-connection
    #
    api.Trigger_TerminateAllCommands(resp_nc1)
    #time.sleep(5)
    #   req_nc2 = api.Trigger_CreateExecuteCommandsRequest(serial = True)

    #
    # Make sure that Client<=>Server Forwarding is set up
    #
    #   cmd_cookie = "trigger ping: Re-use case"
    #   cmd = "ping -c1 %s -I %s" %\
    #         (tc.server.ip_address, tc.client.interface)
    #   add_command(tc, req_nc2, cmd_cookie, cmd, tc.client, False)

    #   idx = 0
    #   while (idx < tc.iterators.sessions):
    #       cmd_cookie = "restart server"
    #       cmd = "nc --listen {}".format(tc.server_port+idx)
    #       add_command(tc, req_nc2, cmd_cookie, cmd, tc.server, True)

    #       cmd_cookie = "restart client"
    #       cmd = "nc {} {} --source-port {} "\
    #       .format(tc.server.ip_address, tc.server_port+idx, tc.client_port)
    #       add_command(tc, req_nc2, cmd_cookie, cmd, tc.client, True)
    #       idx += 1

    #   resp_nc2 = api.Trigger(req_nc2)
    #   for command in resp_nc2.commands:
    #       api.PrintCommandResults(command)

    #   idx = 0
    #   while (idx < tc.iterators.sessions):
    #       #
    #       # Do applicable "show session" commands and
    #       # retrieve Seq-num and Ack-num associated with the session
    #       #
    #       req4 = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    #       cmd_cookie = "show session"
    #       cmd = "/nic/bin/halctl show session"
    #       add_naples_command(tc, req4, cmd_cookie, cmd, tc.naples)

    #       cmd_cookie = "show session detail"
    #       cmd = "/nic/bin/halctl show session --dstport {} --dstip {} --yaml"\
    #       .format(tc.server_port+idx, tc.server.ip_address)
    #       add_naples_command(tc, req4, cmd_cookie, cmd, tc.naples)

    #       tc.resp4 = api.Trigger(req4)
    #       cmd = tc.resp4.commands[-1]
    #       for command in tc.resp4.commands:
    #           api.PrintCommandResults(command)
    #       iseq_num, iack_num, iwindosz, iwinscale, rseq_num, rack_num, rwindosz, rwinscale = get_conntrackinfo(cmd)

    #       #
    #       # Re-send Bad Data with TTL=0 from both Client and Server
    #       #
    #       req5 = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    #       cmd_cookie = "send bad data from Client TTL=0: Re-use case"
    #       cmd = "hping3 --count 1 --baseport {} --destport {}\
    #             --setseq {} --setack {} --ttl 0 --data 10 {}"\
    #             .format(tc.client_port, tc.server_port+idx, rack_num, rseq_num,
    #                     tc.server.ip_address)
    #       add_command(tc, req5, cmd_cookie, cmd, tc.client, False)

    #       cmd_cookie = "send bad data from Server TTL=0: Re-use case"
    #       cmd = "hping3 --count 1 --baseport {} --destport {}\
    #             --setseq {} --setack {} --ttl 0 --data 10 {}"\
    #             .format(tc.server_port+idx, tc.client_port, iack_num, iseq_num,
    #                     tc.client.ip_address)
    #       add_command(tc, req5, cmd_cookie, cmd, tc.server, False)

    #       #
    #       # Re-send Bad Data with TCP-RSVD-FLAGS-BIT-0 set from Client and Server
    #       #
    #       cmd_cookie = "send bad data from Client TCP-RSVD-FLAGS-BIT-0: Re-use"
    #       cmd = "hping3 --count 1 --baseport {} --destport {}\
    #             --setseq {} --setack {} --xmas --data 10 {}"\
    #             .format(tc.client_port, tc.server_port+idx, rack_num+10, rseq_num,
    #                     tc.server.ip_address)
    #       add_command(tc, req5, cmd_cookie, cmd, tc.client, False)

    #       cmd_cookie = "send bad data from Server TCP-RSVD-FLAGS-BIT-0: Re-use"
    #       cmd = "hping3 --count 1 --baseport {} --destport {}\
    #             --setseq {} --setack {} --xmas --data 10 {}"\
    #             .format(tc.server_port+idx, tc.client_port, iack_num+10, iseq_num,
    #             tc.client.ip_address)
    #       add_command(tc, req5, cmd_cookie, cmd, tc.server, False)

    #       #
    #       # Re-send Bad Data with TCP-RSVD-FLAGS-BIT-1 set from Client and Server
    #       #
    #       cmd_cookie = "send bad data from Client TCP-RSVD-FLAGS-BIT-1: Re-use"
    #       cmd = "hping3 --count 1 --baseport {} --destport {}\
    #             --setseq {} --setack {} --ymas --data 10 {}"\
    #             .format(tc.client_port, tc.server_port+idx, rack_num+20, rseq_num,
    #                     tc.server.ip_address)
    #       add_command(tc, req5, cmd_cookie, cmd, tc.client, False)

    #       cmd_cookie = "send bad data from Server TCP-RSVD-FLAGS-BIT-1: Re-use"
    #       cmd = "hping3 --count 1 --baseport {} --destport {}\
    #             --setseq {} --setack {} --ymas --data 10 {}"\
    #             .format(tc.server_port+idx, tc.client_port, iack_num+20, iseq_num,
    #                     tc.client.ip_address)
    #       add_command(tc, req5, cmd_cookie, cmd, tc.server, False)

    #       tc.resp5 = api.Trigger(req5)
    #       for command in tc.resp5.commands:
    #           api.PrintCommandResults(command)

    #       api.Trigger_TerminateAllCommands(tc.resp4)
    #       api.Trigger_TerminateAllCommands(tc.resp5)
    #       idx += 1

    #   #
    #   # Do "show session" command
    #   #
    #   req6 = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    #   tc.cookie_idx_6 = tc.cookie_idx
    #   cmd_cookie = "show session"
    #   cmd = "/nic/bin/halctl show session"
    #   add_naples_command(tc, req6, cmd_cookie, cmd, tc.naples)

    #   #
    #   # Trigger "metrics get IPv4FlowDropMetrics" output
    #   #
    #   cmd_cookie = "show flow-drop: Re-use case"
    #   cmd = "PATH=$PATH:/platform/bin/;\
    #          LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
    #          export PATH; export LD_LIBRARY_PATH;\
    #          /nic/bin/delphictl metrics get IPv4FlowDropMetrics"
    #   add_naples_command(tc, req6, cmd_cookie, cmd, tc.naples)

    #   tc.resp6 = api.Trigger(req6)
    #   for command in tc.resp6.commands:
    #       api.PrintCommandResults(command)

    #
    # Do "show session" command after doing Sleep for 45secs
    #
    #   api.Trigger_TerminateAllCommands(resp_nc2)
    req7 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cookie_idx_7 = tc.cookie_idx
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session; sleep 45"
    add_naples_command(tc, req7, cmd_cookie, cmd, tc.naples)

    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req7, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Delete case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"

    add_naples_command(tc, req7, cmd_cookie, cmd, tc.naples)

    tc.resp7 = api.Trigger(req7)
    for command in tc.resp7.commands:
        api.PrintCommandResults(command)

    return api.types.status.SUCCESS
Example 17
def Trigger(tc):
    tc.cmd_cookies = []
    client, server, naples = tc.client, tc.server, tc.naples

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
    api.Logger.info("Starting TCP aging test from %s" % (cmd_cookie))

    cmd_cookie = "halctl clear session"
    api.Trigger_AddNaplesCommand(req, server.node_name,
                                 "/nic/bin/halctl clear session")
    tc.cmd_cookies.append(cmd_cookie)

    #Step 0: Update the timeout in the config object
    if not tc.args.skip_security_prof:
        update_timeout('tcp-timeout', tc.iterators.timeout)
        timeout = timetoseconds(tc.iterators.timeout)
    else:
        timeout = DEFAULT_TCP_TIMEOUT

    #profilereq = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    #api.Trigger_AddNaplesCommand(profilereq, naples.node_name, "/nic/bin/halctl show nwsec profile --id 11")
    #profcommandresp = api.Trigger(profilereq)
    #cmd = profcommandresp.commands[-1]
    #for command in profcommandresp.commands:
    #    api.PrintCommandResults(command)
    #timeout = get_haltimeout('tcp-timeout', cmd)
    #tc.config_update_fail = 0
    #if (timeout != timetoseconds(tc.iterators.timeout)):
    #    tc.config_update_fail = 1

    server_port = api.AllocateTcpPort()
    client_port = api.AllocateTcpPort()

    cmd_cookie = "Send SYN from client"
    api.Trigger_AddCommand(
        req, client.node_name, client.workload_name,
        "hping3 -c 1 -s %s -p %s -M 0 -L 0 -S %s" %
        (client_port, server_port, server.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Send SYN, ACK and SEQ from server"
    api.Trigger_AddCommand(
        req, server.node_name, server.workload_name,
        "hping3 -c 1 -s %s -p %s -M 0 -A -L 1 -S %s" %
        (server_port, client_port, client.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    #Get Seq + Ack
    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session")
    tc.cmd_cookies.append("show session")
    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --dstport {} --dstip {} --yaml".format(
            server_port, server.ip_address))
    tc.cmd_cookies.append("show session detail")
    trig_resp1 = api.Trigger(req)
    cmd = trig_resp1.commands[-1]
    for command in trig_resp1.commands:
        api.PrintCommandResults(command)
    tc.ctrckinf = get_conntrackinfo(cmd)

    #Send ACK
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddCommand(
        req2, client.node_name, client.workload_name,
        "hping3 -c 1 -s {} -p {} -M {}  -L {} --ack {}".format(
            client_port, server_port, tc.ctrckinf.r_tcpacknum,
            tc.ctrckinf.r_tcpseqnum, server.ip_address))
    tc.cmd_cookies.append("Send ACK")

    cmd_cookie = "Before aging show session"
    api.Trigger_AddNaplesCommand(
        req2, naples.node_name,
        "/nic/bin/halctl show session --dstport {} --dstip {} | grep ESTABLISHED"
        .format(server_port, server.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    #Get it from the config
    if not tc.args.skip_security_prof:
        timeout += get_timeout('tcp-close') + (TCP_TICKLE_GAP *
                                               NUM_TICKLES) + GRACE_TIME
    else:
        timeout += GRACE_TIME

    api.Logger.info("Sleeping for %s sec... " % timeout)
    cmd_cookie = "sleep"
    api.Trigger_AddNaplesCommand(req2,
                                 naples.node_name,
                                 "sleep %s" % timeout,
                                 timeout=300)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "After aging show session"
    api.Trigger_AddNaplesCommand(
        req2, naples.node_name,
        "/nic/bin/halctl show session --dstport {} --dstip {} | grep ESTABLISHED"
        .format(server_port, server.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "show session yaml"
    api.Trigger_AddNaplesCommand(
        req2, naples.node_name,
        "/nic/bin/halctl show session --yaml --ipproto 6")
    tc.cmd_cookies.append(cmd_cookie)

    trig_resp2 = api.Trigger(req2)
    term_resp2 = api.Trigger_TerminateAllCommands(trig_resp2)
    tc.resp2 = api.Trigger_AggregateCommandsResponse(trig_resp2, term_resp2)

    term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp1, term_resp1)
    return api.types.status.SUCCESS
Example 18
def start_fuz(tc,
              fuz_run_time=__fuz_run_time,
              read_timeout=__read_timeout,
              copyfuz=True):
    '''
    if arping.ArPing(tc) != api.types.status.SUCCESS:
        api.Logger.info("arping failed on setup")
    tc1 = ping.TestPing(tc, 'local_only', 'ipv4', 64)
    tc2 = ping.TestPing(tc, 'remote_only', 'ipv4', 64)
    if tc1 != api.types.status.SUCCESS or tc2 != api.types.status.SUCCESS:
        api.Logger.info("ping test failed on setup")
        return api.types.status.FAILURE
    '''
    if copyfuz:
        ret = copy_fuz(tc)
        if ret != api.types.status.SUCCESS:
            return api.types.status.FAILURE

    tc.serverCmds = []
    tc.clientCmds = []
    tc.cmd_descr = []

    serverReq = None
    clientReq = None

    serverReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    clientReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    # ping test above sets the workload pairs to remote only
    # setting wl_pairs as per arg selected in testbundle

    workload_pairs = []
    if tc.args.type == 'local_only':
        api.Logger.info("local_only test")
        workload_pairs = api.GetLocalWorkloadPairs()
    elif tc.args.type == 'both':
        api.Logger.info(" both local and remote test")
        workload_pairs = api.GetLocalWorkloadPairs()
        workload_pairs.extend(api.GetRemoteWorkloadPairs())
    else:
        api.Logger.info("remote_only test")
        workload_pairs = api.GetRemoteWorkloadPairs()

    wl_under_move = []
    for wl_info in tc.move_info:
        wl_under_move.append(wl_info.wl)

    tc.workload_pairs = []
    for pairs in workload_pairs:
        if pairs[0] in wl_under_move or pairs[1] in wl_under_move:
            api.Logger.info("Adding %s and %s for fuz test" %
                            (pairs[0].workload_name, pairs[1].workload_name))
            tc.workload_pairs.append(pairs)

    for idx, pairs in enumerate(tc.workload_pairs):
        client = pairs[0]
        server = pairs[1]
        cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        tc.cmd_descr.append(cmd_descr)
        num_sessions = int(getattr(tc.args, "num_sessions", 1))

        serverCmd = None
        clientCmd = None
        port = api.AllocateTcpPort()

        api.Logger.info("Starting Fuz test from %s num-sessions %d port %d" %
                        (cmd_descr, num_sessions, port))

        serverCmd = tc.fuz_exec[server.workload_name] + " -port " + str(port)
        clientCmd = tc.fuz_exec[client.workload_name] + " -conns " +\
                    str(num_sessions) + " -duration " + str(fuz_run_time) +\
                    " -attempts 1 -read-timeout " + str(read_timeout) + " -talk " +\
                    server.ip_address + ":" + str(port)

        api.Logger.info("Server command %s" % serverCmd)
        api.Logger.info("Client command %s" % clientCmd)

        tc.serverCmds.append(serverCmd)
        tc.clientCmds.append(clientCmd)

        api.Trigger_AddCommand(serverReq,
                               server.node_name,
                               server.workload_name,
                               serverCmd,
                               background=True)

        api.Trigger_AddCommand(clientReq,
                               client.node_name,
                               client.workload_name,
                               clientCmd,
                               background=True)

    tc.server_resp = api.Trigger(serverReq)
    #Sleep for some time as bg may not have been started.
    time.sleep(5)
    tc.fuz_client_resp = api.Trigger(clientReq)
    return api.types.status.SUCCESS
Example 19
def runIperfTest(tc, srv, cli):
    if api.IsDryrun():
        return api.types.status.SUCCESS

    req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    proto = getattr(tc.iterators, "proto", 'tcp')

    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)

    pktsize = getattr(tc.iterators, "pktsize", 512)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')

    if ipproto == 'v6':
        srv_ip_address = srv.ipv6_address
        tc.cmd_descr = " Server: %s(%s) <--> Client: %s(%s)" %\
            (srv.interface, srv.ipv6_address, cli.interface, cli.ipv6_address)
    else:
        srv_ip_address = srv.ip_address
        tc.cmd_descr = " Server: %s(%s) <--> Client: %s(%s)" %\
            (srv.interface, srv.ip_address, cli.interface, cli.ip_address)

    api.Logger.info("Starting TSO test %s" % (tc.cmd_descr))

    tc.srv_bad_csum = ionic_stats.getNetstatBadCsum(srv, proto)
    tc.cli_bad_csum = ionic_stats.getNetstatBadCsum(cli, proto)

    for i in range(number_of_iperf_threads):
        if proto == 'tcp':
            port = api.AllocateTcpPort()
        else:
            port = api.AllocateUdpPort()

        file_name_suffix = "_instance" + \
            str(i) + proto + "_" + ipproto + "_" + str(pktsize)

        file_name = '/tmp/' + 'srv_' + srv.interface + file_name_suffix
        iperf_server_cmd = cmd_builder.iperf_server_cmd(port=port)
        api.Trigger_AddCommand(req1,
                               srv.node_name,
                               srv.workload_name,
                               iperf_server_cmd,
                               background=True)

        file_name = '/tmp/' + 'cli_' + cli.interface + file_name_suffix
        iperf_file_name = file_name + ".log"
        iperf_client_cmd = cmd_builder.iperf_client_cmd(
            server_ip=srv_ip_address,
            port=port,
            proto=proto,
            pktsize=pktsize,
            ipproto=ipproto)

        # Once iperf JSON parsing support is available, we won't need these hacks.
        api.Trigger_AddCommand(
            req2, cli.node_name, cli.workload_name,
            iperf_client_cmd + " -J | tee " + iperf_file_name)
        # Read the retransmission counter from the log
        api.Trigger_AddCommand(
            req2, cli.node_name, cli.workload_name, 'grep retrans ' +
            iperf_file_name + '| tail -1| cut -d ":" -f 2 | cut -d "," -f 1')
        # Read the bandwidth numbers.
        api.Trigger_AddCommand(
            req2, cli.node_name, cli.workload_name, 'cat ' + iperf_file_name +
            ' | grep bits_per_second | tail -1 |  cut -d ":" -f 2 | cut -d "," -f 1'
        )
        # Read the bytes transferred numbers.
        api.Trigger_AddCommand(
            req2, cli.node_name, cli.workload_name, 'cat ' + iperf_file_name +
            ' | grep bytes | tail -1 |  cut -d ":" -f 2 | cut -d "," -f 1')

    trig_resp1 = api.Trigger(req1)
    if trig_resp1 is None:
        api.Logger.error("Failed to run iperf server")
        return api.types.status.FAILURE

    tc.resp = api.Trigger(req2)
    if tc.resp is None:
        api.Logger.error("Failed to run iperf client")
        return api.types.status.FAILURE

    for cmd in tc.resp.commands:
        if cmd.exit_code != 0:
            api.Logger.error("Failed to start client iperf\n")
            api.PrintCommandResults(cmd)
            return api.types.status.FAILURE

    status, retran, bw = verifySingle(tc)
    vlan = getattr(tc.iterators, 'vlantag', 'off')
    vxlan = getattr(tc.iterators, 'vxlan', 'off')
    tso = getattr(tc.iterators, 'tso_offload', 'off')

    api.Logger.info(
        "Result TSO: %s VLAN: %s VXLAN: %s Proto: %s/%s Pkt size: %d Threads: %d"
        " Bandwidth: %d Mbps" % (tso, vlan, vxlan, proto, ipproto, pktsize,
                                 number_of_iperf_threads, bw))
    return status
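
The grep/cut pipeline above scrapes the retransmit, bandwidth and byte counters out of the iperf JSON log with shell one-liners. After copying the log back (for example with api.CopyFromWorkload), the same numbers can be read with Python's json module. A minimal sketch, assuming iperf3's -J output layout (the end.sum_sent / end.sum_received summary objects) and a purely illustrative log path:

import json

def parse_iperf3_json(log_path):
    # Pull the summary counters out of an iperf3 -J log.
    # Field names assume iperf3's JSON schema; adjust if the iperf build differs.
    with open(log_path) as f:
        data = json.load(f)
    sent = data["end"]["sum_sent"]
    recv = data["end"]["sum_received"]
    return {
        "retransmits": sent.get("retransmits", 0),      # TCP runs only
        "bytes": sent["bytes"],
        "bits_per_second": recv["bits_per_second"],
    }

# Example (hypothetical path following the naming scheme above):
# stats = parse_iperf3_json("/tmp/cli_eth1_instance0tcp_v4_512.log")
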
Esempio n. 20
0
def Trigger(tc):
    naples_list = []
    tc.cmd_cookies = []
    tc.fin_fail = 0

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    idx = 0
    for pairs in tc.workload_pairs:
        server = pairs[0]
        client = pairs[1]

        naples = server
        if not server.IsNaples():
            naples = client
            if not client.IsNaples():
                continue

        found = False
        for info in naples_list:
            if info[0] == naples.node_name:
                found = True
        if not found:
            naples_list.append((naples.node_name, pairs))

        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                      (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        api.Logger.info("Starting Upgrade test from %s" % (tc.cmd_descr))

        #Step 1: Start TCPDUMP
        api.Trigger_AddCommand(req,
                               client.node_name,
                               client.workload_name,
                               "tcpdump -i {} > out.txt".format(
                                   client.interface),
                               background=True)
        tc.cmd_cookies.append("tcpdump on client")

        api.Trigger_AddCommand(req,
                               server.node_name,
                               server.workload_name,
                               "tcpdump -i {} > out.txt".format(
                                   server.interface),
                               background=True)
        tc.cmd_cookies.append("tcpdump on server")

        #Step 2: Start TCP Server
        server_port = api.AllocateTcpPort()
        api.Trigger_AddCommand(req,
                               server.node_name,
                               server.workload_name,
                               "nc -l %s" % (server_port),
                               background=True)
        tc.cmd_cookies.append("start server")

        #Step 3: Start TCP Client
        client_port = api.AllocateTcpPort()
        api.Trigger_AddCommand(req,
                               client.node_name,
                               client.workload_name,
                               "nc {} {} -p {}".format(server.ip_address,
                                                       server_port,
                                                       client_port),
                               background=True)
        tc.cmd_cookies.append("start client")

        api.Trigger_AddCommand(req,
                               server.node_name,
                               server.workload_name,
                               "ping {}".format(server.ip_address),
                               background=True)
        tc.cmd_cookies.append("Start ping")
        idx = idx + 1

    for node in naples_list:
        api.Trigger_AddNaplesCommand(req, node[0],
                                     "/nic/bin/halctl show session")
        tc.cmd_cookies.append("show session")
        api.Trigger_AddNaplesCommand(req, node[0],
                                     "/nic/bin/halctl debug test send-fin")
        tc.cmd_cookies.append("Send fin")
        api.Trigger_AddNaplesCommand(req, node[0],
                                     "/nic/bin/halctl show session --yaml")
        tc.cmd_cookies.append("show session after delete")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    for node in naples_list:
        found = False
        api.CopyFromWorkload(node[1][0].node_name, node[1][0].workload_name,
                             ['out.txt'], dir_path)
        api.Logger.info("Copy from {} {}".format(node[0], node[1][0]))
        tcpout = dir_path + '/out.txt'
        for line in open(tcpout, 'r'):
            if re.search(r"\[F\.\]", line):
                found = True
                break
        if node[0] == node[1][0].node_name and not found:
            tc.fin_fail = 1

        os.remove(tcpout)
        found = False
        api.CopyFromWorkload(node[1][1].node_name, node[1][1].workload_name,
                             ['out.txt'], dir_path)
        api.Logger.info("Copy from {} {}".format(node[0], node[1][0]))
        tcpout = dir_path + '/out.txt'
        for line in open(tcpout, 'r'):
            if re.search("\[F\.\]", line):
                found = True
        if node[0] == node[1][1].node_name and found == False:
            tc.fin_fail = 1
        os.remove(tcpout)

    return api.types.status.SUCCESS
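
The two copy-and-scan blocks above repeat the same check on the tcpdump output; the scan itself is small enough to factor out. A minimal sketch (tcpdump prints FIN+ACK as "Flags [F.]", which is what the regex matches):

import re

def capture_has_fin(capture_path):
    # Return True if the tcpdump text capture contains a FIN segment ("Flags [F.]").
    fin_re = re.compile(r"\[F\.\]")
    with open(capture_path, "r") as f:
        return any(fin_re.search(line) for line in f)
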
Esempio n. 21
0
def iperf_test(tc):
    
    tc.flow_hit_cnt_before = {}
    tc.flow_hit_cnt_after = {}
    tc.flow_match_before = {}
    tc.flow_match_after = {}
    tc.conntrack_state = {}

    tc.is_ct_test = False
    if tc.duration is not None and tc.stateful:
        tc.is_ct_test = True
        api.Logger.info("Start Connection Tracking e2e iPerf Test")

    wl_nodes = [pair[0] for pair in tc.wl_node_nic_pairs]
    node1, node2 = wl_nodes[0], wl_nodes[1]
    node1_ath_nic = tc.athena_node_nic_pairs[0][1]
    node2_ath_nic = tc.athena_node_nic_pairs[1][1]
    tc.node_info = {'node1': node1, 'node2': node2,
                    'node1_ath_nic': node1_ath_nic,
                    'node2_ath_nic': node2_ath_nic}

    # cl_node => client node
    for cl_node in wl_nodes:
        tc.flow_hit_cnt_before[cl_node] = []
        tc.flow_hit_cnt_after[cl_node] = []
        tc.flow_match_before[cl_node] = []
        tc.flow_match_after[cl_node] = []
        tc.conntrack_state[cl_node] = []

        sintf, dintf, smac, dmac, sip, dip = _get_client_server_info(
                                                            cl_node, tc)    
        if tc.proto == 'UDP':
            sport = api.AllocateUdpPort()
            dport = api.AllocateUdpPort()
        else:
            sport = api.AllocateTcpPort()
            dport = api.AllocateTcpPort()

        flow_n1, flow_n2 = _get_bitw_flows(cl_node, tc.proto, sip, dip, 
                                            sport = sport, dport = dport)

        def _get_flow_match_info(flow_match_dict):
            for node, nic, vnic_id, flow in [
                    (node1, node1_ath_nic, tc.node1_vnic_id, flow_n1),
                    (node2, node2_ath_nic, tc.node2_vnic_id, flow_n2)]:
                
                rc, num_match_ent = utils.match_dynamic_flows(node, vnic_id, 
                                                                flow, nic)
                if rc != api.types.status.SUCCESS:
                    return rc
                
                flow_match_dict[cl_node].append((flow, num_match_ent))

            return (api.types.status.SUCCESS)        

        
        # check if flow installed on both athena nics before sending traffic
        rc = _get_flow_match_info(tc.flow_match_before) 
        if rc != api.types.status.SUCCESS:
            return rc

        # Send iperf traffic 
        if cl_node == 'node1':
            client_wl = tc.wl[0]
            server_wl = tc.wl[1]
        else:
            client_wl = tc.wl[1]
            server_wl = tc.wl[0]

        cmd_descr = "Client %s(%s) <--> Server %s(%s)" % (sip, sintf, 
                                                        dip, dintf)
        tc.cmd_descr.append(cmd_descr)
        api.Logger.info("Starting Iperf test: %s" % cmd_descr)
        
        serverCmd = iperf.ServerCmd(dport, server_ip = dip)
        
        if tc.proto == 'UDP':
            clientCmd = iperf.ClientCmd(dip, dport, proto = 'udp',
                                        jsonOut = True, client_ip = sip,
                                        client_port = sport, 
                                        pktsize = tc.pyld_size,
                                        packet_count = tc.pkt_cnt)
        else:
            if tc.is_ct_test:
                clientCmd = iperf.ClientCmd(dip, dport, jsonOut = True,
                                            client_ip = sip, client_port = sport,
                                            pktsize = tc.pyld_size,
                                            time = tc.duration)
            else:
                clientCmd = iperf.ClientCmd(dip, dport, jsonOut = True,
                                            client_ip = sip, client_port = sport,
                                            pktsize = tc.pyld_size,
                                            packet_count = tc.pkt_cnt)

        tc.serverCmds.append(serverCmd)
        tc.clientCmds.append(clientCmd)

        serverReq = api.Trigger_CreateExecuteCommandsRequest()
        clientReq = api.Trigger_CreateExecuteCommandsRequest()
        
        api.Trigger_AddCommand(serverReq, server_wl.node_name, 
                                server_wl.workload_name, serverCmd, 
                                background = True)

        api.Trigger_AddCommand(clientReq, client_wl.node_name, 
                                client_wl.workload_name, clientCmd,
                                background = True)

        server_resp = api.Trigger(serverReq)
        # sleep for bg iperf servers to be started
        time.sleep(3)

        tc.iperf_client_resp.append(api.Trigger(clientReq))

        if tc.is_ct_test:
            iperf_ct_test(tc, cl_node, flow_n1, flow_n2)

        api.Trigger_TerminateAllCommands(server_resp)

        # check if flow installed on both athena nics after sending traffic
        rc = _get_flow_match_info(tc.flow_match_after)
        if rc != api.types.status.SUCCESS:
            return rc

    return (api.types.status.SUCCESS)
Esempio n. 22
0
def Trigger(tc):
    if tc.skip:
        return api.types.status.FAILURE

    #
    # Set-up Test Environment
    #
    tc.cmd_cookies = []
    tc.cookie_idx = 0
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                 (tc.server.workload_name, tc.server.ip_address,
                  tc.client.workload_name, tc.client.ip_address)
    api.Logger.info("Starting Single-IPv4-TCP-Flow-Drops test from %s" %\
                   (cmd_cookie))

    #
    # Start TCPDUMP in background on Server/Client
    #
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    #cmd_cookie = "start tcpdump on Server"
    #cmd = "sudo tcpdump -nnSXi {} > out.txt".format(tc.server.interface)
    #add_command(tc, req, cmd_cookie, cmd, tc.server, True)

    #cmd_cookie = "start tcpdump on Client"
    #cmd = "sudo tcpdump -nnSXi {} > out.txt".format(tc.client.interface)
    #add_command(tc, req, cmd_cookie, cmd, tc.client, True)

    #
    # Start with a clean slate by clearing all sessions/flows
    # 45 secs sleep is to ensure that residual FlowDrops induced by previous
    # non-flowstats testbundle (in this case Connectivity) is flushed out
    #
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session; sleep 45"
    add_naples_command(tc, req, cmd_cookie, cmd, tc.naples)

    #
    # Make sure that Client<=>Server Forwarding is set up
    #
    cmd_cookie = "trigger ping: Create case"
    cmd = "ping -c1 %s -I %s" %\
          (tc.server.ip_address, tc.client.interface)
    add_command(tc, req, cmd_cookie, cmd, tc.client, False)

    tc.resp = api.Trigger(req)
    for command in tc.resp.commands:
        api.PrintCommandResults(command)

    #
    # Allocate TCP-portnum for Server and start the service on the Server
    #
    req_nc1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.server_port = api.AllocateTcpPort()
    cmd_cookie = "start server"
    cmd = "nc --listen %s" % (tc.server_port)
    add_command(tc, req_nc1, cmd_cookie, cmd, tc.server, True)

    #
    # Allocate TCP-portnum for Client and establish TCP-connection
    #
    tc.client_port = api.AllocateTcpPort()
    cmd_cookie = "start client"
    cmd = "nc {} {} --source-port {} "\
    .format(tc.server.ip_address, tc.server_port, tc.client_port)
    add_command(tc, req_nc1, cmd_cookie, cmd, tc.client, True)

    resp_nc1 = api.Trigger(req_nc1)
    for command in resp_nc1.commands:
        api.PrintCommandResults(command)

    #
    # Do applicable "show session" commands and
    # retrieve Seq-num and Ack-num associated with the session
    #
    req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req1, cmd_cookie, cmd, tc.naples)

    cmd_cookie = "show session detail"
    cmd = "/nic/bin/halctl show session --dstport {} --dstip {} --yaml"\
    .format(tc.server_port, tc.server.ip_address)
    add_naples_command(tc, req1, cmd_cookie, cmd, tc.naples)

    tc.resp1 = api.Trigger(req1)
    cmd = tc.resp1.commands[-1]
    for command in tc.resp1.commands:
        api.PrintCommandResults(command)
    tc.pre_ctrckinf = get_conntrackinfo(cmd)

    #
    # Send Bad Data with TTL=0 from both Client and Server
    #
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "send bad data from Client TTL=0: Create case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ttl 0 --data 10 {}".format(tc.client_port, tc.server_port,
                                        tc.pre_ctrckinf.r_tcpacknum,
                                        tc.pre_ctrckinf.r_tcpseqnum,
                                        tc.server.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TTL=0: Create case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ttl 0 --data 10 {}".format(tc.server_port, tc.client_port,
                                        tc.pre_ctrckinf.i_tcpacknum,
                                        tc.pre_ctrckinf.i_tcpseqnum,
                                        tc.client.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

    #
    # Send Bad Data with TCP-RSVD-FLAGS-BIT-0 set from both Client and Server
    #
    cmd_cookie = "send bad data from Client TCP-RSVD-FLAGS-BIT-0: Create case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --xmas --data 10 {}".format(tc.client_port, tc.server_port,
                                       tc.pre_ctrckinf.r_tcpacknum + 10,
                                       tc.pre_ctrckinf.r_tcpseqnum,
                                       tc.server.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TCP-RSVD-FLAGS-BIT-0: Create case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --xmas --data 10 {}".format(tc.server_port, tc.client_port,
                                       tc.pre_ctrckinf.i_tcpacknum + 10,
                                       tc.pre_ctrckinf.i_tcpseqnum,
                                       tc.client.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

    #
    # Send Bad Data with TCP-RSVD-FLAGS-BIT-1 set from both Client and Server
    #
    cmd_cookie = "send bad data from Client TCP-RSVD-FLAGS-BIT-1: Create case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ymas --data 10 {}".format(tc.client_port, tc.server_port,
                                       tc.pre_ctrckinf.r_tcpacknum + 20,
                                       tc.pre_ctrckinf.r_tcpseqnum,
                                       tc.server.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TCP-RSVD-FLAGS-BIT-1: Create case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ymas --data 10 {}".format(tc.server_port, tc.client_port,
                                       tc.pre_ctrckinf.i_tcpacknum + 20,
                                       tc.pre_ctrckinf.i_tcpseqnum,
                                       tc.client.ip_address)
    add_command(tc, req2, cmd_cookie, cmd, tc.server, False)

    tc.resp2 = api.Trigger(req2)
    for command in tc.resp2.commands:
        api.PrintCommandResults(command)

    #
    # Do "show session" command
    #
    req3 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cookie_idx_3 = tc.cookie_idx
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req3, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Create case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"

    add_naples_command(tc, req3, cmd_cookie, cmd, tc.naples)

    tc.resp3 = api.Trigger(req3)
    for command in tc.resp3.commands:
        api.PrintCommandResults(command)

    #
    # Clear all sessions/flows
    #
    api.Trigger_TerminateAllCommands(resp_nc1)
    req_nc2 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session"
    add_naples_command(tc, req_nc2, cmd_cookie, cmd, tc.naples)

    #
    # Make sure that Client<=>Server Forwarding is set up
    #
    cmd_cookie = "trigger ping: Re-use case"
    cmd = "ping -c1 %s -I %s" %\
          (tc.server.ip_address, tc.client.interface)
    add_command(tc, req_nc2, cmd_cookie, cmd, tc.client, False)

    #
    # Re-establish TCP-connection
    #
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req_nc2, cmd_cookie, cmd, tc.naples)

    cmd_cookie = "restart server"
    cmd = "nc --listen %s" % (tc.server_port)
    add_command(tc, req_nc2, cmd_cookie, cmd, tc.server, True)

    cmd_cookie = "restart client"
    cmd = "nc {} {} --source-port {} "\
    .format(tc.server.ip_address, tc.server_port, tc.client_port)
    add_command(tc, req_nc2, cmd_cookie, cmd, tc.client, True)

    resp_nc2 = api.Trigger(req_nc2)
    for command in resp_nc2.commands:
        api.PrintCommandResults(command)

    #
    # Do applicable "show session" commands and
    # retrieve Seq-num and Ack-num associated with the session
    #
    req4 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req4, cmd_cookie, cmd, tc.naples)

    cmd_cookie = "show session detail"
    cmd = "/nic/bin/halctl show session --dstport {} --dstip {} --yaml"\
    .format(tc.server_port, tc.server.ip_address)
    add_naples_command(tc, req4, cmd_cookie, cmd, tc.naples)

    tc.resp4 = api.Trigger(req4)
    cmd = tc.resp4.commands[-1]
    for command in tc.resp4.commands:
        api.PrintCommandResults(command)
    tc.post_ctrckinf = get_conntrackinfo(cmd)

    #
    # Re-send Bad Data with TTL=0 from both Client and Server
    #
    req5 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "send bad data from Client TTL=0: Re-use case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ttl 0 --data 10 {}".format(tc.client_port, tc.server_port,
                                        tc.post_ctrckinf.r_tcpacknum,
                                        tc.post_ctrckinf.r_tcpseqnum,
                                        tc.server.ip_address)
    add_command(tc, req5, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TTL=0: Re-use case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ttl 0 --data 10 {}".format(tc.server_port, tc.client_port,
                                        tc.post_ctrckinf.i_tcpacknum,
                                        tc.post_ctrckinf.i_tcpseqnum,
                                        tc.client.ip_address)
    add_command(tc, req5, cmd_cookie, cmd, tc.server, False)

    #
    # Re-send Bad Data with TCP-RSVD-FLAGS-BIT-0 set from both Client and Server
    #
    cmd_cookie = "send bad data from Client TCP-RSVD-FLAGS-BIT-0: Re-use case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --xmas --data 10 {}".format(tc.client_port, tc.server_port,
                                       tc.post_ctrckinf.r_tcpacknum + 10,
                                       tc.post_ctrckinf.r_tcpseqnum,
                                       tc.server.ip_address)
    add_command(tc, req5, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TCP-RSVD-FLAGS-BIT-0: Re-use case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --xmas --data 10 {}".format(tc.server_port, tc.client_port,
                                       tc.post_ctrckinf.i_tcpacknum + 10,
                                       tc.post_ctrckinf.i_tcpseqnum,
                                       tc.client.ip_address)
    add_command(tc, req5, cmd_cookie, cmd, tc.server, False)

    #
    # Re-send Bad Data with TCP-RSVD-FLAGS-BIT-1 set from both Client and Server
    #
    cmd_cookie = "send bad data from Client TCP-RSVD-FLAGS-BIT-1: Re-use case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ymas --data 10 {}".format(tc.client_port, tc.server_port,
                                       tc.post_ctrckinf.r_tcpacknum + 20,
                                       tc.post_ctrckinf.r_tcpseqnum,
                                       tc.server.ip_address)
    add_command(tc, req5, cmd_cookie, cmd, tc.client, False)

    cmd_cookie = "send bad data from Server TCP-RSVD-FLAGS-BIT-1: Re-use case"
    cmd = "hping3 --count 1 --baseport {} --destport {} --setseq {} --setack {}\
           --ymas --data 10 {}".format(tc.server_port, tc.client_port,
                                       tc.post_ctrckinf.i_tcpacknum + 20,
                                       tc.post_ctrckinf.i_tcpseqnum,
                                       tc.client.ip_address)
    add_command(tc, req5, cmd_cookie, cmd, tc.server, False)

    tc.resp5 = api.Trigger(req5)
    for command in tc.resp5.commands:
        api.PrintCommandResults(command)

    #
    # Do "show session" command
    #
    req6 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cookie_idx_6 = tc.cookie_idx
    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req6, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Re-use case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"

    add_naples_command(tc, req6, cmd_cookie, cmd, tc.naples)

    tc.resp6 = api.Trigger(req6)
    for command in tc.resp6.commands:
        api.PrintCommandResults(command)

    #
    # Do "show session" command after doing Sleep for 45secs
    #
    api.Trigger_TerminateAllCommands(resp_nc2)
    req7 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cookie_idx_7 = tc.cookie_idx
    cmd_cookie = "clear session"
    cmd = "/nic/bin/halctl clear session; sleep 45"
    add_naples_command(tc, req7, cmd_cookie, cmd, tc.naples)

    cmd_cookie = "show session"
    cmd = "/nic/bin/halctl show session"
    add_naples_command(tc, req7, cmd_cookie, cmd, tc.naples)

    #
    # Trigger "metrics get IPv4FlowDropMetrics" output
    #
    cmd_cookie = "show flow-drop: Delete case"
    cmd = "PATH=$PATH:/platform/bin/;\
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/platform/lib/:/nic/lib/;\
           export PATH; export LD_LIBRARY_PATH;\
           /nic/bin/delphictl metrics get IPv4FlowDropMetrics"

    add_naples_command(tc, req7, cmd_cookie, cmd, tc.naples)

    tc.resp7 = api.Trigger(req7)
    for command in tc.resp7.commands:
        api.PrintCommandResults(command)

    return api.types.status.SUCCESS
Esempio n. 23
0
def Trigger(tc):
    serverCmd = None
    clientCmd = None
    IPERF_TIMEOUT = 86400
    try:
        serverReq = api.Trigger_CreateExecuteCommandsRequest()
        clientReq = api.Trigger_CreateExecuteCommandsRequest()

        client = tc.wl_pair[0]
        server = tc.wl_pair[1]

        tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address,
                        client.workload_name, client.ip_address)
        num_streams = int(getattr(tc.args, "num_streams", 2))
        api.Logger.info("Starting Iperf test from %s num-sessions %d" %
                        (tc.cmd_descr, num_streams))

        if tc.iterators.proto == 'tcp':
            port = api.AllocateTcpPort()
            serverCmd = iperf.ServerCmd(port, jsonOut=True, run_core=3)
            clientCmd = iperf.ClientCmd(server.ip_address,
                                        port,
                                        time=IPERF_TIMEOUT,
                                        jsonOut=True,
                                        num_of_streams=num_streams,
                                        run_core=3)
        else:
            port = api.AllocateUdpPort()
            serverCmd = iperf.ServerCmd(port, jsonOut=True, run_core=3)
            clientCmd = iperf.ClientCmd(server.ip_address,
                                        port,
                                        proto='udp',
                                        jsonOut=True,
                                        num_of_streams=num_streams,
                                        run_core=3)

        tc.serverCmd = serverCmd
        tc.clientCmd = clientCmd

        api.Trigger_AddCommand(serverReq,
                               server.node_name,
                               server.workload_name,
                               serverCmd,
                               background=True,
                               timeout=IPERF_TIMEOUT)

        api.Trigger_AddCommand(clientReq,
                               client.node_name,
                               client.workload_name,
                               clientCmd,
                               background=True,
                               timeout=IPERF_TIMEOUT)

        tc.server_resp = api.Trigger(serverReq)
        time.sleep(5)
        tc.iperf_client_resp = api.Trigger(clientReq)

        _run_vmedia_traffic(tc.node_name)
    except Exception:
        api.Logger.error(traceback.format_exc())
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
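
Both iperf commands above are left running in the background with a day-long timeout, so a later step has to stop them. A minimal sketch of one possible teardown, assuming the framework's usual Teardown(tc) hook and the responses stashed on tc above; the testcase's actual cleanup is not shown here:

def Teardown(tc):
    # Stop the backgrounded iperf server and client started in Trigger().
    if getattr(tc, "server_resp", None):
        api.Trigger_TerminateAllCommands(tc.server_resp)
    if getattr(tc, "iperf_client_resp", None):
        api.Trigger_TerminateAllCommands(tc.iperf_client_resp)
    return api.types.status.SUCCESS
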
Esempio n. 24
0
def Trigger(tc):
    api.Logger.info("Trigger.")
    pairs = api.GetLocalWorkloadPairs(naples=True)
    tc.cmd_cookies1 = []
    tc.cmd_cookies2 = []
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    #for w1,w2 in pairs:
    server, client = pairs[0]
    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS
        else:
            client, server = pairs[0]

    #Step 0: Update the timeout in the config object
    update_timeout("tcp-connection-setup", tc.iterators.timeout)

    #profilereq = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    #api.Trigger_AddNaplesCommand(profilereq, naples.node_name, "/nic/bin/halctl show nwsec profile --id 11")
    #profcommandresp = api.Trigger(profilereq)
    #cmd = profcommandresp.commands[-1]
    #for command in profcommandresp.commands:
    #    api.PrintCommandResults(command)
    #timeout = get_haltimeout("tcp-connection-setup", cmd)
    #tc.config_update_fail = 0
    #if (timeout != timetoseconds(tc.iterators.timeout)):
    #    tc.config_update_fail = 1
    timeout = timetoseconds(tc.iterators.timeout)

    #Step 1: Start TCP Server
    server_port = api.AllocateTcpPort()
    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "nc -l %s -i 600s" % (server_port),
                           background=True)
    tc.cmd_cookies1.append("start server")

    #Step 2: Start TCPDUMP in background
    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "tcpdump -i {} -nn > out.txt".format(
                               server.interface),
                           background=True)
    tc.cmd_cookies1.append("tcpdump")

    #Step 3: Start Hping with SYN set
    client_port = api.AllocateTcpPort()
    api.Trigger_AddCommand(
        req, client.node_name, client.workload_name,
        "hping3 -c 1 -s {} -p {} -S -M 1000 {}".format(client_port,
                                                       server_port,
                                                       server.ip_address))
    tc.cmd_cookies1.append("Send SYN")

    #Step 4: Check if session is up in SYN_RCVD state
    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --dstport {}  --dstip {} --srcip {} | grep SYN"
        .format(server_port, server.ip_address, client.ip_address))
    tc.cmd_cookies1.append("Before timeout")

    #Sleep for connection setup timeout
    ######TBD -- uncomment this once agent update fix is in!!!
    #timeout = timetoseconds(tc.iterators.timeout)
    cmd_cookie = "sleep"
    api.Trigger_AddNaplesCommand(req,
                                 naples.node_name,
                                 "sleep %s" % timeout,
                                 timeout=300)
    tc.cmd_cookies1.append("sleep")

    #Step 5: Validate if session is gone. Note that we could have session in INIT state in this case as the server would retransmit.
    #Idea is to make sure we have removed the session that was in SYN state
    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --dstport {} --dstip {} --srcip {} | grep SYN"
        .format(server_port, server.ip_address, client.ip_address))
    tc.cmd_cookies1.append("After timeout")

    #Step 6: Send an ACK now from the same port
    api.Trigger_AddCommand(
        req, client.node_name, client.workload_name,
        "hping3 -c 1 -s {} -p {} -A -M 1000 {}".format(client_port,
                                                       server_port,
                                                       server.ip_address))
    tc.cmd_cookies1.append("Send ACK")
    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    #Step 7: Check TCPDUMP on the other side to make sure we dropped the packet
    api.Trigger_AddCommand(
        req, server.node_name, server.workload_name,
        "grep \"{} >\" out.txt | grep \"\[\.\]\"".format(
            client_port, server_port, server.ip_address))
    tc.cmd_cookies2.append("Check ACK Received")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.tcpdump_resp = api.Trigger_AggregateCommandsResponse(
        trig_resp, term_resp)
    return api.types.status.SUCCESS
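
The test leans on timetoseconds() to turn iterator strings such as "30s" or "2m" into a sleep duration, but that helper is not shown here. A hypothetical sketch of what such a converter could look like, purely for illustration; the real implementation used by these tests may differ:

import re

def timetoseconds(timeout_str):
    # Hypothetical converter: "500ms" -> 0.5, "30s" -> 30, "2m" -> 120, "1h" -> 3600.
    m = re.fullmatch(r"(\d+)(ms|s|m|h)", str(timeout_str).strip())
    if not m:
        return int(timeout_str)   # assume the value is already plain seconds
    value, unit = int(m.group(1)), m.group(2)
    return value * {"ms": 0.001, "s": 1, "m": 60, "h": 3600}[unit]
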
Esempio n. 25
0
def __runTraffic(intf):

    api.Logger.info("Run traffic: %s" % intf)

    client = None
    server = None
    clientCmd = None
    serverCmd = None
    clientReq = None
    serverReq = None

    for pairs in api.GetRemoteWorkloadPairs():
        client = pairs[0]
        api.Logger.error("Comparing client interface %s with %s" %
                         (client.interface, intf))
        if client.interface == intf:
            server = pairs[1]
            break

    if server is None:
        api.Logger.error("No workload found for interface %s" % intf)
        return api.types.status.FAILURE

    cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (server.workload_name, server.ip_address,\
                    client.workload_name, client.ip_address)
    api.Logger.info("Starting Iperf test from %s" % cmd_descr)

    port = api.AllocateTcpPort()
    serverCmd = iperf.ServerCmd(port)
    clientCmd = iperf.ClientCmd(server.ip_address, port)

    serverReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    api.Trigger_AddCommand(serverReq,
                           server.node_name,
                           server.workload_name,
                           serverCmd,
                           background=True)

    clientReq = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    api.Trigger_AddCommand(clientReq, client.node_name, client.workload_name,
                           clientCmd)

    # Server runs in the background
    server_resp = api.Trigger(serverReq)

    # Sleep for some time as bg may not have been started.
    time.sleep(5)
    client_resp = api.Trigger(clientReq)

    # Stop the backgrounded server
    term_resp = api.Trigger_TerminateAllCommands(server_resp)

    # We don't bother checking the iperf results, we just wanted traffic
    # so just check that the commands succeeded
    ret = api.types.status.SUCCESS
    for cmd in server_resp.commands:
        if cmd.exit_code != 0:
            ret = api.types.status.FAILURE
    for cmd in client_resp.commands:
        if cmd.exit_code != 0:
            ret = api.types.status.FAILURE
    for cmd in term_resp.commands:
        if cmd.exit_code != 0:
            ret = api.types.status.FAILURE

    return ret
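
The three exit-code loops at the end of __runTraffic() do the same thing on three responses; they can be collapsed into one helper. A minimal sketch, reusing the api module these testcases already import:

def _all_commands_ok(*responses):
    # Return SUCCESS only if every command in every response exited cleanly.
    for resp in responses:
        for cmd in resp.commands:
            if cmd.exit_code != 0:
                api.PrintCommandResults(cmd)
                return api.types.status.FAILURE
    return api.types.status.SUCCESS

# Usage inside __runTraffic():
#   return _all_commands_ok(server_resp, client_resp, term_resp)
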
Esempio n. 26
0
def Trigger(tc):
    pairs = api.GetLocalWorkloadPairs()
    server = pairs[0][0]
    client = pairs[0][1]
    tc.cmd_cookies = []

    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
    api.Logger.info("Starting MSRPC test from %s" % (tc.cmd_descr))

    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "tcpdump -nni %s > out.txt" % (server.interface),
                           background=True)
    tc.cmd_cookies.append("tcpdump")

    api.Trigger_AddCommand(req,
                           client.node_name,
                           client.workload_name,
                           "tcpdump -nni %s > out.txt" % (client.interface),
                           background=True)
    tc.cmd_cookies.append("tcpdump")

    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "sudo nc -l %s" % (MSRPC_PORT),
                           background=True)
    tc.cmd_cookies.append("netcat start-server")

    client_port = api.AllocateTcpPort()
    api.Trigger_AddCommand(req,
                           client.node_name,
                           client.workload_name,
                           "sudo nc %s %s -p %s" %
                           (server.ip_address, MSRPC_PORT, client_port),
                           background=True)
    tc.cmd_cookies.append("netcat start-client")

    msrpcscript = dir_path + '/' + "msrpcscapy.py"
    resp = api.CopyToWorkload(server.node_name, server.workload_name,
                              [msrpcscript], 'msrpcdir')
    if resp is None:
        return api.types.status.SUCCESS

    resp = api.CopyToWorkload(client.node_name, client.workload_name,
                              [msrpcscript], 'msrpcdir')
    if resp is None:
        return api.types.status.SUCCESS

    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --dstport 135 --yaml")
    tc.cmd_cookies.append("show session yaml")
    sesssetup = api.Trigger(req)
    cmd = sesssetup.commands[-1]
    api.PrintCommandResults(cmd)
    tc.pre_ctrckinf = get_conntrackinfo(cmd)

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    msrpcpcap = dir_path + '/' + "msrpc_first_bind.pcap"
    pkts = rdpcap(msrpcpcap)
    clientidx = 0
    serveridx = 0
    client_ack = tc.pre_ctrckinf.i_tcpacknum
    server_ack = tc.pre_ctrckinf.r_tcpacknum
    filename = None
    for pkt in pkts:
        node = client.node_name
        workload = client.workload_name
        if pkt[IP].src == "172.31.9.1":
            filename = ("msrpcscapy" + "%s" + ".pcap") % (clientidx)
            msrpcscapy = dir_path + '/' + filename
            pkt[Ether].src = client.mac_address
            pkt[IP].src = client.ip_address
            pkt[Ether].dst = server.mac_address
            pkt[IP].dst = server.ip_address
            pkt[TCP].sport = client_port
            pkt[TCP].dport = 135
            if clientidx == 0:
                client_start_seq = pkt[TCP].seq
                client_start_ack = pkt[TCP].ack
            pkt[TCP].seq = tc.pre_ctrckinf.i_tcpseqnum + (pkt[TCP].seq -
                                                          client_start_seq)
            pkt[TCP].ack = client_ack
            server_ack = pkt[TCP].seq + len(pkt[TCP].payload) + 1
            clientidx += 1
        else:
            filename = ("msrpcscapy" + "%s" + ".pcap") % (serveridx)
            msrpcscapy = dir_path + '/' + filename
            pkt[Ether].src = server.mac_address
            pkt[IP].src = server.ip_address
            pkt[Ether].dst = client.mac_address
            pkt[IP].dst = client.ip_address
            node = server.node_name
            workload = server.workload_name
            pkt[TCP].dport = client_port
            pkt[TCP].sport = 135
            if serveridx == 0:
                server_start_seq = pkt[TCP].seq
                server_start_ack = pkt[TCP].ack
            pkt[TCP].seq = tc.pre_ctrckinf.r_tcpseqnum + (pkt[TCP].seq -
                                                          server_start_seq)
            pkt[TCP].ack = server_ack
            client_ack = pkt[TCP].seq + len(pkt[TCP].payload) + 1
            serveridx += 1
        del pkt[IP].chksum
        del pkt[TCP].chksum
        a = pkt.show(dump=True)
        print(a)
        wrpcap(msrpcscapy, pkt)
        resp = api.CopyToWorkload(node, workload, [msrpcscapy], 'msrpcdir')
        if resp is None:
            continue
        api.Trigger_AddCommand(
            req, node, workload,
            "sh -c 'cd msrpcdir && chmod +x msrpcscapy.py && ./msrpcscapy.py %s'"
            % (filename))
        tc.cmd_cookies.append("running #%s on node %s workload %s" %
                              (filename, node, workload))
        os.remove(msrpcscapy)

    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --dstport 135 --yaml")
    tc.cmd_cookies.append("show session yaml")
    firstbind = api.Trigger(req)
    cmd = firstbind.commands[-1]
    api.PrintCommandResults(cmd)
    tc.post_ctrckinf = get_conntrackinfo(cmd)

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    msrpcpcap = dir_path + '/' + "msrpc_second_bind.pcap"
    pkts = rdpcap(msrpcpcap)
    client_ack = tc.post_ctrckinf.i_tcpacknum
    server_ack = tc.post_ctrckinf.r_tcpacknum
    clientidx = 0
    serveridx = 0
    filename = None
    for pkt in pkts:
        node = client.node_name
        workload = client.workload_name
        if pkt[IP].src == "172.31.9.1":
            filename = ("msrpcscapy" + "%s" + ".pcap") % (clientidx)
            msrpcscapy = dir_path + '/' + filename
            pkt[Ether].src = client.mac_address
            pkt[IP].src = client.ip_address
            pkt[Ether].dst = server.mac_address
            pkt[IP].dst = server.ip_address
            pkt[TCP].sport = client_port
            pkt[TCP].dport = 135
            if clientidx == 0:
                client_start_seq = pkt[TCP].seq
                client_start_ack = pkt[TCP].ack
            pkt[TCP].seq = tc.post_ctrckinf.i_tcpseqnum + (pkt[TCP].seq -
                                                           client_start_seq)
            pkt[TCP].ack = client_ack
            server_ack = pkt[TCP].seq + len(pkt[TCP].payload) + 1
            clientidx += 1
        else:
            filename = ("msrpcscapy" + "%s" + ".pcap") % (serveridx)
            msrpcscapy = dir_path + '/' + filename
            pkt[Ether].src = server.mac_address
            pkt[IP].src = server.ip_address
            pkt[Ether].dst = client.mac_address
            pkt[IP].dst = client.ip_address
            node = server.node_name
            workload = server.workload_name
            pkt[TCP].dport = client_port
            pkt[TCP].sport = 135
            if serveridx == 0:
                server_start_seq = pkt[TCP].seq
                server_start_ack = pkt[TCP].ack
            pkt[TCP].seq = tc.post_ctrckinf.r_tcpseqnum + (pkt[TCP].seq -
                                                           server_start_seq)
            pkt[TCP].ack = server_ack
            client_ack = pkt[TCP].seq + len(pkt[TCP].payload) + 1
            serveridx += 1
        del pkt[IP].chksum
        del pkt[TCP].chksum
        a = pkt.show(dump=True)
        print(a)
        wrpcap(msrpcscapy, pkt)
        resp = api.CopyToWorkload(node, workload, [msrpcscapy], 'msrpcdir')
        if resp is None:
            continue
        api.Trigger_AddCommand(
            req, node, workload,
            "sh -c 'cd msrpcdir && chmod +x msrpcscapy.py && ./msrpcscapy.py %s'"
            % (filename))
        tc.cmd_cookies.append("running #%s on node %s workload %s" %
                              (filename, node, workload))
        os.remove(msrpcscapy)

    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "sudo nc -l 49134",
                           background=True)
    tc.cmd_cookies.append("msrpc start-server")

    api.Trigger_AddCommand(req,
                           client.node_name,
                           client.workload_name,
                           "sudo nc %s 49134 -p 59374" % (server.ip_address),
                           background=True)
    tc.cmd_cookies.append("msrpc start-client")

    # Add Naples command validation
    #api.Trigger_AddNaplesCommand(req, naples.node_name,
    #                       "/nic/bin/halctl show security flow-gate | grep MSRPC")
    #tc.cmd_cookies.append("show security flow-gate")

    api.Trigger_AddNaplesCommand(req, naples.node_name,
                                 "/nic/bin/halctl show session")
    tc.cmd_cookies.append("show session")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp2 = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    term_resp = api.Trigger_TerminateAllCommands(firstbind)
    tc.resp1 = api.Trigger_AggregateCommandsResponse(firstbind, term_resp)

    term_resp = api.Trigger_TerminateAllCommands(sesssetup)
    tc.resp = api.Trigger_AggregateCommandsResponse(sesssetup, term_resp)

    #GetTcpdumpData(client)
    #GetTcpdumpData(server)

    return api.types.status.SUCCESS
Esempio n. 27
0
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    tc.serverCmds = []
    tc.clientCmds = []
    tc.cmd_descr = []

    fuzClients = {}
    fuzServers = {}
    clientReq = api.Trigger_CreateAllParallelCommandsRequest()
    serverReq = api.Trigger_CreateAllParallelCommandsRequest()
    clientArp = defaultdict(lambda: {})

    sip_dip_cache = dict()
    server_key_cache = dict()

    def __sip_dip_key(sip, dip):
        return sip + ":" + dip

    def __server_key(server_ip, port):
        return server_ip + ":" + str(port)

    for idx, pairs in enumerate(tc.workload_pairs):
        client = pairs[0]
        server = pairs[1]
        port = None
        try:
            port = int(pairs[2])
        except:
            port = api.AllocateTcpPort()

        server_key = __server_key(server.ip_address, port)
        sip_dip_key = __sip_dip_key(client.ip_address, server.ip_address)
        if sip_dip_key in sip_dip_cache:
            #Already added, ignore for this workload pair
            continue
        sip_dip_cache[sip_dip_key] = True

        cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        tc.cmd_descr.append(cmd_descr)
        num_sessions = int(getattr(tc.args, "num_sessions", 1))
        api.Logger.info("Starting Fuz test from %s num-sessions %d" %
                        (cmd_descr, num_sessions))

        fuzClient = fuzClients.get(client.workload_name, None)
        if not fuzClient:
            fuzClient = FuzContext(client.workload_name, client.interface,
                                   client.node_name)
            fuzClients[client.workload_name] = fuzClient

        fuzClient.AddServer(server.ip_address, port)

        #Just start one instance of the server for this combination
        if server_key not in server_key_cache:
            server_key_cache[server_key] = True
            #Combine baremetal workloads
            if api.IsBareMetalWorkloadType(server.node_name):
                fuzServer = fuzServers.get(server.node_name, None)
                if not fuzServer:
                    fuzServer = FuzContext(server.workload_name,
                                           server.interface, server.node_name)
                    fuzServers[server.node_name] = fuzServer
                else:
                    fuzServer.AddInterface(server.interface)
                fuzServer.AddServer(server.ip_address, port)
            else:
                serverCmd = FUZ_EXEC[server.workload_name] + " -port " + str(
                    port)
                api.Trigger_AddCommand(serverReq,
                                       server.node_name,
                                       server.workload_name,
                                       serverCmd,
                                       background=True,
                                       stdout_on_err=True,
                                       stderr_on_err=True)

        #For now add static arp
        if api.IsBareMetalWorkloadType(client.node_name):
            if not clientArp[client.node_name].get(server.ip_address, None):
                clientArp[client.node_name][
                    server.ip_address] = server.mac_address
                arp_cmd = "arp -s " + server.ip_address + " " + server.mac_address
                api.Trigger_AddCommand(clientReq, client.node_name,
                                       client.workload_name, arp_cmd)
        else:
            if not clientArp[client.workload_name].get(server.ip_address,
                                                       None):
                clientArp[client.workload_name][
                    server.ip_address] = server.mac_address
                arp_cmd = "arp -s " + server.ip_address + " " + server.mac_address
                api.Trigger_AddCommand(clientReq, client.node_name,
                                       client.workload_name, arp_cmd)

    store = tc.GetBundleStore()
    store["server_req"] = serverReq
    store["client_ctxts"] = fuzClients
    store["server_ctxts"] = fuzServers
    store["arp_ctx"] = clientReq

    return api.types.status.SUCCESS
Esempio n. 28
0
def Trigger(tc):
    tc.cmd_cookies = []
    client, server, naples = tc.client, tc.server, tc.naples

    req = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
    api.Logger.info("Starting TCP aging test from %s" % (cmd_cookie))

    cmd_cookie = "halctl clear session"
    api.Trigger_AddNaplesCommand(req, server.node_name, "/nic/bin/halctl clear session")
    tc.cmd_cookies.append(cmd_cookie)

    #Step 0: Update the timeout in the config object
    if not tc.skip_security_prof:
        update_timeout('tcp-timeout', tc.iterators.timeout)
        timeout = timetoseconds(tc.iterators.timeout)
    else:
        timeout = DEFAULT_TCP_TIMEOUT

    #profilereq = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    #api.Trigger_AddNaplesCommand(profilereq, naples.node_name, "/nic/bin/halctl show nwsec profile --id 11")
    #profcommandresp = api.Trigger(profilereq)
    #cmd = profcommandresp.commands[-1]
    #for command in profcommandresp.commands:
    #    api.PrintCommandResults(command)
    #timeout = get_haltimeout('tcp-timeout', cmd) 
    #tc.config_update_fail = 0
    #if (timeout != timetoseconds(tc.iterators.timeout)):
    #    tc.config_update_fail = 1

    server_port = api.AllocateTcpPort() 
    client_port = api.AllocateTcpPort()

    cmd_cookie = "Send SYN from client"
    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "hping3 -c 1 -s %s -p %s -M 0 -L 0 -S %s" % (client_port, server_port, server.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Send SYN, ACK and SEQ from server"
    api.Trigger_AddCommand(req, server.node_name, server.workload_name,
                           "hping3 -c 1 -s %s -p %s -M 0 -A -L 1 -S %s" % (server_port, client_port, client.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    #Get Seq + Ack
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                "/nic/bin/halctl show session ".format(server_port, server.ip_address))
    tc.cmd_cookies.append("show session")
    api.Trigger_AddNaplesCommand(req, naples.node_name,
                "/nic/bin/halctl show session --dstport {} --dstip {} --yaml".format(server_port, server.ip_address))
    tc.cmd_cookies.append("show session detail")
    trig_resp1 = api.Trigger(req)
    cmd = trig_resp1.commands[-1]
    for command in trig_resp1.commands:
        api.PrintCommandResults(command)
    iseq_num, iack_num, iwindosz, iwinscale, rseq_num, rack_num, rwindo_sz, rwinscale = get_conntrackinfo(cmd)

    #Send ACK
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial = True)
    api.Trigger_AddCommand(req2, client.node_name, client.workload_name,
               "hping3 -c 1 -s {} -p {} -M {}  -L {} --ack {}".format(client_port, server_port, rack_num, rseq_num, server.ip_address))
    tc.cmd_cookies.append("Send ACK") 

    cmd_cookie = "Before moving show session"
    api.Trigger_AddNaplesCommand(req2, naples.node_name, "/nic/bin/halctl show session --dstport {} --dstip {} | grep ESTABLISHED".format(server_port, server.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    # vm_utils.do_vmotion(tc, tc.wl, tc.new_node)
    vm_utils.do_vmotion(tc, True)
Esempio n. 29
0
def Trigger(tc):
    if tc.args.type == 'local_only':
        pairs = api.GetLocalWorkloadPairs()
    else:
        pairs = api.GetRemoteWorkloadPairs()
    tc.cmd_cookies = []
    server, client = pairs[0]
    naples = server
    if not server.IsNaples():
        naples = client
        if not client.IsNaples():
            return api.types.status.SUCCESS
        else:
            client, server = pairs[0]

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cmd_cookie = "%s(%s) --> %s(%s)" %\
                (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
    api.Logger.info("Starting Iperf test from %s" % (cmd_cookie))

    cmd_cookie = "halctl clear session"
    api.Trigger_AddNaplesCommand(req, server.node_name,
                                 "/nic/bin/halctl clear session")
    tc.cmd_cookies.append(cmd_cookie)

    server_port = api.AllocateTcpPort()
    basecmd = 'iperf -p %d ' % server_port
    proto = 6
    timeout_str = 'tcp-drop'
    if tc.iterators.proto == 'udp':
        server_port = api.AllocateUdpPort()
        basecmd = 'iperf -u -p %d ' % server_port
        proto = 17
        timeout_str = 'udp-drop'
    #Step 0: Update the timeout in the config object
    update_timeout(timeout_str, tc.iterators.timeout)
    update_sgpolicy(client.ip_address, server.ip_address, tc.iterators.proto,
                    server_port)

    profilereq = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    api.Trigger_AddNaplesCommand(profilereq, naples.node_name,
                                 "/nic/bin/halctl show nwsec profile --id 16")
    profcommandresp = api.Trigger(profilereq)
    cmd = profcommandresp.commands[-1]
    for command in profcommandresp.commands:
        api.PrintCommandResults(command)
    timeout = get_haltimeout(timeout_str, cmd)
    api.Logger.info("Hal timeout %s, tc timeout %s" %
                    (timeout, timetoseconds(tc.iterators.timeout)))
    if (timeout != timetoseconds(tc.iterators.timeout)):
        api.Logger.error("Found mismatch in HAL and testcase timeout")
        tc.config_update_fail = 1
        return api.types.status.FAILURE

    timeout += GRACE_TIME
    cmd_cookie = "iperf -s"
    api.Trigger_AddCommand(req,
                           server.node_name,
                           server.workload_name,
                           "%s -s -t 300 " % basecmd,
                           background=True)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "iperf -c "
    api.Trigger_AddCommand(req, client.node_name, client.workload_name,
                           "%s -c %s " % (basecmd, server.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "Before aging show session"
    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --ipproto %s | grep %s" %
        (proto, naples.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    #Get it from the config
    cmd_cookie = "sleep"
    api.Trigger_AddNaplesCommand(req,
                                 naples.node_name,
                                 "sleep %s" % timeout,
                                 timeout=300)
    tc.cmd_cookies.append(cmd_cookie)

    cmd_cookie = "After aging show session"
    api.Trigger_AddNaplesCommand(
        req, naples.node_name,
        "/nic/bin/halctl show session --ipproto %s | grep %s" %
        (proto, naples.ip_address))
    tc.cmd_cookies.append(cmd_cookie)

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)

    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    return api.types.status.SUCCESS
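
Verification for an aging test like this one typically walks tc.resp.commands alongside tc.cmd_cookies, which were appended in the same order. A minimal sketch of that pairing, assuming the session is expected to be gone (grep exits non-zero) in the "After aging show session" output; the testcase's real Verify is not shown here:

def Verify(tc):
    # Pair each response command with the cookie recorded for it and require
    # that the post-aging session lookup comes back empty.
    for cookie, cmd in zip(tc.cmd_cookies, tc.resp.commands):
        api.PrintCommandResults(cmd)
        if cookie == "After aging show session" and cmd.exit_code == 0:
            # grep still found the session after the timeout => aging failed
            return api.types.status.FAILURE
    return api.types.status.SUCCESS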