Beispiel #1
0
def Setup(tc):
    """Prepare workloads, optionally restart nodes, then set driver features and pcap."""
    if api.IsDryrun():
        return api.types.status.SUCCESS

    tc.nodes = api.GetWorkloadNodeHostnames()
    tc.node_intfs = {}
    srv, cli = _get_workloads(tc)
    tc.workloads = [srv, cli]

    # Optional full node restart before configuring features.
    if getattr(tc.args, 'restart', False):
        if api.RestartNodes(tc.nodes) != api.types.status.SUCCESS:
            api.Logger.error("Node restart failed")
            return api.types.status.FAILURE

    api.Logger.info("Setting driver features")
    if common.setup_features(tc) != api.types.status.SUCCESS:
        api.Logger.info("Setting driver features :Failed")
        return api.types.status.FAILURE
    api.Logger.info("Setting driver features : Success")

    # Start packet captures when requested by testcase args.
    for flag, start_capture in (('capture_pcap', common.start_pcap_capture),
                                ('capture_single_pcap', common.start_single_pcap_capture)):
        if getattr(tc.args, flag, False):
            if start_capture(tc) != api.types.status.SUCCESS:
                return api.types.status.FAILURE
    return api.types.status.SUCCESS
Beispiel #2
0
def debug_dump_all_nodes():
    """Dump debug information for every workload node."""
    api.Logger.verbose("debug_dump_all_nodes : START")
    for hostname in api.GetWorkloadNodeHostnames():
        debug_dump_node_info(hostname)
    api.Logger.verbose("debug_dump_all_nodes : END")
    return
Beispiel #3
0
def Setup(tc):
    """Collect remote workload pairs and configure driver features for each pair."""
    if api.IsDryrun():
        return api.types.status.SUCCESS

    tc.nodes = api.GetWorkloadNodeHostnames()
    tc.workload_pairs = getRemoteWorkloadPairs(tc)

    # Nothing to test without at least one remote pair.
    if not tc.workload_pairs:
        api.Logger.error("Skipping Testcase due to no workload pairs.")
        tc.skip = True
        return api.types.status.FAILURE

    for pair in tc.workload_pairs:
        srv, cli = pair[0], pair[1]
        tc.workloads = [srv, cli]
        api.Logger.info("Setting up interfaces %s(%s) --> %s(%s)" %
                        (srv.workload_name, srv.ip_address, cli.workload_name,
                         cli.ip_address))
        if common.setup_features(tc) != api.types.status.SUCCESS:
            api.Logger.info("Setting driver features :Failed")
            return api.types.status.FAILURE

    return api.types.status.SUCCESS
Beispiel #4
0
def ServerCmd(port = None, time=None, run_core=None, jsonOut=False, naples=False,
              server_ip=None):
    """Build an iperf3 server command string.

    port      -- required listen port.
    time      -- optional -t duration.
    run_core  -- optional -A CPU affinity.
    jsonOut   -- add -J for JSON output.
    naples    -- run on Naples (unix-style command, iper3_env prefix).
    server_ip -- optional -B bind address.
    """
    assert(port)
    # OS of the first workload node decides windows vs unix command form.
    node_os = None
    for node in api.GetWorkloadNodeHostnames():
        node_os = api.GetNodeOs(node)
        break

    if node_os == "windows" and not naples:
        cmd = [api.WINDOWS_POWERSHELL_CMD + " \"iperf3.exe", "-s", "-p", str(port)]
    else:
        cmd = ["iperf3", "-s", "-p", str(port)]
        if naples:
            cmd = iper3_env + cmd

    if run_core:
        cmd.extend(["-A", str(run_core)])

    if time:
        cmd.extend(["-t", str(time)])

    if jsonOut:
        cmd.append("-J")

    # no periodic output
    cmd.extend(["-i", "0"])

    # BUGFIX: the -B bind option was previously placed after the return
    # statements and therefore unreachable dead code.
    if server_ip:
        cmd.extend(["-B", server_ip])

    if node_os != "windows" or naples:
        return " ".join(cmd)
    # Windows powershell invocation needs the closing quote.
    return " ".join(cmd) + "\""
Beispiel #5
0
def Setup(tc):
    """VM-move setup: record node/uuid maps, verify connectivity, start fuz."""
    tc.Nodes = api.GetNaplesHostnames()
    tc.AllNodes = api.GetWorkloadNodeHostnames()
    tc.uuidMap = api.GetNaplesNodeUuidMap()
    tc.new_node = None
    tc.old_node = None
    tc.move_info = []
    # vm_type selects the move flavor: non-DSC->DSC vs DSC->DSC.
    tc.vm_non_dsc_to_dsc = (tc.args.vm_type == 'non_dsc_to_dsc')
    tc.vm_dsc_to_dsc = not tc.vm_non_dsc_to_dsc
    tc.num_moves = int(getattr(tc.args, "num_moves", 1))

    getNonNaplesNodes(tc)
    if arping.ArPing(tc) != api.types.status.SUCCESS:
        api.Logger.info("arping failed on setup")
    # Short-circuits: remote ping only runs when local ping succeeded.
    if (ping.TestPing(tc, 'local_only', 'ipv4', 64) != api.types.status.SUCCESS
            or ping.TestPing(tc, 'remote_only', 'ipv4', 64) != api.types.status.SUCCESS):
        api.Logger.info("ping test failed on setup")
        return api.types.status.FAILURE

    #Start Fuz
    if start_fuz(tc) != api.types.status.SUCCESS:
        api.Logger.error("Fuz start failed")
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Beispiel #6
0
def getNativeWorkloads():
    """Return workloads backed by native host interfaces (mgmt intfs excluded)."""
    native_if = {}
    for node in api.GetWorkloadNodeHostnames():
        intfs = list(api.GetWorkloadNodeHostInterfaces(node))
        # Exclude host side management interface from this test case on naples
        if api.IsNaplesNode(node):
            for mgmt in naples_utils.GetHostInternalMgmtInterfaces(node):
                if mgmt in intfs:
                    intfs.remove(mgmt)
        native_if[node] = intfs

    # Keep only workloads whose interface is a native interface of their node.
    return [w for w in api.GetWorkloads()
            if w.interface in native_if.get(w.node_name, [])]
Beispiel #7
0
def start_single_pcap_capture(tc):
    """Start one background tcpdump on the first remote workload pair's interface.

    Best-effort: any exception is logged and SUCCESS is returned so a capture
    problem does not fail the testcase. On success the trigger response is
    stored in tc.pcap_trigger and the file name in tc.pcap_filename.
    """
    try:
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        tc.pcap_cmds = []
        # Capture on the first remote workload pair's local interface.
        wl = api.GetRemoteWorkloadPairs()[0][0]
        intf = wl.interface
        tc.pcap_filename = pcap_file_name(intf)
        cmd = cmd_builder.tcpdump_cmd(intf, tc.pcap_filename)
        # BUGFIX: previously passed undefined name 'n'; the command must run
        # on the node owning the captured interface.
        api.Trigger_AddHostCommand(req, wl.node_name, cmd, background=True)
        resp = api.Trigger(req)
        for cmd in resp.commands:
            # Background commands must return a handle; absence means startup failed.
            if cmd.handle == None or len(cmd.handle) == 0:
                api.Logger.error("Error starting pcap : %s " % cmd.command)
                api.Logger.error("Std Output : %s " % cmd.stdout)
                # BUGFIX: stderr was being logged from cmd.stdout.
                api.Logger.error("Std Err :  %s " % cmd.stderr)
                return api.types.status.FAILURE
            api.Logger.info("Success running cmd : %s" % cmd.command)
        tc.pcap_trigger = resp
        return api.types.status.SUCCESS
    except Exception:
        api.Logger.info("failed to start single pcap capture")
        api.Logger.debug(
            "failed to start single pcap capture. error was: {0}".format(
                traceback.format_exc()))
        return api.types.status.SUCCESS
Beispiel #8
0
def Verify(tc):
    """Compare client/server transfer files and report nc command results."""
    nodes = api.GetWorkloadNodeHostnames()
    # Naples<->Naples topology is not exercised by this test.
    if api.IsNaplesNode(nodes[0]) and api.IsNaplesNode(nodes[1]):
        return api.types.status.DISABLED
    if tc.resp is None:
        return api.types.status.FAILURE

    logs_dir = tc.GetLogsDir()
    if not filecmp.cmp(logs_dir + '/tcp_proxy_client.dat',
                       logs_dir + '/tcp_proxy_server.dat', shallow=False):
        api.Logger.error("Client and server files do not match")
        return api.types.status.FAILURE

    result = api.types.status.SUCCESS
    api.Logger.info("NC Results for %s" % (tc.cmd_descr))
    for idx, cmd in enumerate(tc.resp.commands):
        api.Logger.info("%s" % (tc.cmd_cookies[idx]))
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            #TOOD nc seems to be stuck sometimes, have to debug this
            #result = api.types.status.FAILURE
            pass
    return result
Beispiel #9
0
def Setup(tc):
    """Record the first two workload node names on the testcase."""
    tc.skip = False

    hostnames = api.GetWorkloadNodeHostnames()
    tc.naples_node0 = hostnames[0]
    tc.naples_node1 = hostnames[1]

    return api.types.status.SUCCESS
Beispiel #10
0
def __get_role(workload_peers):
    '''
    Assign alternating client/server roles to each peer workload,
    based on the index of its node in the workload-node list.
    '''
    roles = ('client', 'server')
    hostnames = api.GetWorkloadNodeHostnames()

    for peer in workload_peers.keys():
        peer.role = roles[hostnames.index(peer.node_name) % 2]
Beispiel #11
0
def stop_single_pcap_capture(tc):
    """Terminate the single pcap capture (if one was started) and fetch the file."""
    if not getattr(tc, "pcap_trigger", None):
        return api.types.status.SUCCESS
    api.Trigger_TerminateAllCommands(tc.pcap_trigger)
    # BUGFIX: previously referenced undefined name 'n'; the capture node is
    # the first remote workload pair's node (matches start_single_pcap_capture).
    n = api.GetRemoteWorkloadPairs()[0][0].node_name
    tc_dir = tc.GetLogsDir()
    resp = api.CopyFromHost(n, [tc.pcap_filename], tc_dir)
    if resp == None or resp.api_response.api_status != types_pb2.API_STATUS_OK:
        api.Logger.error("Failed to copy from  to Node: %s" % n)
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
Beispiel #12
0
def Setup(tc):
    """Gather per-node interface objects and configure them."""
    tc.nodes = api.GetWorkloadNodeHostnames()

    # node name -> interface object
    tc.node_intfs = {node: naples_workload_api.GetNodeInterface(node)
                     for node in tc.nodes}

    if interface.ConfigureInterfaces(tc) != api.types.status.SUCCESS:
        api.Logger.error("Error in setting up interfaces")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
Beispiel #13
0
def getNativeWorkloadIntfs(tc):
    """Collect native (non-mgmt) host interfaces per workload node onto tc.

    Populates tc.host_nodes, tc.mgmt_intf_list (Naples nodes only) and
    tc.native_intf_list (every node, mgmt interfaces removed).
    """
    tc.host_nodes = api.GetWorkloadNodeHostnames()
    tc.native_intf_list = {}
    tc.mgmt_intf_list = {}

    # Get host interfaces on all nodes
    for node in tc.host_nodes:
        host_intfs = list(api.GetWorkloadNodeHostInterfaces(node))
        mgmt_intfs = []
        if api.IsNaplesNode(node):
            mgmt_intfs = list(naples_host_utils.GetHostInternalMgmtInterfaces(node))
            tc.mgmt_intf_list[node] = mgmt_intfs
        # BUGFIX: previously only nodes present in mgmt_intf_list (Naples
        # nodes) were added to native_intf_list, silently dropping the
        # interfaces of non-Naples nodes.
        tc.native_intf_list[node] = list(set(host_intfs) - set(mgmt_intfs))
    return api.types.status.SUCCESS
Beispiel #14
0
def stop_pcap_capture(tc):
    """Terminate all pcap captures and copy each pcap file back from its node.

    Iterates tc.host_intfs (node -> interface objects); fails when a node has
    no interfaces or a copy fails.
    """
    api.Trigger_TerminateAllCommands(tc.pcap_trigger)
    tc_dir = tc.GetLogsDir()
    for n, host_intfs in tc.host_intfs.items():
        if len(host_intfs) == 0:
            api.Logger.error("No host interfaces for node :%s" % n)
            return api.types.status.FAILURE
        for intfObj in host_intfs:
            intf = intfObj.Name()
            resp = api.CopyFromHost(n, [pcap_file_name(intf)], tc_dir)
            if resp == None or resp.api_response.api_status != types_pb2.API_STATUS_OK:
                # BUGFIX: message previously read "copy from  to Node" with the
                # file name missing.
                api.Logger.error("Failed to copy %s from Node: %s" %
                                 (pcap_file_name(intf), n))
                return api.types.status.FAILURE

    return api.types.status.SUCCESS
Beispiel #15
0
def Setup(tc):
    """Classify every interface of the Naples node as host-side or Naples-side."""
    tc.skip = False
    node_names = api.GetWorkloadNodeHostnames()

    if api.IsNaplesNode(node_names[0]):
        tc.naples_node, tc.peer_node = node_names[0], node_names[1]
    elif api.IsNaplesNode(node_names[1]):
        tc.naples_node, tc.peer_node = node_names[1], node_names[0]
    else:
        api.Logger.verbose("Skipping as there are no Naples nodes")
        tc.skip = True
        return api.types.status.IGNORED

    tc.host_intfs = list(api.GetNaplesHostInterfaces(tc.naples_node))
    # Mgmt interface on host for network connection to Naples over PCIE
    tc.host_int_intfs = naples_host_utils.GetHostInternalMgmtInterfaces(tc.naples_node)
    tc.inband_intfs = naples_host_utils.GetNaplesInbandInterfaces(tc.naples_node)
    tc.naples_int_mgmt_intfs = naples_host_utils.GetNaplesInternalMgmtInterfaces(tc.naples_node)
    tc.naples_oob_mgmt_intfs = naples_host_utils.GetNaplesOobInterfaces(tc.naples_node)

    # host-side interface lists map to True, Naples-side lists to False
    tc.on_host = {}
    for intf_list, on_host in ((tc.host_intfs, True),
                               (tc.host_int_intfs, True),
                               (tc.inband_intfs, False),
                               (tc.naples_int_mgmt_intfs, False),
                               (tc.naples_oob_mgmt_intfs, False)):
        for intf in intf_list:
            tc.on_host[intf] = on_host

    tc.all_intfs = (tc.host_intfs + tc.host_int_intfs + tc.inband_intfs +
                    tc.naples_int_mgmt_intfs + tc.naples_oob_mgmt_intfs)
    api.Logger.debug("Promiscuous test interfaces: ", tc.all_intfs)

    return api.types.status.SUCCESS
Beispiel #16
0
def Verify(tc):
    """Check iperf command results; disabled on Naples<->Naples topologies."""
    nodes = api.GetWorkloadNodeHostnames()
    if api.IsNaplesNode(nodes[0]) and api.IsNaplesNode(nodes[1]):
        return api.types.status.DISABLED
    if tc.resp is None:
        return api.types.status.FAILURE

    result = api.types.status.SUCCESS
    api.Logger.info("Iperf Results for %s" % (tc.cmd_descr))
    for idx, cmd in enumerate(tc.resp.commands):
        api.Logger.info("%s" % (tc.cmd_cookies[idx]))
        api.PrintCommandResults(cmd)
        # Foreground command failure fails the testcase.
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            result = api.types.status.FAILURE
    return result
Beispiel #17
0
def Setup(tc):
    """Bulk VM-move setup: pick nodes, verify connectivity, queue moves, start fuz."""
    requested = getattr(tc.args, "node", None)
    if requested:
        node_list = [requested]
    else:
        # add all nodes in the topo
        node_list = [n.Name() for n in api.GetNodes()]

    tc.Nodes = api.GetNaplesHostnames()
    tc.AllNodes = api.GetWorkloadNodeHostnames()
    tc.uuidMap = api.GetNaplesNodeUuidMap()
    tc.move_info = []
    tc.vm_dsc_to_dsc = True
    tc.num_moves = 0

    # A 'conntrack' arg enables detailed verification.
    tc.detailed = hasattr(tc.args, "conntrack")

    getNonNaplesNodes(tc)
    if arping.ArPing(tc) != api.types.status.SUCCESS:
        api.Logger.info("arping failed on setup")
    if (ping.TestPing(tc, 'local_only', 'ipv4', 64) != api.types.status.SUCCESS
            or ping.TestPing(tc, 'remote_only', 'ipv4', 64) != api.types.status.SUCCESS):
        api.Logger.info("ping test failed on setup")
        return api.types.status.FAILURE

    for node in node_list:
        (wls, new_node) = getWorkloadsToRemove(tc, node)
        tc.num_moves = len(wls)
        vm_utils.update_move_info(tc, wls, False, new_node)

    #Start Fuz
    if vm_utils.start_fuz(tc) != api.types.status.SUCCESS:
        api.Logger.error("Fuz start failed")
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Beispiel #18
0
 def __mk_testcase_directory(self, newdir):
     """Create *newdir* (mode 777) on every workload node and running workload.

     Returns types.status.SUCCESS when every creation succeeded,
     types.status.FAILURE otherwise.
     """
     Logger.debug("Creating Testcase directory: %s" % newdir)
     command = "mkdir -p %s && chmod 777 %s" % (newdir, newdir)
     # Issue all directory creations in one parallel batch.
     req = api.Trigger_CreateAllParallelCommandsRequest()
     # Create on the host of every workload node...
     for nodename in api.GetWorkloadNodeHostnames():
         api.Trigger_AddHostCommand(req, nodename, command)
     # ...and inside every running workload.
     for wl in api.GetWorkloads():
         if api.IsWorkloadRunning(wl.workload_name):
             api.Trigger_AddCommand(req,
                                    wl.node_name,
                                    wl.workload_name,
                                    command,
                                    timeout=60)
     resp = api.Trigger(req)
     if not api.Trigger_IsSuccess(resp):
         Logger.error("Failed to create destination directory %s" % newdir)
         return types.status.FAILURE
     return types.status.SUCCESS
Beispiel #19
0
def Setup(tc):
    """VM-move-with-trigger setup: connectivity checks plus a short fuz run."""
    tc.Nodes = api.GetNaplesHostnames()
    tc.AllNodes = api.GetWorkloadNodeHostnames()
    tc.uuidMap = api.GetNaplesNodeUuidMap()
    tc.new_node = None
    tc.old_node = None
    tc.move_info = []
    # vm_type selects the move flavor: non-DSC->DSC vs DSC->DSC.
    tc.vm_non_dsc_to_dsc = (tc.args.vm_type == 'non_dsc_to_dsc')
    tc.vm_dsc_to_dsc = not tc.vm_non_dsc_to_dsc
    tc.num_moves = int(getattr(tc.args, "num_moves", 1))

    # Optional trigger applied during the move (e.g. port_flap, mgmt_down,
    # delete_ep) and which node ('new'/'old') it applies to.
    tc.trigger = getattr(tc.args, "trigger_type", None)
    tc.trigger_on = getattr(tc.args, "trigger_on", 'new')

    getNonNaplesNodes(tc)
    #Start Fuz

    if arping.ArPing(tc) != api.types.status.SUCCESS:
        api.Logger.info("arping failed on setup")
    # Both pings always run; the results are combined afterwards.
    local_status = ping.TestPing(tc, 'local_only', 'ipv4', 64)
    remote_status = ping.TestPing(tc, 'remote_only', 'ipv4', 64)
    if local_status != api.types.status.SUCCESS or remote_status != api.types.status.SUCCESS:
        api.Logger.info("ping test failed on setup")
        return api.types.status.FAILURE

    # Short fuz run to validate the datapath before the actual test.
    if start_fuz(tc, "20s", "20") != api.types.status.SUCCESS:
        api.Logger.error("Fuz start failed")
        return api.types.status.FAILURE
    stop_fuz(tc)
    return api.types.status.SUCCESS
Beispiel #20
0
def Main(step):
    """Enable l2-fwd-offload on every Naples host interface of all workload nodes."""
    req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    for node in api.GetWorkloadNodeHostnames():
        for intf in api.GetNaplesHostInterfaces(node):
            api.Logger.info("Enable l2-fwd-offload on intf %s" % intf)
            api.Trigger_AddHostCommand(req, node,
                                       "ethtool -K %s l2-fwd-offload on" % intf)
    resp = api.Trigger(req)
    if resp == None:
        return api.types.status.FAILURE
    for cmd in resp.commands:
        if cmd.exit_code != 0:
            api.Logger.info("Enable l2-fwd-offload FAILED!")
            api.Logger.info(cmd.stderr)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
Beispiel #21
0
def Setup(tc):
    """MTU filter setup: find the Naples node, gather native intfs, set OS MTU limits."""
    api.Logger.verbose("MTU filter : Setup")
    global __OS_TYPE
    global __MIN_MTU
    global __MAX_MTU
    tc.skip = False
    result = api.types.status.SUCCESS

    tc.naples_node, res = naples_host_utils.GetNaplesNodeName()
    if res is False:
        tc.skip = True
    if tc.skip:
        api.Logger.error("MTU filter : Setup -> No Naples Topology - So skipping the TC")
        return api.types.status.IGNORED

    if getNativeWorkloadIntfs(tc) != api.types.status.SUCCESS:
        api.Logger.error("MTU filter : Setup -> Failure in retrieving Native Workload interfaces")
        return api.types.status.FAILURE

    # In Intel cards, post MTU change, need to wait for few sec before pinging
    # instead, set max MTU on peer node
    result = initPeerNode(tc, tc.naples_node)

    # MTU bounds depend on the OS of the first workload node only.
    for node in api.GetWorkloadNodeHostnames():
        __OS_TYPE = api.GetNodeOs(node)
        if __OS_TYPE == "freebsd":
            __MIN_MTU = __MIN_MTU_FREEBSD
        elif __OS_TYPE == "windows":
            __MIN_MTU = __MIN_MTU_WINDOWS_IPv4
            __MAX_MTU = __MAX_MTU_WINDOWS
        break

    tc.new_mtu = getMTUconfigs(tc)
    api.Logger.info("MTU filter : new MTU - ", tc.new_mtu)

    api.Logger.info("MTU filter : Setup final result - ", result)
    debug_utils.collect_showtech(result)
    return result
Beispiel #22
0
def Main(tc):
    """Create node interfaces and mgmt/Naples workloads for all non-esx nodes."""
    for node in api.GetWorkloadNodeHostnames():
        # esx nodes are not handled here
        if api.GetNodeOs(node) == "esx":
            continue

        for device in api.GetDeviceNames(node):
            api.Logger.debug("Creating NodeInterface for node: %s device: %s" %
                             (node, device))
            node_if_info = GetNodeInterface(node, device)
            api.Logger.debug("Adding MgmtWorkloads for node: %s device: %s" %
                             (node, device))
            if AddMgmtWorkloads(node_if_info) != api.types.status.SUCCESS:
                api.Logger.debug("Failed to add MgmtWorkloads for node: %s" %
                                 node)
                return api.types.status.FAILURE
            if api.IsNaplesNode(node):
                api.Logger.debug("Adding NaplesWorkloads for node: %s" % node)
                AddNaplesWorkloads(node_if_info)
    return api.types.status.SUCCESS
Beispiel #23
0
def Setup(tc):
    """Distribute the RDMA driver package to Naples nodes and show_gid to the rest."""
    api.SetTestsuiteAttr("driver_path", api.GetHostToolsDir() + '/')
    tc.iota_path = api.GetTestsuiteAttr("driver_path")

    tc.nodes = api.GetNaplesHostnames()
    tc.other_nodes = api.GetWorkloadNodeHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])

    platform_gendir = api.GetTopDir() + '/platform/gen/'
    # Package layout differs between the linux and freebsd driver builds.
    if tc.os == host.OS_TYPE_LINUX:
        tc.pkgname = 'drivers-linux.tar.xz'
        tc.showgid = 'drivers-linux/show_gid'
    else:
        tc.pkgname = 'drivers-freebsd.tar.xz'
        tc.showgid = 'drivers-freebsd/show_gid'

    # Copy RDMA driver to naples nodes
    for node in tc.nodes:
        api.Logger.info("Copying {pkg} to {node}"
                .format(pkg=tc.pkgname, node=node))
        resp = api.CopyToHost(node, [platform_gendir + tc.pkgname])
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to copy {pkg} to {node}: {resp}"
                    .format(pkg=tc.pkgname, node=node, resp=resp))
            return api.types.status.FAILURE

    # Copy show_gid to other nodes
    for node in tc.other_nodes:
        if node in tc.nodes:
            continue
        api.Logger.info("Copying show_gid to {node}"
                .format(node=node))
        resp = api.CopyToHost(node, [platform_gendir + tc.showgid])
        if not api.IsApiResponseOk(resp):
            api.Logger.error("Failed to copy show_gid to {node}: {resp}"
                    .format(node=node, resp=resp))
            return api.types.status.FAILURE

    return api.types.status.SUCCESS
Beispiel #24
0
def Trigger(tc):
    """Switch the NIC profile from TRANSPARENT/ENFORCED to INSERTION/ENFORCE."""
    banner = "\t\t\t########################################################################"
    print(banner)
    print(
        "\t\t\t#            TRANSPARENT, ENFORCE => INSERTION, ENFORCE              #"
    )
    print(banner)

    # Delete workloads
    wl_api.DeleteWorkloads()

    # Reset the config object store
    netagent_api.ResetConfigs()

    # Change mode from unified => hostpin
    api.SetConfigNicMode("hostpin")
    for node_name in api.GetWorkloadNodeHostnames():
        api.SetTestbedNicMode("hostpin", node_name=node_name)

    # Change mode from TRANSPARENT, ENFORCED => INSERTION, ENFORCE
    ret = netagent_api.switch_profile(fwd_mode="INSERTION",
                                      policy_mode="ENFORCED")
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to switch profile")
        return ret

    # HW push
    ret = wl_api.Main(None)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to push hw config")
        return ret

    api.Logger.info(
        "Successfully changed the mode TRANSPARENT, ENFORCED => INSERTION, ENFORCE"
    )
    return api.types.status.SUCCESS
Beispiel #25
0
def start_pcap_capture(tc):
    """Start background tcpdump on every interface in tc.host_intfs.

    Returns FAILURE when a node has no interfaces or a capture fails to
    start; on success the trigger response is stored in tc.pcap_trigger.
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    tc.pcap_cmds = []
    for n, host_intfs in tc.host_intfs.items():
        if len(host_intfs) == 0:
            api.Logger.error("No host interfaces for node :%s" % n)
            return api.types.status.FAILURE
        for intfObj in host_intfs:
            intf = intfObj.Name()
            cmd = cmd_builder.tcpdump_cmd(intf, pcap_file_name(intf))
            api.Trigger_AddHostCommand(req, n, cmd, background=True)

    resp = api.Trigger(req)
    for cmd in resp.commands:
        # Background commands must return a handle; absence means startup failed.
        if cmd.handle == None or len(cmd.handle) == 0:
            api.Logger.error("Error starting pcap : %s " % cmd.command)
            api.Logger.error("Std Output : %s " % cmd.stdout)
            # BUGFIX: stderr was being logged from cmd.stdout.
            api.Logger.error("Std Err :  %s " % cmd.stderr)
            return api.types.status.FAILURE
        api.Logger.info("Success running cmd : %s" % cmd.command)
    tc.pcap_trigger = resp
    return api.types.status.SUCCESS
Beispiel #26
0
def Setup(tc):
    """SR-IOV setup: find a linux Naples host, create VFs, configure VF 0 and ping-check.

    host1 = first linux Naples node, host2 = another node. Creates the
    supported number of VFs on host1's first PF, sets trust/MAC/IP/state on
    VF 0, and verifies connectivity to host2's first PF.
    """
    host1 = None
    host2 = None
    api.Logger.info("Nodes: %s" % api.GetWorkloadNodeHostnames())
    for _node in api.GetWorkloadNodeHostnames():
        if not host1 and api.IsNaplesNode(_node) and api.GetNodeOs(
                _node) == "linux":
            host1 = _node
        else:
            host2 = _node

    if not host1:
        api.Logger.error("Unable to find a Naples node with linux os")
        return api.types.status.ERROR
    if not host2:
        api.Logger.error("Unable to find a node with linux os")
        return api.types.status.ERROR

    tc.host1 = host1
    tc.host2 = host2
    tc.pf_1 = api.GetWorkloadNodeHostInterfaces(tc.host1)[0]
    tc.pf_2 = api.GetWorkloadNodeHostInterfaces(tc.host2)[0]

    api.Logger.info("Host interface pair[%s, %s]" % (tc.pf_1, tc.pf_2))

    tc.num_vfs = GetSupportedVFs(tc.host1, tc.pf_1)
    api.Logger.info("Host %s PF %s supports %d VFs" %
                    (tc.host1, tc.pf_1, tc.num_vfs))

    if tc.num_vfs == 0:
        # BUGFIX: previously referenced undefined name 'host' (NameError).
        api.Logger.warn(
            "Max supported VFs on host %s is 0, expected non-zero" % tc.host1)
        return api.types.status.ERROR

    if CreateVFs(tc.host1, tc.pf_1, tc.num_vfs) != api.types.status.SUCCESS:
        return api.types.status.ERROR

    tc.vfid = 0
    tc.vf_intf = GetVFName(tc.host1, tc.pf_1, tc.vfid)
    tc.remote_intf = tc.pf_2
    # Set trust on to set the mac addr
    if SetVFTrust(tc.host1, tc.pf_1, tc.vfid,
                  "on") != api.types.status.SUCCESS:
        return api.types.status.FAILURE
    # Assign mac addr to VF
    if SetVFNdevMac(host1, tc.pf_1, tc.vfid,
                    "00:22:44:66:88:a1") != api.types.status.SUCCESS:
        return api.types.status.ERROR

    # Assign Ips to the vf interface and corresponding PF on the remote node.
    tc.vf_ip = "30.0.0.1"
    tc.remote_ip = "30.0.0.2"

    if SetIp(host1, tc.vf_intf, tc.vf_ip, 24) != api.types.status.SUCCESS:
        return api.types.status.ERROR
    if SetIp(host2, tc.remote_intf, tc.remote_ip,
             24) != api.types.status.SUCCESS:
        return api.types.status.ERROR
    if SetVFNdevState(tc.host1, tc.pf_1, tc.vfid,
                      "up") != api.types.status.SUCCESS:
        return api.types.status.ERROR
    if ping_check(tc.host1, tc.vf_intf, tc.remote_ip,
                  retries=5) != api.types.status.SUCCESS:
        api.Logger.error("Unable to ping the remote interface")
        return api.types.status.ERROR
    return api.types.status.SUCCESS
Beispiel #27
0
def Setup(tc):
    """Promiscuous-mode setup: classify interfaces, pick a target IP, plant a static ARP."""
    tc.skip = False
    node_names = api.GetWorkloadNodeHostnames()

    if api.IsNaplesNode(node_names[0]):
        tc.naples_node, tc.peer_node = node_names[0], node_names[1]
    elif api.IsNaplesNode(node_names[1]):
        tc.naples_node, tc.peer_node = node_names[1], node_names[0]
    else:
        api.Logger.verbose("Skipping as there are no Naples nodes")
        tc.skip = True
        return api.types.status.IGNORED

    tc.expect_pkt = {}
    tc.on_host = {}

    tc.host_intfs = list(api.GetNaplesHostInterfaces(tc.naples_node))
    # Mgmt interface on host for network connection to Naples over PCIE
    tc.host_int_intfs = naples_host_utils.GetHostInternalMgmtInterfaces(tc.naples_node)
    tc.inband_intfs = naples_host_utils.GetNaplesInbandInterfaces(tc.naples_node)
    tc.naples_int_mgmt_intfs = naples_host_utils.GetNaplesInternalMgmtInterfaces(tc.naples_node)
    tc.naples_oob_mgmt_intfs = naples_host_utils.GetNaplesOobInterfaces(tc.naples_node)

    # (interfaces, expect UUC packets from uplink while promiscuous, lives on host)
    # - host and inband intfs see UUC traffic when promiscuous
    # - host internal mgmt, naples internal mgmt [int_mnic0] and oob mgmt
    #   [oob_mnic0] never receive uplink packets regardless of promiscuity
    classification = (
        (tc.host_intfs, True, True),
        (tc.host_int_intfs, False, True),
        (tc.inband_intfs, True, False),
        (tc.naples_int_mgmt_intfs, False, False),
        (tc.naples_oob_mgmt_intfs, False, False),
    )
    for intf_list, expect, on_host in classification:
        for intf in intf_list:
            tc.expect_pkt[intf] = expect
            tc.on_host[intf] = on_host

    tc.all_intfs = (tc.host_intfs + tc.host_int_intfs + tc.inband_intfs +
                    tc.naples_int_mgmt_intfs + tc.naples_oob_mgmt_intfs)
    api.Logger.info("Promiscuous test interfaces: ", tc.all_intfs)

    # List of 'default vlan' workloads on peer node
    tc.peer_workloads = [wl for wl in api.GetWorkloads()
                         if wl.encap_vlan == 0 and wl.node_name == tc.peer_node]

    # Random IP address within workload[0] IP address domain
    tc.target_IP = str(ipaddress.ip_address(tc.peer_workloads[0].ip_address) + 2)

    #TODO: Generate a random MAC instead
    tc.random_mac = "00:0f:b7:aa:bb:cc"

    api.Logger.info("Random_Ip = %s Random_MAC = %s " %
                    (tc.target_IP, tc.random_mac))

    host_utils.DeleteARP(tc.peer_node, tc.peer_workloads[0].interface,
                         tc.target_IP)
    if host_utils.AddStaticARP(tc.peer_node, tc.peer_workloads[0].interface,
                               tc.target_IP,
                               tc.random_mac) != api.types.status.SUCCESS:
        api.Logger.error("Failed to add Static ARP entry on %s" %
                         (tc.peer_node))
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
Beispiel #28
0
def Setup(tc):
    """Promiscuous/non-promiscuous setup variant: classify interfaces, pick a target IP."""
    tc.skip = False
    node_names = api.GetWorkloadNodeHostnames()

    if api.IsNaplesNode(node_names[0]):
        tc.naples_node, tc.peer_node = node_names[0], node_names[1]
    elif api.IsNaplesNode(node_names[1]):
        tc.naples_node, tc.peer_node = node_names[1], node_names[0]
    else:
        api.Logger.verbose("Skipping as there are no Naples nodes")
        tc.skip = True
        return api.types.status.IGNORED

    # Only the two supported modes are accepted.
    if tc.args.mode not in ("promiscuous", "non-promiscuous"):
        api.Logger.error("Unknown mode '%s'. Skipping testcase" %
                         (tc.args.mode))
        tc.skip = True
        return api.types.status.IGNORED

    tc.host_intfs = list(api.GetNaplesHostInterfaces(tc.naples_node))
    # Mgmt interface on host for network connection to Naples over PCIE (Subset of tc.host_intfs)
    tc.host_int_intfs = naples_host_utils.GetHostInternalMgmtInterfaces(tc.naples_node)
    tc.inband_intfs = naples_host_utils.GetNaplesInbandInterfaces(tc.naples_node)
    tc.naples_int_mgmt_intfs = naples_host_utils.GetNaplesInternalMgmtInterfaces(tc.naples_node)
    tc.naples_oob_mgmt_intfs = naples_host_utils.GetNaplesOobInterfaces(tc.naples_node)

    # host-side interface lists map to True, Naples-side lists to False
    tc.on_host = {}
    for intf_list, on_host in ((tc.host_intfs, True),
                               (tc.host_int_intfs, True),
                               (tc.inband_intfs, False),
                               (tc.naples_int_mgmt_intfs, False),
                               (tc.naples_oob_mgmt_intfs, False)):
        for intf in intf_list:
            tc.on_host[intf] = on_host

    tc.all_intfs = (tc.host_intfs + tc.host_int_intfs + tc.inband_intfs +
                    tc.naples_int_mgmt_intfs + tc.naples_oob_mgmt_intfs)

    # List of 'default vlan' workloads on peer node
    tc.peer_workloads = [wl for wl in api.GetWorkloads()
                         if wl.encap_vlan == 0 and wl.node_name == tc.peer_node]

    # Random IP address within workload[0] IP address domain
    tc.target_IP = str(ipaddress.ip_address(tc.peer_workloads[0].ip_address) + 4)

    return api.types.status.SUCCESS
Beispiel #29
0
def _push_ipsec_json(json_file, push_nodes, strict_fetch=True):
    """Load IPsec objects from <topology>/ipsec/<json_file> and push them.

    json_file    -- file name under the topology's ipsec/ directory.
    push_nodes   -- list of node names to push the objects to.
    strict_fetch -- when False, a failure to read the pushed objects back is
                    only logged, not treated as fatal (legacy behaviour kept
                    for the second node).
    Returns api.types.status.SUCCESS or FAILURE.
    """
    new_objects = netagent_cfg_api.AddOneConfig(
        api.GetTopologyDirectory() + "/ipsec/" + json_file)
    if len(new_objects) == 0:
        api.Logger.error(
            "Adding new objects to store failed for %s" % json_file)
        return api.types.status.FAILURE

    ret = netagent_cfg_api.PushConfigObjects(new_objects,
                                             node_names=push_nodes,
                                             ignore_error=True)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Unable to push %s objects to nodes %s" %
                         (json_file, push_nodes))
        return api.types.status.FAILURE

    if len(netagent_cfg_api.GetConfigObjects(new_objects)) == 0:
        api.Logger.error("Unable to fetch newly pushed objects")
        if strict_fetch:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS


# Single ESP/AEAD transform used for both directions of the tunnel:
# 32-byte 0x41 key plus 4-byte zero salt, mirroring the Naples-side JSON.
_XFRM_AEAD = ("aead 'rfc4106(gcm(aes))' "
              "0x414141414141414141414141414141414141414141414141414141414141414100000000 128")


def _configure_linux_ipsec(workload, local_ip, remote_ip, port):
    """Program kernel IPsec ('ip xfrm') on a non-Naples Linux workload.

    Installs tunnel-mode ESP policies and SAs between local_ip and remote_ip
    for TCP and UDP traffic on the given port, then dumps xfrm state/policy
    for the logs.  Commands run serially; the request is terminated once
    triggered (fire-and-forget, as in the original).
    """
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    def _add(cmd):
        # All commands run on the same workload.
        api.Trigger_AddCommand(req, workload.node_name,
                               workload.workload_name, cmd)

    _add("sudo ip xfrm state flush")
    _add("sudo ip xfrm policy flush")

    policy = ("sudo ip xfrm policy add src %s/32 dst %s/32 proto %s %s %s "
              "dir %s tmpl src %s dst %s proto esp mode tunnel")

    def _add_policies(proto):
        # in/fwd match traffic arriving from the peer (peer's dport),
        # out matches locally originated traffic (our sport).
        _add(policy % (remote_ip, local_ip, proto, "dport", port, "in",
                       remote_ip, local_ip))
        _add(policy % (remote_ip, local_ip, proto, "dport", port, "fwd",
                       remote_ip, local_ip))
        _add(policy % (local_ip, remote_ip, proto, "sport", port, "out",
                       local_ip, remote_ip))

    # Original command order preserved: TCP policies, SAs, UDP policies.
    _add_policies("tcp")

    # Security associations for both directions.
    for src, dst in ((remote_ip, local_ip), (local_ip, remote_ip)):
        _add("sudo ip xfrm state add src %s dst %s proto esp spi 0x01 "
             "mode tunnel %s sel src %s/32 dst %s/32" %
             (src, dst, _XFRM_AEAD, src, dst))

    _add_policies("udp")

    _add("sudo ip xfrm state list")
    _add("sudo ip xfrm policy list")

    trig_resp = api.Trigger(req)
    api.Trigger_TerminateAllCommands(trig_resp)


def Trigger(tc):
    """Run an nc transfer through an IPsec tunnel between the two nodes.

    Configures IPsec on both workload nodes — via netagent objects on Naples
    nodes, via 'ip xfrm' on Linux nodes — then transfers a file with nc over
    tc.iterators.protocol/port and captures IPsec state on both ends.  Fills
    tc.cmd_cookies, tc.cmd_descr and tc.resp for Verify() and copies
    ipsec_client.dat / ipsec_server.dat into the logs directory.
    Returns api.types.status.SUCCESS or FAILURE.
    """
    tc.cmd_cookies = []
    tc.cmd_cookies1 = []
    tc.cmd_cookies2 = []

    nodes = api.GetWorkloadNodeHostnames()

    # Query once and reuse the answer for both nodes: the store is shared,
    # so re-querying after node 1's push would wrongly skip node 2.
    encrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSAEncrypt')
    decrypt_objects = netagent_cfg_api.QueryConfigs(kind='IPSecSADecrypt')
    policy_objects = netagent_cfg_api.QueryConfigs(kind='IPSecPolicy')

    # Tunnel endpoints: node index 0 owns .101, node index 1 owns .103.
    endpoint_ips = ("192.168.100.101", "192.168.100.103")

    for idx in (0, 1):
        node = nodes[idx]
        if api.IsNaplesNode(node):
            suffix = "_node%d.json" % (idx + 1)
            # Node 2 historically tolerated a failed read-back of the pushed
            # objects (its failure returns were commented out).
            strict = idx == 0
            for existing, json_file in (
                    (encrypt_objects, "ipsec_encryption" + suffix),
                    (decrypt_objects, "ipsec_decryption" + suffix),
                    (policy_objects, "ipsec_policies" + suffix)):
                if len(existing) == 0:
                    ret = _push_ipsec_json(json_file, [node], strict)
                    if ret != api.types.status.SUCCESS:
                        return api.types.status.FAILURE
        else:
            workload = api.GetWorkloads(node)[0]
            _configure_linux_ipsec(workload,
                                   endpoint_ips[idx],
                                   endpoint_ips[1 - idx],
                                   tc.iterators.port)

    w1 = api.GetWorkloads(nodes[0])[0]
    w2 = api.GetWorkloads(nodes[1])[0]

    # Pick nc roles.  The transfer itself is skipped (bypass) when both ends
    # are Naples, since nc is not runnable there.
    bypass_test = False
    if w1.IsNaples() and w2.IsNaples():
        api.Logger.info(
            "Both workloads are Naples, %s is nc client, %s is nc server, bypassing test"
            % (w1.node_name, w2.node_name))
        nc_client_wl, nc_server_wl = w1, w2
        bypass_test = True
    elif w1.IsNaples():
        api.Logger.info("%s is Naples and nc client, %s is nc server" %
                        (w1.node_name, w2.node_name))
        nc_client_wl, nc_server_wl = w1, w2
    elif w2.IsNaples():
        api.Logger.info("%s is Naples and nc client, %s is nc server" %
                        (w2.node_name, w1.node_name))
        nc_client_wl, nc_server_wl = w2, w1
    else:
        # Bugfix: the original left the roles unbound (NameError) when
        # neither workload is Naples; default to w1 as client.
        nc_client_wl, nc_server_wl = w1, w2

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s) on %s port %s" %\
                   (nc_server_wl.workload_name, nc_server_wl.ip_address,
                    nc_client_wl.workload_name, nc_client_wl.ip_address,
                    tc.iterators.protocol, tc.iterators.port)

    api.Logger.info("Starting NC test over IPSec from %s" % (tc.cmd_descr))

    if not bypass_test:
        tc.cmd_cookies.append("Creating test file on %s" %
                              (nc_client_wl.workload_name))
        api.Trigger_AddCommand(
            req, nc_client_wl.node_name, nc_client_wl.workload_name,
            "base64 /dev/urandom | head -1000 > ipsec_client.dat")

        # Shrink the MTU so ESP encapsulation overhead does not push packets
        # past the path MTU.
        tc.cmd_cookies.append("Setting MTU to smaller value on %s" %
                              (nc_client_wl.workload_name))
        api.Trigger_AddCommand(req, nc_client_wl.node_name,
                               nc_client_wl.workload_name,
                               "ifconfig %s mtu 1048" % nc_client_wl.interface)

        tc.cmd_cookies.append("Running nc server on %s" %
                              (nc_server_wl.workload_name))
        server_cmd = ("nc -l %s > ipsec_server.dat"
                      if tc.iterators.protocol == "tcp"
                      else "nc --udp -l %s > ipsec_server.dat")
        api.Trigger_AddCommand(req,
                               nc_server_wl.node_name,
                               nc_server_wl.workload_name,
                               server_cmd % (tc.iterators.port),
                               background=True)

        tc.cmd_cookies.append("Running nc client on %s" %
                              (nc_client_wl.workload_name))
        client_cmd = ("nc %s %s < ipsec_client.dat"
                      if tc.iterators.protocol == "tcp"
                      else "nc --udp %s %s < ipsec_client.dat")
        api.Trigger_AddCommand(
            req, nc_client_wl.node_name, nc_client_wl.workload_name,
            client_cmd % (nc_server_wl.ip_address, tc.iterators.port))
    else:
        # Bypass: create empty files so the copy-back below still succeeds.
        tc.cmd_cookies.append("Creating dummy file on %s" %
                              (nc_client_wl.workload_name))
        api.Trigger_AddCommand(
            req, nc_client_wl.node_name, nc_client_wl.workload_name,
            "rm -f ipsec_client.dat ; touch ipsec_client.dat")

        tc.cmd_cookies.append("Creating dummy file on %s" %
                              (nc_server_wl.workload_name))
        api.Trigger_AddCommand(
            req, nc_server_wl.node_name, nc_server_wl.workload_name,
            "rm -f ipsec_server.dat ; touch ipsec_server.dat")

    # Capture IPsec state on both ends for the logs: halctl on Naples,
    # xfrm policy dump on Linux.
    for wl in (nc_client_wl, nc_server_wl):
        tc.cmd_cookies.append("IPSec state on %s AFTER running nc test" %
                              (wl.node_name))
        if wl.IsNaples():
            api.Trigger_AddNaplesCommand(
                req, wl.node_name, "/nic/bin/halctl show ipsec-global-stats")
        else:
            api.Trigger_AddCommand(req, wl.node_name, wl.workload_name,
                                   "sudo ip xfrm policy show")

    trig_resp = api.Trigger(req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    # Pull both files back so Verify can compare client vs server content.
    for wl, fname in ((nc_client_wl, 'ipsec_client.dat'),
                      (nc_server_wl, 'ipsec_server.dat')):
        resp = api.CopyFromWorkload(wl.node_name, wl.workload_name,
                                    [fname], tc.GetLogsDir())
        if resp is None:
            api.Logger.error("Could not find %s" % fname)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
Beispiel #30
0
def ClientCmd(server_ip, port = None, time=10, pktsize=None, proto='tcp', run_core=None,
              ipproto='v4', bandwidth="100G", num_of_streams = None, jsonOut=False,
              connect_timeout=None, client_ip=None, client_port=None, packet_count=None,
              naples=False, msssize=None, reverse=False):
    """Build an iperf3 client command line for the first workload node.

    server_ip/port -- target server address and (mandatory) port.
    time           -- run time in seconds; ignored when packet_count is set.
    proto          -- 'tcp' or 'udp' (-u).
    ipproto        -- 'v4' or 'v6' (-6).
    naples         -- prefix the Naples iperf3 environment and skip the
                      Windows powershell wrapping.
    reverse        -- server sends, client receives (-R).
    Returns the command as a single string; on Windows (non-naples) it is
    wrapped in a powershell invocation.
    """
    assert(port)
    # The OS of the first workload node decides the command flavour.
    node_os = api.GetNodeOs(api.GetWorkloadNodeHostnames()[0])

    if node_os == "windows" and not naples:
        # Opening quote of the powershell wrapper; closed before returning.
        cmd = [api.WINDOWS_POWERSHELL_CMD + " \"iperf3.exe",
               "-c", str(server_ip), "-p", str(port)]
    else:
        cmd = ["iperf3", "-c", str(server_ip), "-p", str(port)]
        if naples:
            cmd = iper3_env + cmd

    # Emit -b exactly once (the original appended it twice for UDP and
    # produced "-b None" when bandwidth was explicitly disabled).
    if bandwidth:
        cmd.extend(["-b", str(bandwidth)])

    if client_ip:
        cmd.extend(["-B", str(client_ip)])

    if client_port:
        cmd.extend(["--cport", str(client_port)])

    # -t and -k are mutually exclusive; packet_count wins.
    if time and packet_count is None:
        cmd.extend(["-t", str(time)])

    if run_core:
        cmd.extend(["-A", str(run_core)])

    if proto == 'udp':
        cmd.append('-u')

    if jsonOut:
        cmd.append('-J')

    if num_of_streams:
        cmd.extend(["-P", str(num_of_streams)])

    if connect_timeout:
        cmd.extend(["--connect-timeout", str(connect_timeout)])

    if pktsize:
        cmd.extend(["-l", str(pktsize)])

    if msssize:
        cmd.extend(["-M", str(msssize)])

    if ipproto == 'v6':
        cmd.append("-6")

    if packet_count:
        cmd.extend(["-k", str(packet_count)])

    # no periodic output
    cmd.extend(["-i", "0"])

    # Bugfix: in the original this check sat after an unconditional return
    # and was dead code, so -R was never emitted.
    if reverse:
        cmd.append("-R")

    if node_os != "windows" or naples:
        return " ".join(cmd)
    # Close the powershell quote opened above.
    return " ".join(cmd) + "\""