Example #1
0
def Verify(tc):
    if tc.resp is None:
        return api.types.status.FAILURE

    api.Logger.info("show_gid results")

    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            return api.types.status.FAILURE

    # Save the per-workload RDMA device and GID for testcases in this testsuite to use
    w = [tc.w1, tc.w2, tc.vlan_w1, tc.vlan_w2]
    for i in range(len(w)):
        if api.IsDryrun():
            api.SetTestsuiteAttr(w[i].ip_address + "_device", '0')
            api.SetTestsuiteAttr(w[i].ip_address + "_gid", '0')
        else:
            api.SetTestsuiteAttr(
                w[i].ip_address + "_device",
                rdma.GetWorkloadDevice(tc.resp.commands[i + 1].stdout))
            api.SetTestsuiteAttr(
                w[i].ip_address + "_gid",
                rdma.GetWorkloadGID(tc.resp.commands[i + 1].stdout))

    cookie_idx = 0
    for cmd in tc.resp.commands:
        if "show drops cmd" in tc.cmd_cookies[cookie_idx]:
            cookie_attrs = tc.cmd_cookies[cookie_idx].split()
            ip_address = cookie_attrs[-1]
            node_name = cookie_attrs[5]
            dev = api.GetTestsuiteAttr(ip_address + "_device")[-1]
            curr_drops = qos.QosGetDropsForDevFromOutput(cmd.stdout, dev)
            qos.QosSetDropsForDev(cmd.stdout, dev, node_name)
        if "QoS sysctl get" in tc.cmd_cookies[cookie_idx]:
            qos.QosSetTestsuiteAttrs(cmd.stdout)
        if "show lif" in tc.cmd_cookies[cookie_idx]:
            lif_list = []
            lines = cmd.stdout.split('\n')
            for line in lines:
                api.Logger.info("{}".format(line))
                if len(line) == 0:
                    continue
                lif = line.split(' ')[0]
                lif_list.append(lif)
            api.SetTestsuiteAttr("lifs", lif_list)

        cookie_idx += 1

    return api.types.status.SUCCESS
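
A hedged illustration (assumed, not taken from the source) of the cmd_cookie layout that the "show drops cmd" parsing above relies on: after a whitespace split, field 5 is expected to hold the node name and the last field the workload IP address.

# Hypothetical cookie string; the real one is built elsewhere in this testsuite.
cookie = "show drops cmd on node node1 10.0.0.1"
cookie_attrs = cookie.split()
assert cookie_attrs[5] == "node1"      # node_name, as read in Verify() above
assert cookie_attrs[-1] == "10.0.0.1"  # ip_address, as read in Verify() above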
Example #2
0
def Setup(tc):
    if api.IsDryrun(): return api.types.status.SUCCESS
    tc.nodes = api.GetWorkloadNodeHostnames()
    tc.node_intfs = {}
    srv,cli = _get_workloads(tc)
    tc.workloads = [srv, cli]

    if getattr(tc.args, 'restart', False):
        ret = api.RestartNodes(tc.nodes)
        if ret != api.types.status.SUCCESS:
            api.Logger.error("Node restart failed")
            return api.types.status.FAILURE


    api.Logger.info("Setting driver features")
    if common.setup_features(tc) != api.types.status.SUCCESS:
        api.Logger.info("Setting driver features :Failed")
        return api.types.status.FAILURE

    api.Logger.info("Setting driver features : Success")
    if getattr(tc.args, 'capture_pcap', False):
        if common.start_pcap_capture(tc) != api.types.status.SUCCESS:
            return api.types.status.FAILURE
    if getattr(tc.args, 'capture_single_pcap', False):
        if common.start_single_pcap_capture(tc) != api.types.status.SUCCESS:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
Example #3
0
def Setup(tc):
    if api.IsDryrun():
        return api.types.status.SUCCESS

    tc.nodes = api.GetWorkloadNodeHostnames()

    tc.workload_pairs = getRemoteWorkloadPairs(tc)

    if len(tc.workload_pairs) == 0:
        api.Logger.error("Skipping Testcase due to no workload pairs.")
        tc.skip = True
        return api.types.status.FAILURE

    for pair in tc.workload_pairs:
        srv = pair[0]
        cli = pair[1]
        tc.workloads = [srv, cli]
        api.Logger.info("Setting up interfaces %s(%s) --> %s(%s)" %
                        (srv.workload_name, srv.ip_address, cli.workload_name,
                         cli.ip_address))
        if common.setup_features(tc) != api.types.status.SUCCESS:
            api.Logger.info("Setting driver features :Failed")
            return api.types.status.FAILURE

    return api.types.status.SUCCESS
Example #4
0
def Verify(tc):
    if api.IsDryrun():
        return api.types.status.SUCCESS

    #min_tokens = int(tc.expected_tokens * 0.9)
    #max_tokens = int(tc.expected_tokens * 1.1)
    min_tokens = int(tc.expected_tokens *
                     0.1)  # should be non-zero; we use 10% of expected
    max_tokens = int(tc.expected_tokens *
                     1.05)  # shouldn't exceed expected by more than 5%

    w1, w2 = tc.workload_pairs[0]
    w2.vnic.Read()
    if tc.iterators.policertype == 'pps':
        actual_tokens = w2.vnic.Stats.RxPackets
    else:
        actual_tokens = w2.vnic.Stats.RxBytes

    if actual_tokens < min_tokens:
        api.Logger.error(
            f"Recieved rate lower than expected: {actual_tokens} < {min_tokens}"
        )
        return api.types.status.FAILURE

    if actual_tokens > max_tokens:
        api.Logger.error(
            f"Recieved rate higher than expected: {actual_tokens} > {max_tokens}"
        )
        return api.types.status.FAILURE

    api.Logger.info(f"Passed: {min_tokens} < {actual_tokens} < {max_tokens}")

    return api.types.status.SUCCESS
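
A minimal sketch of the tolerance arithmetic used above, assuming a hypothetical pps policer of 10000 pkt/s measured over a 10-second run; how tc.expected_tokens is actually computed is outside this example.

expected_tokens = 10000 * 10                # hypothetical rate * duration
min_tokens = int(expected_tokens * 0.1)     # 10000: must receive at least 10%
max_tokens = int(expected_tokens * 1.05)    # 105000: at most 5% over expected
assert min_tokens <= expected_tokens <= max_tokens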
Example #5
0
def Setup(tc):
    api.Logger.info ("Driver compat test")
    if api.IsDryrun(): return api.types.status.SUCCESS

    tc.nodes = api.GetNaplesHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])

    tc.skip = False
    tc.driver_changed = False
    if tc.os == compat.OS_TYPE_BSD: # Not supporting BSD right now
        tc.skip = True
        return api.types.status.SUCCESS

    # Intention to test locally built FW with target-version driver
    tc.target_version = getattr(tc.iterators, 'release', 'latest')

    if compat.LoadDriver(tc.nodes, tc.os, tc.target_version):
        tc.driver_changed = True

    if getattr(tc.args, 'type', 'local_only') == 'local_only': 
        tc.workload_pairs = api.GetLocalWorkloadPairs()
    else: 
        tc.workload_pairs = api.GetRemoteWorkloadPairs() 

    if len(tc.workload_pairs) == 0: 
        api.Logger.info("Skipping ping part of testcase due to no workload pairs.") 
        tc.skip = True

    return api.types.status.SUCCESS
Example #6
0
def PacketTestSetup(tc):
    if tc.upgrade_mode is None:
        return api.types.status.SUCCESS

    tc.bg_cmd_cookies = None
    tc.bg_cmd_resp = None
    tc.pktsize = 128
    tc.duration = tc.sleep
    tc.background = True
    tc.pktlossverif = False
    tc.interval = 0.001  #1msec
    tc.count = int(tc.duration / tc.interval)

    if tc.upgrade_mode != "graceful":
        tc.pktlossverif = True

    if api.IsDryrun():
        return api.types.status.SUCCESS

    # start background ping before start of test
    if ping.TestPing(tc, 'user_input', "ipv4", tc.pktsize, interval=tc.interval, \
            count=tc.count, pktlossverif=tc.pktlossverif, \
            background=tc.background, hping3=True) != api.types.status.SUCCESS:
        api.Logger.error("Failed in triggering background Ping.")
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
Example #7
0
def changeWorkloadIntfMTU(new_mtu, node_name=None):
    result = api.types.status.SUCCESS
    if api.IsDryrun():
        return result
    workloads = api.GetWorkloads()
    for w in workloads:
        if node_name is not None:
            if node_name != w.node_name:
                api.Logger.debug(
                    "MTU filter : changeWorkloadIntfMTU skipping peer node ",
                    w.node_name, w.interface, new_mtu)
                continue
        # TODO: Maybe revisit this. Ignore 802.1q vlan workloads for now.
        if w.interface_type == topo_svc.INTERFACE_TYPE_VSS:
            api.Logger.debug(
                "MTU filter : changeWorkloadIntfMTU skipping vlan workload",
                w.workload_name, w.node_name, w.interface)
            continue
        cmd = naples_workload.setInterfaceMTU(w, new_mtu)
        if cmd.exit_code != 0:
            api.Logger.critical(
                "MTU filter : changeWorkloadIntfMTU failed for ", w.node_name,
                w.interface, new_mtu)
            api.PrintCommandResults(cmd)
            naples_workload.debug_dump_interface_info(w)
            result = api.types.status.FAILURE
    return result
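
A hedged usage sketch (not part of the source): a hypothetical Trigger step that applies a new MTU on the Naples node so that verifyMTUchange() in the next example can check it; tc.naples_node and the MTU value are assumptions.

def Trigger(tc):
    tc.new_mtu = 1500  # hypothetical target MTU
    if changeWorkloadIntfMTU(tc.new_mtu, tc.naples_node) != api.types.status.SUCCESS:
        api.Logger.error("Failed to apply MTU %d on %s" % (tc.new_mtu, tc.naples_node))
        return api.types.status.FAILURE
    return api.types.status.SUCCESS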
Example #8
0
def verifyMTUchange(tc):
    result = api.types.status.SUCCESS
    expected_mtu = tc.new_mtu
    node_name = tc.naples_node
    workloads = api.GetWorkloads()
    if api.IsDryrun():
        return result
    for w in workloads:
        configured_mtu = naples_workload.getInterfaceMTU(w)
        if node_name != w.node_name:
            api.Logger.verbose(
                "MTU filter : verifyMTUchange skipping peer node ",
                w.node_name, w.interface, configured_mtu, expected_mtu)
            continue
        # TODO: Maybe revisit this. Ignore 802.1q vlan workloads for now.
        if w.interface_type == topo_svc.INTERFACE_TYPE_VSS:
            api.Logger.verbose(
                "MTU filter : verifyMTUchange skipping vlan workload",
                w.workload_name, w.node_name, w.interface)
            continue
        if configured_mtu != expected_mtu:
            api.Logger.critical("MTU filter : verifyMTUchange failed for ",
                                w.interface, configured_mtu, expected_mtu)
            naples_workload.debug_dump_interface_info(w)
            result = api.types.status.FAILURE
    return result
Example #9
0
def HitlessTriggerUpdateRequest(tc):
    result = api.types.status.SUCCESS
    if api.IsDryrun():
        return result

    background_req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    # start upgrade manager process
    for node in tc.nodes:
        cmd = "/nic/tools/start-upgmgr.sh -n "
        api.Logger.info("Starting Upgrade Manager %s" % (cmd))
        api.Trigger_AddNaplesCommand(background_req, node, cmd, background=True)
    api.Trigger(background_req)

    # wait for upgrade manager to come up
    misc_utils.Sleep(10)
    for node in tc.nodes:
        # initiate upgrade client objects
        # Generate Upgrade objects
        UpgradeClient.GenerateUpgradeObjects(node, api.GetNicMgmtIP(node))

        upg_obj = UpgradeClient.GetUpgradeObject(node)
        upg_obj.SetPkgName(tc.pkg_name)
        upg_obj.SetUpgMode(upgrade_pb2.UPGRADE_MODE_HITLESS)
        upg_status = upg_obj.UpgradeReq()
        api.Logger.info(
            f"Hitless Upgrade request for {node} returned status {upg_status}")
        if upg_status != upgrade_pb2.UPGRADE_STATUS_OK:
            api.Logger.error(f"Failed to start upgrade manager on {node}")
            result = api.types.status.FAILURE
            continue
    return result
Example #10
0
def Trigger(tc):
    if api.IsDryrun(): return api.types.status.SUCCESS

    srv = tc.workloads[0]
    cli = tc.workloads[1]
    
    # Determine where the commands will be run - host or Naples.
    test_type = getattr(tc.args, "test-type", INTF_TEST_TYPE_HOST)
    is_naples_cmd = True
    if test_type == INTF_TEST_TYPE_HOST:
        is_naples_cmd = False

    srv_req = api.Trigger_CreateExecuteCommandsRequest(serial = False)
    cli_req = api.Trigger_CreateExecuteCommandsRequest(serial = False)

    proto = getattr(tc.iterators, "proto", 'tcp')
    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)
    pktsize = getattr(tc.iterators, "pktsize", None)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')

    if ipproto == 'v4':
        server_ip = srv.ip_address
        client_ip = cli.ip_address
    else:
        server_ip = srv.ipv6_address
        client_ip = cli.ipv6_address
        
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (srv.interface, server_ip, cli.interface, client_ip)

    api.Logger.info("Starting Iperf(%s/%s) test from %s"
                    % (proto, ipproto, tc.cmd_descr))

    duration =  10
    for i in range(number_of_iperf_threads):
        if proto == 'tcp':
            port = api.AllocateTcpPort()
        else:
            port = api.AllocateUdpPort()
 
        iperf_server_cmd = iperf.ServerCmd(port, naples = is_naples_cmd)
        api.Trigger_AddCommand(srv_req, srv.node_name, srv.workload_name, iperf_server_cmd, background = True)

        iperf_client_cmd = iperf.ClientCmd(server_ip, port, time=duration,
                                 proto=proto, jsonOut=True, ipproto=ipproto,
                                 pktsize=pktsize, client_ip=client_ip, naples = is_naples_cmd)
        api.Trigger_AddCommand(cli_req, cli.node_name, cli.workload_name, iperf_client_cmd, timeout = 60)

    srv_resp = api.Trigger(srv_req)
    # Wait for iperf server to start.
    time.sleep(10)
    tc.cli_resp = api.Trigger(cli_req)
    # Wait for iperf clients to finish.
    time.sleep(2*duration)

    srv_resp1 = api.Trigger_TerminateAllCommands(srv_resp)

    return api.types.status.SUCCESS
Example #11
0
def Verify(tc):
    if tc.skip or api.IsDryrun(): return api.types.status.SUCCESS
    if tc.resp is None:
        return api.types.status.FAILURE

    result = api.types.status.SUCCESS

    # Verify pdsctl and host commands results
    for wl_name, tuples in tc.cmd_status.items():
        if api.IsApiResponseOk(tuples[0]):
            tx_enable = 'on' if tc.args.tx else 'off'
            api.Logger.info("SUCCESS: Name: %s, tx_status: %s" %
                            (wl_name, tx_enable))
        else:
            result = api.types.status.FAILURE
            api.Logger.info("FAILURE: Name: %s, tx_status: %s" %
                            (wl_name, tuples[0].stdout))

        if api.IsApiResponseOk(tuples[1]):
            rx_enable = 'on' if tc.args.rx else 'off'
            api.Logger.info("SUCCESS: Name: %s, rx_status: %s" %
                            (wl_name, rx_enable))
        else:
            result = api.types.status.FAILURE
            api.Logger.info("FAILURE: Name: %s, rx_status: %s" %
                            (wl_name, tuples[1].stdout))

        api.Logger.warn(
            "XXX 'pdsctl show lif' does not support --yaml output, and does not show the mode flag for validation"
        )
        #if tc.pds_verify.get(wl_name, None) != None:
        #    tx_chk = tc.pds_verify[wl_name][0]
        #    rx_chk = tc.pds_verify[wl_name][1]
        #    if api.GetNodeOs(wl_name.split('_')[0]) == 'freebsd':
        #        # NOTE: freebsd has only one flag for both rx and tx vlan offload
        #        # we overwrite tx flag using rx flag hence check tx flag with rx flag in verification
        #        if str(tx_chk[1]) != str(rx_chk[0]):
        #            result = api.types.status.FAILURE
        #            api.Logger.info("FAILURE: Name: %s, tx_chk: %s" % (wl_name, tx_chk))
        #        if str(rx_chk[1]) != str(rx_chk[0]):
        #            result = api.types.status.FAILURE
        #            api.Logger.info("FAILURE: Name: %s, rx_chk: %s" % (wl_name, rx_chk))
        #    else:
        #        if tx_chk[0] != tx_chk[1]:
        #            result = api.types.status.FAILURE
        #            api.Logger.info("FAILURE: Name: %s, expected tx-flag: %s, in PDS: %s" % (wl_name, tx_chk[0], tx_chk[1]))
        #        if rx_chk[0] != rx_chk[1]:
        #            result = api.types.status.FAILURE
        #            api.Logger.info("FAILURE: Name: %s, expected rx-flag: %s, in PDS: %s" % (wl_name, rx_chk[0], rx_chk[1]))

    # Verify traffic result
    vp_result = traffic_utils.verifyPing(tc.cmd_cookies, tc.resp)
    if vp_result is not api.types.status.SUCCESS:
        result = vp_result

    api.Logger.info("TC.Verify result: %s" % result)
    return result
Example #12
0
def __build_mem_usage_history(self, cmd_resp):
    records = self.__disset_mem_usage_resp(cmd_resp)
    if not records:
        if api.IsDryrun():
            return api.types.status.SUCCESS
        api.Logger.error("Failed to dissect the mem usage response")
        return api.types.status.FAILURE
    self.__build_mem_usage_history_from_rec(records)
    return api.types.status.SUCCESS
Example #13
0
def ValidateBGPUnderlayNeighborshipInfo():
    if api.IsDryrun():
        return True
    nodes = api.GetNaplesHostnames()
    for node in nodes:
        if not ValidateBGPUnderlayNeighborship(node):
            api.Logger.error("Failed in BGP Underlay Neighborship validation "
                             "for node: %s" % (node))
            return False
    return True
Example #14
0
def HitlessPollUpgradeStage(tc, stage, **kwargs):
    if api.IsDryrun():
        return api.types.status.SUCCESS
    try:
        __poll_upgrade_stage(tc, stage, **kwargs)
        return api.types.status.SUCCESS
    except Exception:
        #traceback.print_exc()
        api.Logger.error(f"Failed to poll upgrade stage {stage.name}")
        return api.types.status.FAILURE
Example #15
0
def VerifyConnectivity(tc):

    if api.IsDryrun():
        return api.types.status.SUCCESS

    # ensure connectivity with foreground ping before test
    if ping.TestPing(tc, 'user_input', "ipv4", pktsize=128, interval=0.001, \
            count=5) != api.types.status.SUCCESS:
        api.Logger.info("Connectivity Verification Failed")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
Example #16
0
def VerifyConnectivity(tc):
    if api.IsDryrun():
        return api.types.status.SUCCESS
    # ensure connectivity with foreground ping before test
    tc.workload_pairs = config_api.GetPingableWorkloadPairs(
        wl_pair_type=config_api.WORKLOAD_PAIR_TYPE_REMOTE_ONLY)
    if ping.TestPing(tc, "user_input", "ipv4", pktsize=128, interval=0.001, \
            count=5) != api.types.status.SUCCESS:
        api.Logger.info("Connectivity Verification Failed")
        return api.types.status.FAILURE
    return api.types.status.SUCCESS
Example #17
0
def verifyFlowTable(af, workload_pairs):
    if api.IsDryrun():
        return api.types.status.SUCCESS

    flowEntries = {}
    for pair in workload_pairs:
        w1 = pair[0]
        w2 = pair[1]

        # Check the flow table on both endpoints' nodes for this workload pair.
        for src, dst in ((w1, w2), (w2, w1)):
            if src.node_name not in flowEntries:
                ret, resp = pdsctl.ExecutePdsctlShowCommand(src.node_name,
                                                            "flow",
                                                            yaml=False)
                if not ret:
                    api.Logger.error(
                        "Failed to execute show flows at node %s : %s" %
                        (src.node_name, resp))
                    return api.types.status.FAILURE
                flowEntries[src.node_name] = resp

            iflow_found, rflow_found = parseFlowEntries(
                flowEntries[src.node_name], src, dst)
            if not iflow_found or not rflow_found:
                api.Logger.error(
                    "Flows not found at node %s : %s[iflow %d, rflow %d]" %
                    (src.node_name, flowEntries[src.node_name], iflow_found,
                     rflow_found))
                return api.types.status.FAILURE

    return api.types.status.SUCCESS
Example #18
0
def ShowFlowSummary(nodes=[]):
    if api.IsDryrun():
        return api.types.status.SUCCESS

    for node in nodes:
        pdsctl.ExecutePdsctlShowCommand(node,
                                        "flow",
                                        "--summary | grep \"No. of flows :\"",
                                        yaml=False,
                                        print_op=True)

    return api.types.status.SUCCESS
Example #19
0
def Setup(tc):
    # select one of the LearnMAC objects from the dictionary
    tc.node = random.choice(api.GetNaplesHostnames())
    tc.learn_mac_obj = random.choice(learn_utils.GetLearnMACObjects(tc.node))
    tc.subnet = tc.learn_mac_obj.SUBNET
    if api.IsDryrun():
        tc.hostifidx = 'dryrun'
    else:
        tc.hostifidx = tc.subnet.HostIfIdx[0]
    tc.wload = config_api.FindWorkloadByVnic(tc.learn_mac_obj)
    api.Logger.debug("Chosen subnet %s" % tc.subnet)
    return api.types.status.SUCCESS
Example #20
0
def Trigger(tc):
    if api.IsDryrun():
        return api.types.status.SUCCESS

    for pair in tc.workload_pairs:
        srv = pair[0]
        cli = pair[1]
        if not cli.IsNaples():
            cli = pair[0]
            srv = pair[1]
        
        tc.srv = srv
        tc.cli = cli
        tc.is_srv_naples = False
        if srv.IsNaples():
            tc.is_srv_naples = True
      
        vxlan = False
        if getattr(tc.iterators, 'vxlan', 'off') == 'on':
            vxlan = True

    
        if not vxlan:
            status = runCsumTest(tc)
            if status != api.types.status.SUCCESS:
                return status

        else:
            vxlan_ipproto = getattr(tc.iterators, "vxlan_ip", 'v4')
            ipv4 = True
            if vxlan_ipproto == 'v6':
                ipv4 = False
            srv_vxlan = copy.deepcopy(srv)
            cli_vxlan = copy.deepcopy(cli)

            status = ionic_utils.setupVxLAN(ipv4, srv, cli)
            if status != api.types.status.SUCCESS:
                return status

            # ipproto selects which IP version is used.
            srv_vxlan.interface = "vxlan0@" + srv.interface
            srv_vxlan.ip_address = ionic_utils.VXLAN_SERVER_IP
            srv_vxlan.ipv6_address = ionic_utils.VXLAN_SERVER_IPV6
            cli_vxlan.interface = "vxlan0@" + cli.interface
            cli_vxlan.ip_address = ionic_utils.VXLAN_CLIENT_IP
            cli_vxlan.ipv6_address = ionic_utils.VXLAN_CLIENT_IPV6
            tc.srv = srv_vxlan
            tc.cli = cli_vxlan
            status = runCsumTest(tc)
            if status != api.types.status.SUCCESS:
                return status

    return api.types.status.SUCCESS
Example #21
0
def verifyFlowAgeing(workload_pairs, proto, af, timeout):
    if api.IsDryrun():
        return api.types.status.SUCCESS

    if len(workload_pairs) == 0:
        return api.types.status.SUCCESS
    pair = workload_pairs[0]
    # currently assume all naples have same security profile timeouts
    store_client = EzAccessStoreClient[pair[0].node_name]
    if store_client is None:
        return api.types.status.SUCCESS

    # Get the timeout value
    # For RST, the timeout value is same as close
    if timeout == 'rst':
        timeout = 'close'
    # For longlived, get idle timeout and ensure that the flows are not aged out
    # when idle timer expires
    longlived = False
    if timeout == 'longlived':
        timeout = 'idle'
        longlived = True

    timeout_func_name = 'Get%s%sTimeout' % (proto.upper(),
                                            timeout.capitalize())
    timeout_func = getattr(store_client.GetSecurityProfile(),
                           timeout_func_name)
    age_timeout = timeout_func()

    # if timeout is too low, can't reliably check flow existence in between
    if age_timeout > 30:
        # check at half time that flow still exists and not prematurely aged out
        Sleep(age_timeout / 2)
        resp = verifyFlowTable(af, workload_pairs)
        if resp != api.types.status.SUCCESS:
            api.Logger.error("Flow aged out before timeout - resp: %s" %
                             (resp))
            return resp
        age_timeout = age_timeout / 2

    Sleep(age_timeout)
    resp = verifyFlowTable(af, workload_pairs)
    if longlived and resp != api.types.status.SUCCESS:
        api.Logger.error("Flow aged out after timeout - resp: %s" % (resp))
        return api.types.status.FAILURE

    if not longlived and resp == api.types.status.SUCCESS:
        api.Logger.error("Flow not aged out after timeout - resp: %s" % (resp))
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
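
A minimal sketch of the reflective getter lookup used above, assuming the security profile exposes methods named Get<PROTO><Timeout>Timeout as the code implies.

proto, timeout = 'tcp', 'idle'
timeout_func_name = 'Get%s%sTimeout' % (proto.upper(), timeout.capitalize())
assert timeout_func_name == 'GetTCPIdleTimeout'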
Example #22
0
def __modify_workload_interface(tc):
    wl_api.DeleteWorkload(tc.wload)
    if api.IsDryrun():
        tc.wload.parent_interface = 'dryrun'
    else:
        tc.wload.parent_interface = intf_client.FindHostInterface(
            tc.subnet.Node, tc.subnet.HostIfIdx[0]).GetInterfaceName()
    tc.wload.interface = tc.wload.parent_interface
    store.SetWorkloadRunning(tc.wload.workload_name)
    wl_api.ReAddWorkload(tc.wload)
    add_routes.AddRoutes(tc.learn_mac_obj)
    arp.SendGratArp([tc.wload])
    return
Example #23
0
def verifyFlowFlags(node, flowHandle, pktType):
    if api.IsDryrun():
        return api.types.status.SUCCESS

    # Verify that the local_to_local flag is correctly set in FTL for flow
    ret, resp = verifyFtlEntry(node, flowHandle)
    if ret != api.types.status.SUCCESS:
        return ret

    if "L2L" in pktType:
        assert resp.splitlines()[26].split()[2] == '0x1'
    else:
        assert resp.splitlines()[26].split()[2] == '0x0'
    return api.types.status.SUCCESS
Example #24
0
def Trigger(tc):
    if api.IsDryrun(): return api.types.status.SUCCESS

    req1 = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    req2 = api.Trigger_CreateExecuteCommandsRequest(serial=False)

    w1 = tc.workloads[0]
    w2 = tc.workloads[1]
    tc.cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                   (w1.interface, w1.ip_address, w2.interface, w2.ip_address)
    api.Logger.info("Starting Iperf test from %s" % (tc.cmd_descr))

    proto = getattr(tc.iterators, "proto", 'tcp')

    number_of_iperf_threads = getattr(tc.args, "iperfthreads", 1)

    pktsize = getattr(tc.iterators, "pktsize", 512)
    ipproto = getattr(tc.iterators, "ipproto", 'v4')

    for i in range(number_of_iperf_threads):
        if proto == 'tcp':
            port = api.AllocateTcpPort()
        else:
            port = api.AllocateUdpPort()

        iperf_server_cmd = cmd_builder.iperf_server_cmd(port=port)
        api.Trigger_AddCommand(req1,
                               w1.node_name,
                               w1.workload_name,
                               iperf_server_cmd,
                               background=True)

        iperf_client_cmd = cmd_builder.iperf_client_cmd(
            server_ip=w1.ip_address,
            port=port,
            proto=proto,
            pktsize=pktsize,
            ipproto=ipproto)
        api.Trigger_AddCommand(req2, w2.node_name, w2.workload_name,
                               iperf_client_cmd)

    trig_resp1 = api.Trigger(req1)
    trig_resp2 = api.Trigger(req2)
    term_resp1 = api.Trigger_TerminateAllCommands(trig_resp1)

    response = api.Trigger_AggregateCommandsResponse(trig_resp1, term_resp1)
    tc.resp = api.Trigger_AggregateCommandsResponse(response, trig_resp2)

    return api.types.status.SUCCESS
Example #25
0
def getFlowEntries(node):
    if api.IsDryrun():
        # Return a dummy entry
        resp = [
            "256     I/H       3     2.0.0.2             6915      2.0.0.5             2048        ICMP   A"
        ]
        return api.types.status.SUCCESS, resp

    ret, entries = pdsctl.ExecutePdsctlShowCommand(node, "flow", yaml=False)
    if not ret:
        api.Logger.error("Failed to execute show flows on node %s : %s" %
                         (node, entries))
        return api.types.status.FAILURE, []
    # Skip the first 16 lines (table legend) and the trailing line
    return api.types.status.SUCCESS, entries.splitlines()[16:-1]
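
A hedged usage sketch (not part of the source): callers are assumed to unpack the (status, entries) pair and treat each returned string as one flow-table row; "node" below is a hypothetical Naples hostname.

ret, entries = getFlowEntries(node)
if ret != api.types.status.SUCCESS:
    api.Logger.error("Could not fetch flow entries from %s" % node)
else:
    api.Logger.info("%s has %d flow entries" % (node, len(entries)))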
Example #26
0
def Verify(tc):
    if api.IsDryrun(): return api.types.status.SUCCESS
    if getattr(tc.args, 'capture_pcap', False):
        ret = common.stop_pcap_capture(tc)
        if ret != api.types.status.SUCCESS:
            api.Logger.info("pcap caputre failed")
            return ret

    if getattr(tc.args, 'capture_single_pcap', False):
        ret = common.stop_single_pcap_capture(tc)
        if ret != api.types.status.SUCCESS:
            api.Logger.info("single pcap caputre failed")
            return ret

    if tc.cli_resp is None:
        return api.types.status.FAILURE

    sendGbps = []
    recvGbps = []
    for cmd in tc.cli_resp.commands:

        if cmd.exit_code != 0:
            api.Logger.error("Iperf client exited with error")
            api.PrintCommandResults(cmd)
            if iperf.ConnectionTimedout(cmd.stdout):
                api.Logger.error("Connection timeout, ignoring for now")
                continue
            if iperf.ControlSocketClosed(cmd.stdout):
                api.Logger.error("Control socket cloned, ignoring for now")
                continue
            if iperf.ServerTerminated(cmd.stdout):
                api.Logger.error("Iperf server terminated")
                return api.types.status.FAILURE
            if not iperf.Success(cmd.stdout):
                api.Logger.error("Iperf failed", iperf.Error(cmd.stdout))
                return api.types.status.FAILURE
        elif not api.GlobalOptions.dryrun:
            sendGbps.append(float(iperf.GetSentGbps(cmd.stdout)))
            recvGbps.append(float(iperf.GetReceivedGbps(cmd.stdout)))
            api.Logger.info("Iperf Send Rate in Gbps ", iperf.GetSentGbps(cmd.stdout))
            api.Logger.info("Iperf Receive Rate in Gbps ", iperf.GetReceivedGbps(cmd.stdout))

    api.Logger.info("Iperf Send Rate in Gbps ", sendGbps)
    api.Logger.info("Iperf Receive Rate in Gbps ", recvGbps)
    api.Logger.info("iperf test successfull total send Gbps ", sum(sendGbps), " receive Gbps ", sum(recvGbps))

    return verify.driver_feature_verify(tc)
Example #27
0
def Verify(tc):
    if api.IsDryrun(): return api.types.status.SUCCESS
    if getattr(tc.args, 'capture_pcap', False):
        ret = common.stop_pcap_capture(tc)
        if ret != api.types.status.SUCCESS:
            api.Logger.info("pcap caputre failed")
            return ret

    if tc.resp is None:
        return api.types.status.FAILURE

    for cmd in tc.resp.commands:
        api.PrintCommandResults(cmd)
        if cmd.exit_code != 0:
            return api.types.status.FAILURE

    return verify.driver_feature_verify(tc)
Example #28
0
def clearFlowTable(workload_pairs):
    if api.IsDryrun():
        return api.types.status.SUCCESS

    nodes = api.GetNaplesHostnames()
    for node in nodes:
        ret, resp = pdsctl.ExecutePdsctlCommand(node, "clear flow", yaml=False)
        if ret != True:
            api.Logger.error("Failed to execute clear flows at node %s : %s" %
                             (node, resp))
            return api.types.status.FAILURE

        if "Clearing flows succeeded" not in resp:
            api.Logger.error("Failed to clear flows at node %s : %s" %
                             (node, resp))
            return api.types.status.FAILURE

    return api.types.status.SUCCESS
Example #29
0
def Setup(tc):
    api.Logger.info("Fw compat test")
    if api.IsDryrun(): return api.types.status.SUCCESS

    tc.nodes = api.GetNaplesHostnames()
    tc.os = api.GetNodeOs(tc.nodes[0])

    tc.skip = False
    if tc.os == compat.OS_TYPE_BSD:  # Not supporting BSD & ESXi right now
        tc.skip = True
        return api.types.status.SUCCESS

    # Intention to test locally built FW with target-version driver
    tc.target_version = getattr(tc.iterators, 'release', 'latest')

    tc.fw_changed = False
    if compat.LoadFirmware(tc.nodes, tc.os,
                           tc.target_version) == api.types.status.SUCCESS:
        api.Logger.info("Loaded %s Fw on %s" % (tc.target_version, tc.nodes))
    else:
        return api.types.status.FAILURE

    tc.fw_changed = True
    if tc.os == compat.OS_TYPE_LINUX:
        for node in tc.nodes:
            # this is required to bring the testbed into operation state
            # after driver unload interfaces need to be initialized
            wl_api.ReAddWorkloads(node)

    if getattr(tc.args, 'type', 'local_only') == 'local_only':
        tc.workload_pairs = api.GetLocalWorkloadPairs()
    else:
        tc.workload_pairs = api.GetRemoteWorkloadPairs()

    if len(tc.workload_pairs) == 0:
        api.Logger.info(
            "Skipping ping part of testcase due to no workload pairs.")
        tc.skip = True

    return api.types.status.SUCCESS
Example #30
0
def verifyMTUPings(tc):
    result = api.types.status.SUCCESS
    final_result = api.types.status.SUCCESS
    new_mtu = tc.new_mtu
    global __IS_FREEBSD

    if api.IsDryrun(): return final_result

    # Verify ping with exact MTU is successful
    result = traffic_utils.verifyPing(tc.cmd_cookies_1, tc.resp_1)
    if result is not api.types.status.SUCCESS:
        api.Logger.error(
            "MTU filter : Verify failed for verifyMTUPings - exact MTU case ",
            new_mtu)
        final_result = result

    # Verify ping with (MTU - 1) is successful
    result = traffic_utils.verifyPing(tc.cmd_cookies_2, tc.resp_2)
    if result is not api.types.status.SUCCESS:
        api.Logger.error(
            "MTU filter : Verify failed for verifyMTUPings - MTU - 1 case ",
            new_mtu - 1)
        final_result = result

    if __IS_FREEBSD is True:
        msg_too_long_exit_code = 2
    else:
        msg_too_long_exit_code = 1

    # Verify ping with (MTU + 1) is NOT successful
    result = traffic_utils.verifyPing(tc.cmd_cookies_3,
                                      tc.resp_3,
                                      exit_code=msg_too_long_exit_code)
    if result is not api.types.status.SUCCESS:
        api.Logger.error(
            "MTU filter : Verify failed for verifyMTUPings - MTU + 1 case ",
            new_mtu + 1)
        final_result = result

    return final_result