Example #1
def Trigger(tc):
    flow_utils.clearFlowTable(tc.workload_pairs)
    #    import pdb; pdb.set_trace()
    for pair in tc.workload_pairs:
        api.Logger.info("pinging between %s and %s" %
                        (pair[0].ip_address, pair[1].ip_address))
    tc.cmd_cookies, tc.resp = traffic_utils.pingWorkloads(
        tc.workload_pairs, tc.iterators.ipaf, tc.iterators.pktsize)
    return api.types.status.SUCCESS
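Every example here ends by delegating to flow_utils.clearFlowTable (imported as flowutils in some modules). Its body is not part of these listings; the sketch below is only a rough guess at the kind of per-node clear it could perform, and pdsctl.ExecutePdsctlCommand is a hypothetical placeholder rather than a confirmed helper:

def clearFlowTable(workload_pairs):
    # Hypothetical sketch, not the framework implementation. The real helper
    # may use workload_pairs to scope the clear; this version simply clears
    # flows on every Naples node, which would also cover the
    # clearFlowTable(None) calls seen in the examples below.
    for node in api.GetNaplesHostnames():
        # Placeholder call -- only ExecutePdsctlShowCommand appears in these examples.
        pdsctl.ExecutePdsctlCommand(node, "clear", "flow")
    return api.types.status.SUCCESS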
Example #2
def clearFlows(tc):
    if tc.cancel:
        api.Logger.info("Canceling clearFlows...")
        sys.exit(0)

    api.Logger.info("Running clearFlows...")

    if api.GlobalOptions.dryrun:
        return api.types.status.SUCCESS
    flowutils.clearFlowTable(api.GetRemoteWorkloadPairs())
    return api.types.status.SUCCESS
Example #3
def Teardown(tc):
    if hasattr(tc.args, 'policer'):
        for x, y in tc.workload_pairs:
            if x.IsNaples(): x.vnic.RollbackUpdate()
            if y.IsNaples(): y.vnic.RollbackUpdate()

    return flow_utils.clearFlowTable(tc.workload_pairs)
Example #4
def Teardown(tc):

    if tc.skip:
        return api.types.status.SUCCESS

    wl = tc.workload
    ctx = tc.mv_ctx
    home = ctx[wl]['home']
    new_home = ctx[wl]['new_home']
    subnet = ctx[wl]['subnet']
    mac = ctx[wl]['mac']
    ip_prefix_list = [ctx[wl]['ip_prefix']] + ctx[wl]['sec_ip_prefixes']

    api.Logger.info(
        f"Restoring {wl.workload_name} {wl.vnic.SUBNET}({wl.node_name}) => {subnet}({home}) "
        f"with mac {mac}, ip prefixes {ip_prefix_list}")
    move_utils.MoveEpMACEntry(wl, subnet, mac, ip_prefix_list)

    ret = __verify_move_stats(new_home, home)
    if ret != api.types.status.SUCCESS:
        return ret

    api.Logger.verbose(
        "Move statistics are matching expectation on both nodes")
    # Validate with traffic after moving back
    if move_utils.ValidateEPMove() != api.types.status.SUCCESS:
        return api.types.status.FAILURE

    return flow_utils.clearFlowTable(None)
Example #5
def Teardown(tc):
    if tc.is_config_updated:
        rs = config_api.RestoreObjects("Update", tc.selected_objs)
        if rs is False:
            api.Logger.error(
                f"Teardown failed to restore objs from Update operation: {rs}")
    if tc.is_config_deleted:
        rs = config_api.RestoreObjects("Delete", tc.selected_objs)
        if rs is False:
            api.Logger.error(
                f"Teardown failed to restore objs from Delete operation: {rs}")
    return flow_utils.clearFlowTable(tc.workload_pairs)
Example #6
def clearFlows(tc):
    if tc.cancel:
        api.Logger.info("Canceling clearFlows...")
        sys.exit(0)
    api.Logger.info("Running clearFlows...")

    if api.GlobalOptions.dryrun:
        return api.types.status.SUCCESS

    # Log the current flow count on each Naples before clearing; the output is
    # only printed (print_op=True), ret/resp are not checked here.
    nodes = api.GetNaplesHostnames()
    for node in nodes:
        ret, resp = pdsctl.ExecutePdsctlShowCommand(
            node,
            "flow",
            "--summary | grep \"No. of flows :\"",
            yaml=False,
            print_op=True)

    flowutils.clearFlowTable(tc.workload_pairs)
    api.Logger.debug("Completed Running clearFlows...")
    return api.types.status.SUCCESS
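If a post-clear count is also wanted in the log, the same show command can be repeated after clearFlowTable; this is just a usage sketch reusing the calls already shown in this example:

    flowutils.clearFlowTable(tc.workload_pairs)
    for node in nodes:
        ret, resp = pdsctl.ExecutePdsctlShowCommand(
            node, "flow", "--summary | grep \"No. of flows :\"",
            yaml=False, print_op=True)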
Example #7
def Teardown(tc):

    if tc.skip:
        return api.types.status.SUCCESS

    stats_utils.Clear()

    ctx = tc.mv_ctx
    ip_prefix = ctx['ip_prefix']
    src_wl = ctx['src_wl']
    dst_wl = ctx['dst_wl']

    move_utils.MoveEpIPEntry(dst_wl.node_name, src_wl.node_name, ip_prefix)

    misc_utils.Sleep(5)  # let metaswitch carry it to the other side
    learn_utils.DumpLearnData()
    ret = __validate_move_stats(dst_wl.node_name, src_wl.node_name)
    if ret != api.types.status.SUCCESS:
        return api.types.status.FAILURE

    api.Logger.verbose(
        "Move statistics are matching expectation on both nodes")

    # Validate flow move on src and dst.
    ret = __validate_flow_move(tc, src_wl.node_name, ip_prefix, 'R2L')
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to validate flows on node %s" %
                         src_wl.node_name)
        return api.types.status.FAILURE

    ret = __validate_flow_move(tc, dst_wl.node_name, ip_prefix, 'L2R')
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to validate flows on node %s" %
                         dst_wl.node_name)
        return api.types.status.FAILURE

    # Also validate new flows on src
    ret = __validate_flows(tc, src_wl.node_name)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to validate flows on node %s" %
                         src_wl.node_name)
        return api.types.status.FAILURE

    # Terminate background ping and check for loss
    ret = __verify_background_ping(tc)
    if ret != api.types.status.SUCCESS:
        return ret

    # Validate with traffic after moving back
    if move_utils.ValidateEPMove() != api.types.status.SUCCESS:
        return api.types.status.FAILURE

    return flow_utils.clearFlowTable(None)
Example #8
def Teardown(tc):
    if (tc.iterators.proto == 'udp' or
        (tc.iterators.proto == 'tcp' and tc.iterators.timeout != 'rst' and
        tc.iterators.timeout != 'longlived')):
        if not addPktFilterRuleOnEp(tc.workload_pairs, tc.iterators.proto, False):
            api.Logger.error("Failed to delete drop rules")
            return api.types.status.FAILURE

    if tc.iterators.timeout == 'drop':
        modifyPolicyRule(tc.workload_pairs, tc.iterators.proto, "allow")

    if tc.iterators.timeout == 'longlived':
        # Terminate background commands
        api.Trigger_TerminateAllCommands(tc.resp)

    return flow_utils.clearFlowTable(tc.workload_pairs)
Example #9
def Teardown(tc):
    return flow_utils.clearFlowTable(tc.workload_pairs)
Example #10
def Teardown(tc):
    return flow_utils.clearFlowTable(None)
Example #11
def Teardown(tc):
    for obj in tc.selected_objs:
        obj.RollbackUpdate()

    return flow_utils.clearFlowTable(tc.workload_pairs)
Example #12
def Teardown(tc):
    cleanup(tc)
    flowutils.clearFlowTable(api.GetRemoteWorkloadPairs())
    return api.types.status.SUCCESS
Example #13
def Setup(tc):
    tc.seed = random.randrange(sys.maxsize)
    api.Logger.info("Using seed : %s" % (tc.seed))
    tc.serverHandle = None
    tc.clientHandle = None
    tc.selected_sec_profile_objs = None

    tc.skip_stats_validation = getattr(tc.args, 'triggers', False)

    tc.cancel = False
    tc.workloads = api.GetWorkloads()

    utils.UpdateSecurityProfileTimeouts(tc)
    chooseWorkload(tc)
    server, client = tc.workload_pair[0], tc.workload_pair[1]
    if not updateSessionLimits(tc, server):
        api.logger.error("Cannot configure session limit on non-Naples NIC")
        return api.types.status.FAILURE

    if tc.skip_stats_validation:
        naples_utils.CopyMemStatsCheckTool()

    api.Logger.info("Server: %s(%s)(%s) <--> Client: %s(%s)(%s)" %\
                    (server.workload_name, server.ip_address,
                     server.mgmt_ip, client.workload_name,
                     client.ip_address, client.mgmt_ip))

    try:
        StoreCurrentPdsLogLevel(tc)
        pds_utils.SetPdsLogsLevel("error")
    except Exception as e:
        #traceback.print_exc()
        api.Logger.error("Failed to setup cps test workloads : %s" % (e))
        return api.types.status.FAILURE

    try:
        tc.serverHandle = TRexIotaWrapper(
            server,
            role="server",
            gw=client.ip_address,
            kill=0,
            sync_port=server.exposed_tcp_ports[0],
            async_port=server.exposed_tcp_ports[1])
        tc.clientHandle = TRexIotaWrapper(
            client,
            role="client",
            gw=server.ip_address,
            kill=0,
            sync_port=client.exposed_tcp_ports[0],
            async_port=client.exposed_tcp_ports[1])

        api.Logger.info("connect trex...")
        tc.serverHandle.connect()
        tc.clientHandle.connect()

        api.Logger.info("reset connection...")
        tc.serverHandle.reset(True)
        tc.clientHandle.reset(True)

        api.Logger.info("setting profile...")
        profile_path = getProfilePath(tc)
        tunables = getTunables(tc)
        tc.serverHandle.load_profile(profile_path, tunables)
        tc.clientHandle.load_profile(profile_path, tunables)

    except Exception as e:
        #traceback.print_exc()
        api.Logger.info("Failed to setup TRex topology: %s" % e)
        #cleanup(tc)
        return api.types.status.FAILURE

    api.Logger.info("Clear hardware state before trex trigger...")
    flowutils.clearFlowTable(tc.workload_pairs)
    __clearVPPEntity("errors")
    flowutils.clearFlowTable(tc.workload_pairs)
    __clearVPPEntity("flow statistics")
    __clearVPPEntity("flow entries")
    __clearVPPEntity("runtime")

    return api.types.status.SUCCESS
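Example #13 leaves the TRex handles connected and the PDS log level lowered, and its matching Teardown is not part of this listing. Below is only a minimal sketch of what such a cleanup could look like; disconnect() and the attribute holding the saved log level are assumptions, not confirmed APIs:

def Teardown(tc):
    # Hypothetical cleanup sketch; method and attribute names are assumptions.
    for handle in (tc.clientHandle, tc.serverHandle):
        if handle:
            try:
                handle.disconnect()  # assumed TRexIotaWrapper method
            except Exception as e:
                api.Logger.info("TRex disconnect failed: %s" % e)
    # Undo the SetPdsLogsLevel("error") from Setup; the saved-level attribute
    # name is a placeholder.
    pds_utils.SetPdsLogsLevel(getattr(tc, 'stored_pds_log_level', 'debug'))
    return flowutils.clearFlowTable(tc.workload_pairs)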
Example #14
def Setup(tc):

    flow_type = getattr(tc.iterators, "flow_type", "intra-subnet")
    tc.skip = False
    tc.mv_ctx = {}
    tc.bg_cmd_resp = None
    tc.sessionInfo = {}

    # Select movable workload pair
    if flow_type == "intra-subnet":
        wl_type = config_api.WORKLOAD_PAIR_TYPE_REMOTE_ONLY
        wl_scope = config_api.WORKLOAD_PAIR_SCOPE_INTRA_SUBNET
    elif flow_type == "inter-subnet":
        wl_type = config_api.WORKLOAD_PAIR_TYPE_REMOTE_ONLY
        wl_scope = config_api.WORKLOAD_PAIR_SCOPE_INTER_SUBNET
    else:
        assert 0, ("Flow type %s not supported" % flow_type)

    pairs = config_api.GetWorkloadPairs(wl_type, wl_scope)
    if not pairs:
        tc.skip = True
        return api.types.status.SUCCESS

    # Setup move of primary IP address of source workload to destination
    # workload.
    src_wl, dst_wl = pairs[0]
    tc.mv_ctx['src_wl'] = src_wl
    tc.mv_ctx['dst_wl'] = dst_wl
    tc.mv_ctx['ip_prefix'] = src_wl.ip_prefix
    tc.mv_ctx['inter'] = "inter" in flow_type
    tc.wl_pairs = pairs

    # Clear move stats
    stats_utils.Clear()
    learn_utils.DumpLearnData()

    # Clear flows
    flow_utils.clearFlowTable(None)

    # Increase ICMP idle timeout
    cmd = "set pds security-profile icmp-idle-timeout 600"
    vppctl.ExecuteVPPctlCommand(src_wl.node_name, cmd)
    vppctl.ExecuteVPPctlCommand(dst_wl.node_name, cmd)

    ret = __setup_background_ping(tc)
    if ret != api.types.status.SUCCESS:
        return ret

    ret = __validate_flows(tc, src_wl.node_name)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to validate flows on node %s" %
                         src_wl.node_name)
        return api.types.status.FAILURE

    ret = __validate_flows(tc, dst_wl.node_name)
    if ret != api.types.status.SUCCESS:
        api.Logger.error("Failed to validate flows on node %s" %
                         dst_wl.node_name)
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
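Example #14 raises the ICMP idle timeout to 600 seconds during Setup, so a matching Teardown (not shown here) would presumably put the original value back. A hedged sketch using only the vppctl call already seen above; the 30-second value is a placeholder for whatever was in effect before Setup:

    # In Teardown: restore the ICMP idle timeout on both nodes.
    # 30 is a placeholder for the pre-test value.
    cmd = "set pds security-profile icmp-idle-timeout 30"
    vppctl.ExecuteVPPctlCommand(tc.mv_ctx['src_wl'].node_name, cmd)
    vppctl.ExecuteVPPctlCommand(tc.mv_ctx['dst_wl'].node_name, cmd)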