Example 1
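Configures a static in-band management IP on each Naples node, deriving the address (and a shared controller IP) from the first untagged local workload pair per node; baremetal workloads are restricted to the 10.255.0.0 in-band network.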
def Main(step):
    res = api.types.status.SUCCESS
    workloads = api.GetLocalWorkloadPairs(True)
    naples_inband_ip = {}
    ip_offset = 100
    new_controller_ip = None
    for wl in workloads:
        w1 = wl[0]  # only the first workload of each pair is used

        if (w1.uplink_vlan != 0):
            continue

        if naples_inband_ip.get(w1.node_name) is not None:
            continue

        ip_mask = w1.ip_prefix.split('/')[1]
        ip_addr = ip.IPv4Interface(w1.ip_prefix)

        # For inband collector config, the agent always resolves through the
        # first IP address on the inband mgmt interface, so force the
        # 10.x.x.x network as the inband mgmt IP for baremetal workloads.
        if api.IsBareMetalWorkloadType(w1.node_name):
            if str(ip_addr.network).split('/')[0] != "10.255.0.0":
                continue

        # Choose ip_addr + ip_offset as the host in-band mgmt IP.
        new_naples_inband_ip = ip.IPv4Interface(ip_addr.ip + ip_offset)
        ip_offset += 1
        if new_controller_ip is None:
            new_controller_ip = ip.IPv4Interface(ip_addr.ip + ip_offset)
            ip_offset += 1

        new_naples_inband_ip_str = str(
            new_naples_inband_ip.ip) + "/" + str(ip_mask)

        node = w1.node_name
        api.Logger.info(
            "Configuring Naples Inband IP {}".format(new_naples_inband_ip_str))
        ret = common.SetNaplesModeInband_Static(node,
                                                str(new_controller_ip.ip),
                                                new_naples_inband_ip_str)
        if ret is None:
            api.Logger.error("Failed to set Naples Inband mode IP: {}".format(
                new_naples_inband_ip_str))
            res = api.types.status.FAILURE
        #api.Logger.info("Naples {}".format(common.PenctlGetNaplesMgtmIp(node)))

        naples_inband_ip.update({w1.node_name: new_naples_inband_ip_str})

    return res
Example 2
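Chooses local or remote workload pairs based on the testcase type and skips the test when any workload runs on a bare-metal node.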
def Setup(tc):
    if tc.args.type == 'local_only':
        tc.workload_pairs = api.GetLocalWorkloadPairs()
    else:
        tc.workload_pairs = api.GetRemoteWorkloadPairs()

    for pair in tc.workload_pairs:
        w1 = pair[0]  # only the first workload of each pair is inspected
        # Check whether the workload runs on a bare-metal (BM) node.
        if api.IsBareMetalWorkloadType(w1.node_name):
            api.Logger.info("This test is not yet supported for BM types")
            tc.skip = True
            break

    return api.types.status.SUCCESS
Example 3
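Sets up an uplink-failover test: verifies all uplinks are UP, brings up the inband interface, and pings the bond; on baremetal topologies only one Naples is selected for the uplink shut.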
def Setup(tc):
    tc.cmd_cookies = []
    tc.interval = 0.1
    tc.resp = None
    tc.skip = False
    tc.uplink_fail_stage = False

    tc.nodes = api.GetNaplesHostnames()
    tc.is_bm_type = False
    for node in tc.nodes:
        if api.IsBareMetalWorkloadType(node):
            # For baremetal topology, the untagged WL is configured on the
            # uplink itself; shutting down uplinks on both nodes would bring
            # down the WL, so bring down the uplink on only one Naples.
            tc.nodes = [node]
            tc.is_bm_type = True
            break

    tc.port_down_time = getattr(tc.args, "port_shut_time", 60)
    tc.failover_delay = getattr(tc.args, "failover_delay", 0)

    result = bond_utils.DetectUpLinkState(tc.nodes,
                                          bond_utils.PORT_OPER_STATUS_UP, all)
    if result != api.types.status.SUCCESS:
        api.Logger.error("All uplink on %s are not in UP state." % tc.nodes)
        tc.skip = True
        return api.types.status.SUCCESS

    api.Logger.info("All uplink on %s are UP!" % tc.nodes)

    # Bring up inband and reset the active link on bond.
    result = bond_utils.SetupInbandInterface()
    if result != api.types.status.SUCCESS:
        return result

    trigger_ping_to_bond(tc)

    result = verify_ping_to_bond_result(tc)
    if result != api.types.status.SUCCESS:
        api.Logger.error("Ping failed during setup stage")

    return result
Example 4
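Gathers remote workload pairs, flags bare-metal topologies, and configures two secondary IPs per workload, recorded per workload name.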
def Setup(tc):
    global is_wl_type_bm
    global wl_sec_ip_info
    tc.workload_pairs = api.GetRemoteWorkloadPairs()
    tc.workloads = api.GetWorkloads()

    for wl in tc.workloads:
        wl_sec_ip_info[wl.workload_name] = []
        # For BM type, an untagged collector will be used.
        if api.IsBareMetalWorkloadType(wl.node_name):
            is_wl_type_bm = True
        sec_ip_list = sec_ip_api.ConfigWorkloadSecondaryIp(wl, True, 2)
        wl_sec_ip_info[wl.workload_name] = sec_ip_list

    return api.types.status.SUCCESS
Example 5
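Prepares a LIF/flow ERSPAN flow-monitor test: establishes the Naples, remote, local, and collector workloads, skips known-broken option combinations, allocates collector secondary IPs, and shrinks the TCP-close timeout in non-classic mode.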
def Setup(tc):
    #
    # Set up global variables
    #
    tc.feature = 'lif-flow-erspan-flowmon'
    tc.udp_count = NUMBER_OF_UDP_ERSPAN_PACKETS_PER_SESSION >> 1    # half per session
    tc.icmp_count = NUMBER_OF_ICMP_ERSPAN_PACKETS_PER_SESSION >> 1  # half per session

    tc.tcp_close_val = None
    tc.classic_mode = False
    tc.ignore = False
    tc.error = False

    #
    # Establish Workloads
    #
    result = eutils.establishWorkloads(tc)
    if result != api.types.status.SUCCESS:
        tc.error = True
        return result

    #
    # Check to see if Naples-hosted node is present
    #
    result = eutils.establishNaplesWorkload(tc)
    if result != api.types.status.SUCCESS:
        tc.error = True
        return result

    #
    # Ignore non-applicable test-options in Sanity mode
    # - [peer = local] is not supported until ps_2263 is fixed
    # - [collector = local] is not supported until ps_2790 is fixed
    # - Multi-collector testing is not enabled in freebsd
    #   environment until tcpdump capture on secondary-IP is resolved
    #
    if (tc.args.ps_2263 != 'fixed' and tc.iterators.peer == 'local') or\
       (tc.args.ps_2790 != 'fixed' and tc.iterators.collector == 'local') or\
       (api.GetNodeOs(tc.naples.node_name) == 'freebsd' and\
        tc.iterators.ccount > 1):
        tc.ignore = True
        return api.types.status.SUCCESS

    if api.IsBareMetalWorkloadType(tc.naples.node_name):
        tc.classic_mode = True
        tc.udp_count = 1
        tc.icmp_count = 1

    #
    # Figure out (Collector) Workloads that are remote to Naples node and
    # identify remote Naples-peer, if applicable
    #
    result = eutils.establishRemoteWorkloads(tc)
    if result != api.types.status.SUCCESS:
        tc.error = True
        return result

    #
    # Figure out Workloads that are local to Naples node and identify
    # local Naples-peer, if applicable
    #
    result = eutils.establishLocalWorkloads(tc)
    if result != api.types.status.SUCCESS:
        tc.error = True
        return result

    if tc.classic_mode:
        result = eutils.establishCollectorWorkloadInClassicMode(
            tc, '10.255.0.2')
        if result != api.types.status.SUCCESS:
            tc.error = True
            return result

    #
    # Allocate Secondary-IPs for collectors as needed in order to test up to
    # Eight collectors
    #
    result = eutils.establishCollectorSecondaryIPs(tc)
    if result != api.types.status.SUCCESS:
        tc.error = True
        return result

    #
    # Generate feature specific Collector list
    #
    eutils.generateFeatureCollectorList(tc)

    eutils.debugWorkLoadTraces(tc)

    #
    # Retrieve relevant Table-IDs
    #
    eutils.retrieveTableIds(tc)

    #
    # Set up runtime validation knobs
    # - dupcheck:   To circumvent duplicate-pkts checks in case of LIF-ERSPAN
    #               where multiple workloads map to the same LIF
    #
    tc.dupcheck = 'enable'
    if tc.iterators.peer == 'local' or tc.iterators.iftype != 'uplink':
        tc.dupcheck = 'disable'

    #
    # Preserve current TCP-Close configs and shrink it to 1-second
    #
    if not tc.classic_mode:
        tc.tcp_close_val = get_timeout_val('tcp-close')
        update_timeout('tcp-close', "1s")

    return api.types.status.SUCCESS
Example 6
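Prepares an endpoint-SPAN test along the same lines as the previous example, additionally querying the netagent store for the Endpoint objects of the selected Naples workload.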
def Setup(tc):
    #
    # Set up global variables
    #
    tc.feature = 'endpoint-span'
    tc.protocol = 'all'
    tc.udp_count = 1
    tc.icmp_count = 1

    tc.tcp_close_val = None
    tc.classic_mode = False
    tc.ignore = False
    tc.error = False

    #
    # Establish Workloads
    #
    result = eutils.establishWorkloads(tc)
    if result != api.types.status.SUCCESS:
        tc.error = True
        return result

    #
    # Check to see if Naples-hosted node is present
    #
    result = eutils.establishNaplesWorkload(tc)
    if result != api.types.status.SUCCESS:
        tc.error = True
        return result

    #
    # Ignore non-applicable test-options in Sanity mode
    # - Multi-collector testing is not enabled in freebsd
    #   environment until tcpdump capture on secondary-IP is resolved
    # - Multi-collector testing is limited to 2 in esx
    #   environment until secondary-IP works in esx mode
    #
    if (api.GetNodeOs(tc.naples.node_name) == 'freebsd' and\
        tc.iterators.ccount > 1) or\
       (api.GetNodeOs(tc.naples.node_name) == 'esx' and\
        tc.iterators.ccount > 2):
        tc.ignore = True
        return api.types.status.SUCCESS

    if api.IsBareMetalWorkloadType(tc.naples.node_name):
        tc.classic_mode = True

    #
    # Figure out (Collector) Workloads that are remote to Naples node and
    # identify remote Naples-peer, if applicable
    #
    result = eutils.establishRemoteWorkloads(tc)
    if result != api.types.status.SUCCESS:
        tc.error = True
        return result

    #
    # Figure out Workloads that are local to Naples node and identify
    # local Naples-peer, if applicable
    #
    result = eutils.establishLocalWorkloads(tc)
    if result != api.types.status.SUCCESS:
        tc.error = True
        return result
    '''
    # endpoint span is not supported in classic mode
    if tc.classic_mode == True:
        result = eutils.establishCollectorWorkloadInClassicMode(tc, 
                                                                '10.255.0.2')
        if result != api.types.status.SUCCESS:
            tc.error = True
            return result
    '''

    #
    # Allocate Secondary-IPs for collectors as needed in order to test up to
    # Eight collectors
    #
    result = eutils.establishCollectorSecondaryIPs(tc)
    if result != api.types.status.SUCCESS:
        tc.error = True
        return result

    #
    # Generate feature specific Collector list
    #
    eutils.generateFeatureCollectorList(tc)

    eutils.debugWorkLoadTraces(tc)

    #
    # Retrieve relevant Table-IDs
    #
    eutils.retrieveTableIds(tc)

    #
    # Set up runtime validation knobs
    # - dupcheck:   To circumvent duplicate-pkts checks in case of LIF-ERSPAN
    #               where multiple workloads map to the same LIF
    #
    tc.dupcheck = 'enable'
    if tc.iterators.peer == 'local':
        tc.dupcheck = 'disable'

    #
    # Preserve current TCP-Close configs and shrink it to 1-second
    #
    if not tc.classic_mode:
        tc.tcp_close_val = get_timeout_val('tcp-close')
        update_timeout('tcp-close', "1s")

    # The query returns references to the objects held in the store.
    nw_filter = "meta.name=" + tc.naples.workload_name + ";"
    tc.store_endpoint_objects = netagent_cfg_api.QueryConfigs(kind='Endpoint',
                                                              filter=nw_filter)
    if not tc.store_endpoint_objects:
        api.Logger.error("No Endpoint objects in store")
        tc.ignore = True
        return api.types.status.FAILURE

    #api.Logger.info("Len of endpoint_objects {}".format(len(tc.store_endpoint_objects)))
    #cfg_api.PrintConfigsObjects(tc.store_endpoint_objects)
    #api.Logger.info("Naples selected for test node: {} WL: {}".format(tc.naples.node_name, tc.naples.workload_name))

    return api.types.status.SUCCESS
Example 7
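Builds parallel fuz client/server command requests for each workload pair: one server instance per (server IP, port), with servers on bare-metal nodes combined into a single fuz context and static ARP entries installed on the clients.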
def Trigger(tc):
    if tc.skip: return api.types.status.SUCCESS

    tc.serverCmds = []
    tc.clientCmds = []
    tc.cmd_descr = []

    fuzClients = {}
    fuzServers = {}
    clientReq = api.Trigger_CreateAllParallelCommandsRequest()
    serverReq = api.Trigger_CreateAllParallelCommandsRequest()
    clientArp = defaultdict(lambda: {})

    sip_dip_cache = dict()
    server_key_cache = dict()

    def __sip_dip_key(sip, dip):
        return sip + ":" + dip

    def __server_key(server_ip, port):
        return server_ip + ":" + str(port)

    for idx, pairs in enumerate(tc.workload_pairs):
        client = pairs[0]
        server = pairs[1]
        port = None
        try:
            port = int(pairs[2])
        except (IndexError, ValueError):
            # No explicit port for this pair; allocate one.
            port = api.AllocateTcpPort()

        server_key = __server_key(server.ip_address, port)
        sip_dip_key = __sip_dip_key(client.ip_address, server.ip_address)
        if sip_dip_key in sip_dip_cache:
            # Already added; skip this workload pair.
            continue
        sip_dip_cache[sip_dip_key] = True

        cmd_descr = "Server: %s(%s) <--> Client: %s(%s)" %\
                       (server.workload_name, server.ip_address, client.workload_name, client.ip_address)
        tc.cmd_descr.append(cmd_descr)
        num_sessions = int(getattr(tc.args, "num_sessions", 1))
        api.Logger.info("Starting Fuz test from %s num-sessions %d" %
                        (cmd_descr, num_sessions))

        fuzClient = fuzClients.get(client.workload_name, None)
        if not fuzClient:
            fuzClient = FuzContext(client.workload_name, client.interface,
                                   client.node_name)
            fuzClients[client.workload_name] = fuzClient

        fuzClient.AddServer(server.ip_address, port)

        # Start just one server instance for this (server IP, port) combination.
        if server_key not in server_key_cache:
            server_key_cache[server_key] = True
            # Combine baremetal workloads into a single fuz server context.
            if api.IsBareMetalWorkloadType(server.node_name):
                fuzServer = fuzServers.get(server.node_name, None)
                if not fuzServer:
                    fuzServer = FuzContext(server.workload_name,
                                           server.interface, server.node_name)
                    fuzServers[server.node_name] = fuzServer
                else:
                    fuzServer.AddInterface(server.interface)
                fuzServer.AddServer(server.ip_address, port)
            else:
                serverCmd = FUZ_EXEC[server.workload_name] + " -port " + str(
                    port)
                api.Trigger_AddCommand(serverReq,
                                       server.node_name,
                                       server.workload_name,
                                       serverCmd,
                                       background=True,
                                       stdout_on_err=True,
                                       stderr_on_err=True)

        # For now, install a static ARP entry for the server on the client.
        # Baremetal clients are keyed by node name, others by workload name.
        arp_key = client.node_name if api.IsBareMetalWorkloadType(
            client.node_name) else client.workload_name
        if not clientArp[arp_key].get(server.ip_address, None):
            clientArp[arp_key][server.ip_address] = server.mac_address
            arp_cmd = "arp -s " + server.ip_address + " " + server.mac_address
            api.Trigger_AddCommand(clientReq, client.node_name,
                                   client.workload_name, arp_cmd)

    store = tc.GetBundleStore()
    store["server_req"] = serverReq
    store["client_ctxts"] = fuzClients
    store["server_ctxts"] = fuzServers
    store["arp_ctx"] = clientReq

    return api.types.status.SUCCESS
Example 8
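Adds or deletes secondary IPs on an untagged workload's interface, stepping the addresses by the topology's maximum per-node workload count so allocations do not collide across workloads.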
def ConfigWorkloadSecondaryIp(workload, is_add, sec_ip_count_per_intf=1):
    res = api.types.status.SUCCESS
    wl_sec_ip_list = []
    # Secondary IPs are configured only for untagged workloads.
    if workload.uplink_vlan != 0:
        return wl_sec_ip_list

    nodes = api.GetWorkloadNodeHostnames()
    max_untag_wl = 0
    max_tag_wl = 0
    op = "add" if is_add else "del"

    is_wl_bm_type = False
    for node in nodes:
        if api.IsBareMetalWorkloadType(node):
            is_wl_bm_type = True
        workloads = api.GetWorkloads(node)
        num_untag_wl_in_node = 0
        num_tag_wl_in_node = 0
        for wl in workloads:
            if (wl.uplink_vlan == 0):
                num_untag_wl_in_node += 1
            else:
                num_tag_wl_in_node += 1
        if num_untag_wl_in_node > max_untag_wl:
            max_untag_wl = num_untag_wl_in_node
        if num_tag_wl_in_node > max_tag_wl:
            max_tag_wl = num_tag_wl_in_node
        #api.Logger.info("Node {} WL #untag {} #tag {} ".format(node, num_untag_wl_in_node, num_tag_wl_in_node))

    #api.Logger.info("Topo Max untag WL {} Max tag WL {} ".format(max_untag_wl, max_tag_wl))
    sec_ip_incr_step = max_untag_wl
    if is_wl_bm_type == False:
        sec_ip_incr_step += max_tag_wl

    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)

    wl = workload
    sec_ipv4_allocator = resmgr.IpAddressStep(
        wl.ip_prefix.split('/')[0],
        str(ipaddress.IPv4Address(sec_ip_incr_step)), sec_ip_count_per_intf)
    # Discard the first allocation; the loop below allocates one address
    # per secondary IP.
    sec_ipv4_allocator.Alloc()
    for i in range(sec_ip_count_per_intf):
        sec_ip4_addr_str = str(sec_ipv4_allocator.Alloc())
        sec_ip_prefix = sec_ip4_addr_str + "/" + str(
            wl.ip_prefix.split('/')[1])
        wl_sec_ip_list.append(sec_ip4_addr_str)
        #api.Logger.info("Node {} WL {} Intf {} Secondary IP {} Op {}".format(
        #                 wl.node_name, wl.workload_name, wl.interface, sec_ip_prefix, op))
        api.Trigger_AddCommand(
            req, wl.node_name, wl.workload_name,
            "ip address %s %s dev %s " % (op, sec_ip_prefix, wl.interface))
    api.Trigger(req)

    return wl_sec_ip_list
Example 9
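Returns True if any Naples node in the testbed hosts bare-metal workloads.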
def IsBareMetal():
    for node_name in api.GetNaplesHostnames():
        if api.IsBareMetalWorkloadType(node_name):
            return True
    return False
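
The examples above all follow the same pattern: probe the topology with api.IsBareMetalWorkloadType() and branch the test flow (skip, reconfigure, or constrain the trigger) accordingly. A minimal sketch of the skip variant of that pattern, assuming the same iota api module and testcase object tc used throughout; this is an illustration, not a testcase from the suite:

def Setup(tc):
    # Skip (rather than fail) on bare-metal topologies: probe every Naples
    # node and set tc.skip if any hosts bare-metal workloads.
    tc.skip = False
    for node_name in api.GetNaplesHostnames():
        if api.IsBareMetalWorkloadType(node_name):
            api.Logger.info("Node %s is bare-metal; skipping test" % node_name)
            tc.skip = True
            break
    return api.types.status.SUCCESS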