def Setup(tc):
    """Set up the BC MAC filter testcase: discover topology and cache per-device state."""
    api.Logger.verbose("BC MAC filter : Setup")
    tc.skip = False
    result = api.types.status.SUCCESS

    tc.skip, tc.workloads, tc.naples_node = filters_utils.getNaplesNodeandWorkloads()
    if tc.skip:
        api.Logger.error(
            "BC MAC filter : Setup -> No Naples Topology - So skipping the TC")
        return api.types.status.IGNORED

    tc.device_data = defaultdict()
    tc.arping_count = __ARPING_COUNT

    # Snapshot per-device information used later by Trigger/Verify.
    for device_name in api.GetDeviceNames(tc.naples_node):
        dev_info = parser.Dict2Object({})
        dev_info.intfName2lifId_dict = hal_show_utils.GetIntfName2LifId_mapping(
            tc.naples_node, device_name)
        dev_info.intf_pktfilter_list = getInterfaceList(tc.naples_node, device_name)
        stats, pre_stats, post_stats = getStatObjects(tc, device_name)
        dev_info.statsCount = stats
        dev_info.preStatsCount = pre_stats
        dev_info.postStatsCount = post_stats
        tc.device_data[device_name] = dev_info

    api.Logger.info("BC MAC filter : Setup final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Trigger(tc):
    """Trigger for the UC MAC filter testcase: optionally change MACs, then ping."""
    api.Logger.verbose("UC MAC filter : Trigger")
    result = api.types.status.SUCCESS
    if tc.skip:
        return api.types.status.IGNORED

    if not tc.iterators.mac_change:
        api.Logger.debug(
            "UC MAC filter : Trigger -> NO Change to MAC addresses")
    else:
        result = changeMacAddrTrigger(tc)
        time.sleep(5)  # give the endpoint updates time to settle before pinging
        api.Logger.debug(
            "UC MAC filter : Trigger -> Change MAC addresses result ", result)

    # Triggers done - Now build endpoint view of Host and Naples
    for device_name in api.GetDeviceNames(tc.naples_node):
        dev_info = tc.device_data[device_name]
        (dev_info.wload_ep_set, dev_info.host_ep_set,
         dev_info.naples_ep_set, dev_info.hal_ep_set) = getAllEndPointsView(
            tc, device_name)

    tc.cmd_cookies, tc.resp = traffic_utils.pingAllRemoteWloadPairs(
        mtu=tc.iterators.pktsize, af=str(tc.iterators.ipaf))

    api.Logger.info("UC MAC filter : Trigger final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Verify(tc):
    """Verify the MC MAC filter testcase.

    #TODO
    1. any HW table dump to check? - Not needed apparently
       Ideally, We can also check registered MAC table as follows
       halctl show table dump --entry-id 1840 --table-id 4
       Get the entry id from "halctl show multicast --yaml"
    2. Check for memleaks [will pull from Amrita's TC]
       2.1 "halctl show system memory slab | grep mc_entry"
    """
    api.Logger.verbose("MC MAC filter : Verify")
    if tc.skip:
        return api.types.status.IGNORED
    result = api.types.status.SUCCESS

    # Check if MACs in "halctl show multicast" match with host & naples interface mcast MAC
    if verifyMCEndPoints(tc):
        api.Logger.debug("MC MAC filter : Verify - verifyMCEndPoints SUCCESS ")
    else:
        api.Logger.error("MC MAC filter : Verify failed for verifyMCEndPoints")
        result = api.types.status.FAILURE

    # Check multicast traffic stats
    if verifyMCTrafficStats(tc):
        api.Logger.debug("MC MAC filter : Verify - verifyMCTrafficStats SUCCESS ")
    else:
        api.Logger.error("MC MAC filter : Verify failed for verifyMCTrafficStats ")
        result = api.types.status.FAILURE

    api.Logger.info("MC MAC filter : Verify final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Setup(tc):
    """Set up the MC MAC filter testcase: topology checks plus per-device stat objects."""
    api.Logger.verbose("MC MAC filter : Setup")
    tc.skip = False
    result = api.types.status.SUCCESS

    if not api.RunningOnSameSwitch():
        tc.skip = True
        api.Logger.error("MC MAC filter : Setup -> Multi switch topology not supported yet - So skipping the TC")
        return api.types.status.IGNORED

    tc.skip, tc.workloads, tc.naples_node = filters_utils.getNaplesNodeandWorkloads()
    if tc.skip:
        api.Logger.error("MC MAC filter : Setup -> No Naples Topology - So skipping the TC")
        return api.types.status.IGNORED

    tc.device_data = defaultdict()
    for device_name in api.GetDeviceNames(tc.naples_node):
        dev_info = parser.Dict2Object({})
        # NOTE(review): unlike the BC-filter Setup, this call does not pass
        # device_name, so multi-device nodes would all get the same mapping
        # -- confirm this is intended for the MC case.
        dev_info.intfName2lifId_dict = hal_show_utils.GetIntfName2LifId_mapping(tc.naples_node)
        stats, pre_stats, post_stats = getStatObjects(tc.naples_node, device_name)
        dev_info.statsCount = stats
        dev_info.preStatsCount = pre_stats
        dev_info.postStatsCount = post_stats
        tc.device_data[device_name] = dev_info

    api.Logger.info("MC MAC filter : Setup final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Teardown(tc):
    """Teardown for the BC MAC filter testcase; nothing to roll back."""
    api.Logger.verbose("BC MAC filter : Teardown")
    if tc.skip:
        return api.types.status.IGNORED
    result = api.types.status.SUCCESS
    api.Logger.info("BC MAC filter : Teardown final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Teardown(tc):
    """Teardown for the MTU filter testcase: restore the default MTU on workloads."""
    api.Logger.verbose("MTU filter : Teardown")
    if tc.skip:
        return api.types.status.IGNORED

    # rollback workloads MTU
    result = changeWorkloadIntfMTU(tc, __DEF_MTU)
    if result is not api.types.status.SUCCESS:
        api.Logger.error("MTU filter : rollback failed for changeWorkloadIntfMTU ", result)

    api.Logger.info("MTU filter : Teardown final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Verify(tc):
    """Verify the BC MAC filter testcase.

    #TODO
    1. Check for memleaks, maybe? Can that be made as common verif step?
    """
    api.Logger.verbose("BC MAC filter : Verify")
    result = api.types.status.SUCCESS
    if tc.skip:
        return api.types.status.IGNORED

    # Check if all interfaces have broadcast filter enabled on them
    for device_name in api.GetDeviceNames(tc.naples_node):
        dev_info = tc.device_data[device_name]
        if filters_utils.verifyPktFilters(dev_info.intf_pktfilter_list,
                                          dev_info.intf_pktfilter_dict, True):
            api.Logger.debug(
                "BC MAC filter : Verify - verifyBCPktFilters SUCCESS ")
        else:
            api.Logger.error(
                "BC MAC filter : Verify failed for verifyBCPktFilters ",
                dev_info.intf_pktfilter_dict)
            result = api.types.status.FAILURE

    # Check broadcast traffic stats
    if verifyBCTrafficStats(tc):
        api.Logger.debug(
            "BC MAC filter : Verify - verifyBCTrafficStats SUCCESS ")
    else:
        api.Logger.error(
            "BC MAC filter : Verify failed for verifyBCTrafficStats ")
        result = api.types.status.FAILURE

    if tc.resp is None:
        api.Logger.error("BC MAC filter : Verify failed - no response")
        return api.types.status.FAILURE

    # Any non-zero exit from a foreground command fails the arping sweep.
    for cookie_idx, cmd in enumerate(tc.resp.commands):
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            api.Logger.error("BC MAC filter : Verify failed for %s" % (tc.cmd_cookies[cookie_idx]))
            api.PrintCommandResults(cmd)
            result = api.types.status.FAILURE

    api.Logger.info("BC MAC filter : Verify final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Teardown(tc):
    """Teardown for the UC MAC filter testcase: undo any MAC address changes."""
    api.Logger.verbose("UC MAC filter : Teardown")
    result = api.types.status.SUCCESS
    if tc.skip:
        return api.types.status.IGNORED

    if not tc.iterators.mac_change:
        api.Logger.debug("UC MAC filter : Teardown -> NO rollback")
    else:
        # Second argument True asks the trigger helper to restore the MACs.
        result = changeMacAddrTrigger(tc, True)
        api.Logger.debug(
            "UC MAC filter : Teardown -> rolling back MAC address changes ", result)

    api.Logger.info("UC MAC filter : Teardown final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Trigger(tc):
    """Trigger for the BC MAC filter testcase: capture packet filters, then arping."""
    api.Logger.verbose("BC MAC filter : Trigger")
    result = api.types.status.SUCCESS
    if tc.skip:
        return api.types.status.IGNORED

    # get Packet Filters
    for device_name in api.GetDeviceNames(tc.naples_node):
        dev_info = tc.device_data[device_name]
        intf_pktfilter_dict, res = filters_utils.getAllIntfPktFilter(
            tc.naples_node, device_name)
        if not res:
            api.Logger.error(
                "BC MAC filter : Trigger failed for getAllIntfPktFilter ", res)
            result = api.types.status.FAILURE
            debug_utils.collect_showtech(result)
            return result
        dev_info.intf_pktfilter_dict = intf_pktfilter_dict

    if api.GetNodeOs(tc.naples_node) != "windows":
        # first, find the right arp (we rely on -W option)
        tc.req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        ArpingPrefix = "/usr/sbin/"
        api.Trigger_AddHostCommand(tc.req, tc.naples_node,
                                   "ls %sarping" % ArpingPrefix)
        resp = api.Trigger(tc.req)
        for cmd in resp.commands:
            api.Logger.info("ls %sarping output stdout: %s, stderr: %s" %
                            (ArpingPrefix, cmd.stdout, cmd.stderr))
            if cmd.stderr.find("No such file or directory") == -1:
                api.Logger.info("Using %sarping" % ArpingPrefix)
            else:
                # Binary not at /usr/sbin -- fall back to whatever is on PATH.
                ArpingPrefix = ""
                api.Logger.info("Using the default arping")
        api.Logger.info("Using the following: %s arping" % ArpingPrefix)
        tc.ArpingPrefix = ArpingPrefix

    # Trigger arping and get interface BC stats pre & post trigger
    triggerBCtraffic(tc)

    api.Logger.info("BC MAC filter : Trigger final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Setup(tc):
    """Set up the MTU filter testcase: nodes, OS-specific MTU bounds, and new MTU."""
    api.Logger.verbose("MTU filter : Setup")
    tc.skip = False
    result = api.types.status.SUCCESS
    global __OS_TYPE
    global __MIN_MTU
    global __MAX_MTU

    tc.naples_node, res = naples_host_utils.GetNaplesNodeName()
    if res is False:
        tc.skip = True
    if tc.skip:
        api.Logger.error("MTU filter : Setup -> No Naples Topology - So skipping the TC")
        return api.types.status.IGNORED

    if getNativeWorkloadIntfs(tc) != api.types.status.SUCCESS:
        api.Logger.error("MTU filter : Setup -> Failure in retrieving Native Workload interfaces")
        return api.types.status.FAILURE

    # In Intel cards, post MTU change, need to wait for few sec before pinging
    # instead, set max MTU on peer node
    result = initPeerNode(tc, tc.naples_node)

    # Adjust the module-level MTU bounds based on the first workload node's OS.
    for node in api.GetWorkloadNodeHostnames():
        __OS_TYPE = api.GetNodeOs(node)
        if __OS_TYPE == "freebsd":
            __MIN_MTU = __MIN_MTU_FREEBSD
        elif __OS_TYPE == "windows":
            __MIN_MTU = __MIN_MTU_WINDOWS_IPv4
            __MAX_MTU = __MAX_MTU_WINDOWS
        break

    tc.new_mtu = getMTUconfigs(tc)
    api.Logger.info("MTU filter : new MTU - ", tc.new_mtu)
    api.Logger.info("MTU filter : Setup final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Trigger(tc):
    """Trigger for the MTU filter testcase: apply the new MTU, verify it, then ping."""
    api.Logger.verbose("MTU filter : Trigger")
    result = api.types.status.SUCCESS
    if tc.skip:
        return api.types.status.IGNORED

    # change workloads MTU
    result = changeWorkloadIntfMTU(tc, tc.new_mtu, tc.naples_node)
    if result is not api.types.status.SUCCESS:
        api.Logger.error("MTU filter : Trigger failed for changeWorkloadIntfMTU ", result)
        debug_utils.collect_showtech(result)
        return result

    # check MTU change, windows needs it, while it doesn't hurt on any platform
    result = verifyMTUchange(tc)
    if result is not api.types.status.SUCCESS:
        api.Logger.error("MTU filter : Verify failed for verifyMTUchange")
        debug_utils.collect_showtech(result)
        return result

    # Trigger ping across all remote workload pairs
    triggerMTUPings(tc)

    api.Logger.info("MTU filter : Trigger final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Verify(tc):
    """Verify the UC MAC filter testcase.

    Checks that endpoints reported by HAL match the host/workload interface
    MACs (verifyEndPoints), then validates the ping sweep results collected
    by Trigger (traffic_utils.verifyPing).

    Returns api.types.status.SUCCESS / FAILURE, or IGNORED when skipped.

    #TODO
    1. any HW table dump to check? - Not needed apparently
       Ideally, We can also check registered MAC table as follows
       halctl show table dump --entry-id 1840 --table-id 4
       Get the entry id from "halctl show endpoint --yaml"
    2. Check for memleaks [will pull from Amrita's TC]
       2.1 "halctl show system memory slab | grep ep"
    """
    api.Logger.verbose("UC MAC filter : Verify")
    result = api.types.status.SUCCESS
    if tc.skip:
        return api.types.status.IGNORED
    # (fix: dropped unused local `cookie_idx` that was never read)

    # Check if MACs in "halctl show endpoint" match with host & workload interface MAC
    if not verifyEndPoints(tc):
        api.Logger.error("UC MAC filter : Verify failed for verifyEndPoints")
        result = api.types.status.FAILURE
        debug_utils.collect_showtech(result)
        return result
    else:
        api.Logger.debug("UC MAC filter : Verify - verifyEndPoints SUCCESS ")

    # If "filters" bundle run as first bundle in a suite,
    # there are chances that STP might not have converged yet and as a result of that
    # first couple of remote workload pings might fail.
    # Solutions are 1. Modify n3k configs for reduced STP convergence
    #               2. Increase wait time between pings
    #               3. Add sleep before starting the trigger
    # Last 2 solutions mean increase in script execution time which is a big NO.
    # so leaving it for now as this will not happen in sanity
    result = traffic_utils.verifyPing(tc.cmd_cookies, tc.resp)

    api.Logger.info("UC MAC filter : Verify final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Trigger(tc):
    """Trigger for the MC MAC filter testcase: build mcast endpoint views, send traffic."""
    api.Logger.verbose("MC MAC filter : Trigger")
    if tc.skip:
        return api.types.status.IGNORED
    result = api.types.status.SUCCESS

    # Triggers done - Now build endpoint view of Host and Naples
    for device_name in api.GetDeviceNames(tc.naples_node):
        dev_info = tc.device_data[device_name]
        host_set, naples_set, hal_set = getAllmcastEndPointsView(tc.naples_node, device_name)
        api.Logger.debug("getAllmcastEndPointsView: host_mc_ep_set ", len(host_set), host_set)
        api.Logger.debug("getAllmcastEndPointsView: naples_mc_ep_set ", len(naples_set), naples_set)
        api.Logger.debug("getAllmcastEndPointsView: hal_mc_ep_set ", len(hal_set), hal_set)
        dev_info.host_mc_ep_set = host_set
        dev_info.naples_mc_ep_set = naples_set
        dev_info.hal_mc_ep_set = hal_set

    # Trigger multicast Traffic
    result = triggerMCtraffic(tc)

    api.Logger.info("MC MAC filter : Trigger final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Verify(tc):
    """Verify the MTU filter testcase: MTU applied correctly and pings succeeded."""
    api.Logger.verbose("MTU filter : Verify")
    if tc.skip:
        return api.types.status.IGNORED

    result = verifyMTUchange(tc)
    if result is not api.types.status.SUCCESS:
        api.Logger.error("MTU filter : Verify failed for verifyMTUchange")
        debug_utils.collect_showtech(result)
        return result

    result = verifyMTUPings(tc)
    if result is not api.types.status.SUCCESS:
        api.Logger.error("MTU filter : Verify failed for verifyMTUPings")
        debug_utils.collect_showtech(result)
        return result

    api.Logger.info("MTU filter : Verify final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def Setup(tc):
    """Set up the UC MAC filter testcase: record MACs of naples/host/workload intfs."""
    api.Logger.verbose("UC MAC filter : Setup")
    tc.skip = False
    result = api.types.status.SUCCESS

    tc.skip, tc.workloads, naples_node = filters_utils.getNaplesNodeandWorkloads()
    if tc.skip:
        api.Logger.error(
            "UC MAC filter : Setup -> No Naples Topology - So skipping the TC")
        return api.types.status.IGNORED

    tc.device_data = defaultdict()
    tc.naples_node = naples_node
    for device_name in api.GetDeviceNames(naples_node):
        # Get MAC address of all the naples' interfaces for rollback later
        naples_intf_mac_dict = filters_utils.getNaplesIntfMacAddrDict(
            naples_node, device_name)

        # Get workload interfaces for rollback later
        wloadIntf_list, wload_intf_vlan_map = getNaplesWorkloadInfo(
            naples_node, device_name)

        # Keep a dict of (wloadIntf, mac) pair for rollback later
        wload_intf_mac_dict = {
            intf: host_utils.GetMACAddress(naples_node, intf)
            for intf in wloadIntf_list
        }

        # Get MAC address of all the interfaces on the host of Naples
        # (except the workload interfaces)
        host_intf_list = filters_utils.GetNaplesHostInterfacesList(
            naples_node, device_name)
        api.Logger.verbose("UC MAC filter : Setup host_intf_list : ",
                           host_intf_list)
        host_intf_mac_dict = {
            intf: host_utils.GetMACAddress(naples_node, intf)
            for intf in host_intf_list if intf not in wload_intf_mac_dict
        }

        # Store the following info for building endpoint view and also for rollback
        data = parser.Dict2Object({})
        data.naples_intf_mac_dict = naples_intf_mac_dict
        data.host_intf_mac_dict = host_intf_mac_dict
        data.wload_intf_mac_dict = wload_intf_mac_dict
        data.wload_intf_vlan_map = wload_intf_vlan_map
        api.Logger.debug("UC MAC filter : Setup naples_intf_mac_dict : ",
                         data.naples_intf_mac_dict)
        api.Logger.debug("UC MAC filter : Setup host_intf_mac_dict : ",
                         data.host_intf_mac_dict)
        api.Logger.debug("UC MAC filter : Setup wload_intf_mac_dict : ",
                         data.wload_intf_mac_dict)
        api.Logger.debug("UC MAC filter : Setup wload_intf_vlan_map : ",
                         data.wload_intf_vlan_map)
        tc.device_data[device_name] = data

    api.Logger.info("UC MAC filter : Setup final result - ", result)
    debug_utils.collect_showtech(result)
    return result