def triggerBCtraffic(tc):
    """Run arping between all workload pairs and snapshot the BC frame
    counters of every Naples device before and after the traffic burst."""
    node = tc.naples_node

    def _snapshot_stats(counters_attr):
        # Record per-interface BC frame counters for every device on the node.
        for dev in api.GetDeviceNames(node):
            dev_data = tc.device_data[dev]
            GetBCFramesTxRxStats(node, dev, getattr(dev_data, counters_attr),
                                 dev_data.intfName2lifId_dict)

    _snapshot_stats('preStatsCount')

    # Queue arping in both directions for every workload pair, then fire
    # all queued commands in parallel.
    tc.req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
    tc.cmd_cookies = []
    for pair in tc.workloads:
        triggerArping(pair[0], pair[1], tc)
        triggerArping(pair[1], pair[0], tc)
    trig_resp = api.Trigger(tc.req)
    term_resp = api.Trigger_TerminateAllCommands(trig_resp)
    tc.resp = api.Trigger_AggregateCommandsResponse(trig_resp, term_resp)

    _snapshot_stats('postStatsCount')
    return
def GetTables(node_name, device_name=None):
    """Return the P4 table names for a device, cached per (node, device).

    Falls back to the node's first device when device_name is omitted.
    """
    if not device_name:
        device_name = api.GetDeviceNames(node_name)[0]

    def _fetch():
        # Query p4ctl and parse the JSON metadata after the "---" marker.
        marker = "---"
        names = []
        raw = RunP4ctlCmd_LIST(node_name, "--out_json", print_result=False,
                               device_name=device_name)
        if not raw:
            return names
        try:
            parts = raw.split(marker)
            if len(parts) != 2:
                api.Logger.error(
                    "Make sure there is only one occurance of marker: '%s'" %
                    marker)
                return names
            for entry in json.loads(parts[1]):
                names.append(entry['name'])
        except Exception as e:
            traceback.print_exc()
            api.Logger.error("Failed to parse the output: %s" % e)
        return names

    global __tables_cache
    key = (node_name, device_name)
    if key not in __tables_cache:
        __tables_cache[key] = _fetch()
    return __tables_cache[key]
def Trigger(tc):
    """UC MAC filter trigger: optionally change MAC addresses, rebuild the
    endpoint views for each device, then ping all remote workload pairs."""
    api.Logger.verbose("UC MAC filter : Trigger")
    result = api.types.status.SUCCESS
    if tc.skip:
        return api.types.status.IGNORED

    if tc.iterators.mac_change:
        result = changeMacAddrTrigger(tc)
        # Give the data path a moment to settle after the MAC change.
        time.sleep(5)
        api.Logger.debug(
            "UC MAC filter : Trigger -> Change MAC addresses result ", result)
    else:
        api.Logger.debug(
            "UC MAC filter : Trigger -> NO Change to MAC addresses")

    # Triggers done - Now build endpoint view of Host and Naples
    for device_name in api.GetDeviceNames(tc.naples_node):
        data = tc.device_data[device_name]
        views = getAllEndPointsView(tc, device_name)
        for attr, view in zip(
            ('wload_ep_set', 'host_ep_set', 'naples_ep_set', 'hal_ep_set'),
                views):
            setattr(data, attr, view)

    tc.cmd_cookies, tc.resp = traffic_utils.pingAllRemoteWloadPairs(
        mtu=tc.iterators.pktsize, af=str(tc.iterators.ipaf))

    api.Logger.info("UC MAC filter : Trigger final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def verifyBCTrafficStats(tc):
    """Compare per-interface BC frame deltas against the expected counts.

    Returns False when, for some interface, every tx/rx delta came in
    below its expected increase; True otherwise.
    """
    ok = True
    for device_name in api.GetDeviceNames(tc.naples_node):
        data = tc.device_data[device_name]
        for intf, expected in data.statsCount.items():
            # BC frame counters captured before / after the trigger.
            before = data.preStatsCount[intf]
            after = data.postStatsCount[intf]
            # Actual BC frame increase caused by the trigger.
            actual = [post - pre for post, pre in zip(after, before)]
            api.Logger.debug("verifyBCTrafficStats info for ", intf, expected,
                             actual, before, after)
            # Agent's looking for venice which is generated dhcp bcast pkts
            # which is affecting the counts. For now failing only if
            # interfaces received less number compared to expected.
            if all(a < e for a, e in zip(actual, expected)):
                ok = False
                api.Logger.error("verifyBCTrafficStats failed for ", intf,
                                 expected, actual, before, after)
    return ok
def Setup(tc):
    """BC MAC filter setup: locate the Naples node and workloads, then
    record per-device interface maps and stat counter objects."""
    api.Logger.verbose("BC MAC filter : Setup")
    tc.skip = False
    result = api.types.status.SUCCESS

    tc.skip, tc.workloads, tc.naples_node = \
        filters_utils.getNaplesNodeandWorkloads()
    if tc.skip:
        api.Logger.error(
            "BC MAC filter : Setup -> No Naples Topology - So skipping the TC")
        return api.types.status.IGNORED

    tc.device_data = defaultdict()
    tc.arping_count = __ARPING_COUNT
    for device_name in api.GetDeviceNames(tc.naples_node):
        data = parser.Dict2Object({})
        setattr(data, 'intfName2lifId_dict',
                hal_show_utils.GetIntfName2LifId_mapping(
                    tc.naples_node, device_name))
        setattr(data, 'intf_pktfilter_list',
                getInterfaceList(tc.naples_node, device_name))
        counters = getStatObjects(tc, device_name)
        for attr, obj in zip(('statsCount', 'preStatsCount', 'postStatsCount'),
                             counters):
            setattr(data, attr, obj)
        tc.device_data[device_name] = data

    api.Logger.info("BC MAC filter : Setup final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def get_uplink_stats(port_str, node_name, nic_name=None, stat_type=None):
    """Return a single uplink-port statistic read via pdsctl.

    Args:
        port_str: uplink port name; must be 'Eth1/1' or 'Eth1/2'.
        node_name: testbed node to query.
        nic_name: device name; defaults to the node's first device.
        stat_type: stat row label to match; defaults to 'FRAMES RX OK'.

    Returns:
        The integer counter value, or 0 when no matching stat row exists.

    Raises:
        Exception: on an invalid port name, a uuid lookup failure, or a
            pdsctl/GRPC command failure.
    """
    if port_str not in ['Eth1/1', 'Eth1/2']:
        # BUGFIX: the message was previously built as
        # Exception("Invalid port %s", port_str), which never interpolates
        # the placeholder; format explicitly instead.
        raise Exception("Invalid port %s" % port_str)
    if nic_name is None:
        nic_name = api.GetDeviceNames(node_name)[0]
    if stat_type is None:
        stat_type = 'FRAMES RX OK'
    uuid = get_port_uuid(port_str, node_name, nic_name)
    if not uuid:
        # BUGFIX: same placeholder bug as above.
        raise Exception("uuid get failed for port %s" % port_str)
    cmd = "pdsctl show port statistics"
    show_cmd_substr = "port statistics"
    args = '--port ' + uuid
    ret, resp = pdsctl.ExecutePdsctlShowCommand(node_name, nic_name,
                                                show_cmd_substr, args,
                                                yaml=False)
    if ret != True:
        raise Exception("%s command failed" % cmd)
    if ('API_STATUS_NOT_FOUND' in resp) or ("err rpc error" in resp):
        raise Exception("GRPC get request failed for %s command" % cmd)
    # The stat row's last whitespace-separated token is the counter value.
    for line in resp.splitlines():
        if stat_type in line:
            return int(line.strip().split()[-1])
    return 0
def Setup(tc):
    """MC MAC filter setup: validate the topology, then record per-device
    interface maps and stat counter objects."""
    api.Logger.verbose("MC MAC filter : Setup")
    tc.skip = False
    result = api.types.status.SUCCESS

    if not api.RunningOnSameSwitch():
        tc.skip = True
        api.Logger.error("MC MAC filter : Setup -> Multi switch topology not supported yet - So skipping the TC")
        return api.types.status.IGNORED

    tc.skip, tc.workloads, tc.naples_node = \
        filters_utils.getNaplesNodeandWorkloads()
    if tc.skip:
        api.Logger.error("MC MAC filter : Setup -> No Naples Topology - So skipping the TC")
        return api.types.status.IGNORED

    tc.device_data = defaultdict()
    for device_name in api.GetDeviceNames(tc.naples_node):
        data = parser.Dict2Object({})
        # NOTE(review): unlike the BC variant, the mapping lookup here does
        # not pass device_name - confirm that is intentional.
        setattr(data, 'intfName2lifId_dict',
                hal_show_utils.GetIntfName2LifId_mapping(tc.naples_node))
        counters = getStatObjects(tc.naples_node, device_name)
        for attr, obj in zip(('statsCount', 'preStatsCount', 'postStatsCount'),
                             counters):
            setattr(data, attr, obj)
        tc.device_data[device_name] = data

    api.Logger.info("MC MAC filter : Setup final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def DeleteNMDDb(n):
    """Remove the NMD DB and cluster trust roots on every Naples of node n."""
    api.Logger.info("Deleting NMD DB.")
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    cleanup_cmds = (
        "rm -rf /sysconfig/config0/nmd.db",
        "rm -f /sysconfig/config0/clusterTrustRoots.pem",
    )
    for naples in api.GetDeviceNames(n):
        for cleanup_cmd in cleanup_cmds:
            api.Trigger_AddNaplesCommand(req, n, cleanup_cmd, naples=naples)
    resp = api.Trigger(req)
def AddPenctlCommand(req, node, cmd, device=None):
    """Queue a penctl command on the host - for one device when given,
    otherwise for every device on the node."""
    def _queue(dev):
        # Prefix the command with the per-device penctl invocation.
        api.Trigger_AddHostCommand(req, node,
                                   __get_pen_ctl_cmd(node, dev) + cmd,
                                   background=False, timeout=60 * 120)

    if device:
        _queue(device)
    else:
        for naples in api.GetDeviceNames(node):
            _queue(naples)
def get_classic_node_nic_pairs():
    """Return (node_name, device_name) pairs for every NIC in classic mode."""
    return [(node.Name(), dev_name)
            for node in api.GetNodes()
            for dev_name in api.GetDeviceNames(node.Name())
            if api.GetTestbedNicMode(node.Name(), dev_name) == 'classic']
def GetHostInterfaces(node, device=None):
    """Return NaplesInterface objects of type HOST for the given device, or
    for every device on the node when device is falsy."""
    result = []
    for dev in ([device] if device else api.GetDeviceNames(node)):
        for host_if in api.GetWorkloadNodeHostInterfaces(node, dev):
            api.Logger.debug("HostInterface for node:%s interface:%s " %
                             (node, host_if))
            result.append(
                NaplesInterface(node, host_if, InterfaceType.HOST,
                                api.GetNodeOs(node), dev))
    return result
def GetHostInternalMgmtInterfaces(node, device=None):
    """Return NaplesInterface objects for the host-side internal-mgmt
    interfaces of one device, or of every device when device is falsy."""
    result = []
    for dev in ([device] if device else api.GetDeviceNames(node)):
        for intf in naples_host.GetHostInternalMgmtInterfaces(node, dev):
            result.append(
                NaplesInterface(node, intf, InterfaceType.HOST_INTERNAL,
                                api.GetNodeOs(node), dev))
    api.Logger.debug("HostInternalMgmtInterfaces for node: ", node, result)
    return result
def GetNaplesInbandInterfaces(node, device=None):
    """Return NaplesInterface objects for the inband (100G) interfaces of a
    Naples node; empty list when the node is not a Naples node."""
    if not api.IsNaplesNode(node):
        return []
    result = []
    for dev in ([device] if device else api.GetDeviceNames(node)):
        for intf in naples_host.GetNaplesInbandInterfaces(node, dev):
            result.append(
                NaplesInterface(node, intf, InterfaceType.NAPLES_IB_100G,
                                'linux', dev))
    api.Logger.debug("NaplesInbandInterfaces for node: ", node, result)
    return result
def verifyMCEndPoints(tc):
    """Check HAL's multicast endpoint view against the combined host+naples
    view for every device on the Naples node."""
    for device_name in api.GetDeviceNames(tc.naples_node):
        data = tc.device_data[device_name]
        # NOTE(review): the original comment claimed HAL's view should equal
        # workload + host + naples, but only the host and naples sets are
        # combined here - confirm the workload set is intentionally excluded.
        expected_view = data.host_mc_ep_set | data.naples_mc_ep_set
        if not filters_utils.verifyEndpoints(expected_view,
                                             data.hal_mc_ep_set):
            return False
    return True
def __start_netcat(req, node, cmd, dst):
    """Queue a background netcat command on either every naples device of
    the node (dst == 'naples') or on the host (dst == 'host')."""
    if dst == 'naples':
        for device in api.GetDeviceNames(node):
            api.Trigger_AddNaplesCommand(req, node, cmd, device,
                                         background=True)
    elif dst == 'host':
        api.Trigger_AddHostCommand(req, node, cmd, background=True)
def triggerMCtraffic(tc):
    """Drive multicast traffic between all workload pairs and snapshot the
    per-interface MC frame counters before and after the run.

    Returns api.types.status.SUCCESS, or the first failing status from
    initiateMCtraffic (bails out immediately on failure, skipping the
    post-trigger stats snapshot).
    """
    result = api.types.status.SUCCESS
    workload_pairs = tc.workloads
    naples_node = tc.naples_node
    #Get stats before trigger
    for device_name in api.GetDeviceNames(naples_node):
        data = tc.device_data[device_name]
        GetMCFramesTxRxStats(naples_node, device_name, data.preStatsCount,
                             data.intfName2lifId_dict)
    #num_pairs = 0
    for pair in workload_pairs:
        #num_pairs = num_pairs + 1
        #if num_pairs != tc.iterators.mc_num:
        #    continue
        w1 = pair[0]
        w2 = pair[1]
        api.Logger.verbose("workload1 : ", w1.workload_name, w1.node_name,
                           w1.uplink_vlan, w1.interface, w1.parent_interface,
                           w1.IsNaples())
        api.Logger.verbose("workload2 : ", w2.workload_name, w2.node_name,
                           w2.uplink_vlan, w2.interface, w2.parent_interface,
                           w2.IsNaples())
        # Initiate traffic from whichever workload of the pair lives on the
        # Naples node, so that device's statsCount object is updated.
        if w1.node_name == naples_node:
            #printMCstats(w1, w2, tc, "BeforeHPING")
            data = tc.device_data[w1.device_name]
            result = initiateMCtraffic(w1, w2, data.statsCount)
            #printMCstats(w1, w2, tc, "AfterHPING")
        else:
            #printMCstats(w2, w1, tc, "BeforeHPING")
            data = tc.device_data[w2.device_name]
            result = initiateMCtraffic(w2, w1, data.statsCount)
            #printMCstats(w2, w1, tc, "AfterHPING")
        if result != api.types.status.SUCCESS:
            #In case of failure, bail out immediately
            api.Logger.info("triggerMCtraffic failed for workloads ",
                            w1.workload_name, w2.workload_name)
            return result
        #break
    #Get stats after trigger
    for device_name in api.GetDeviceNames(naples_node):
        data = tc.device_data[device_name]
        GetMCFramesTxRxStats(naples_node, device_name, data.postStatsCount,
                             data.intfName2lifId_dict)
    return result
def checkLinks(tc, node):
    """Log the oper state of every port on every Naples of a node.

    Runs 'halctl show port --yaml' per device and parses the '---'
    separated YAML documents, one per port.

    Returns api.types.status.FAILURE only when the halctl trigger itself
    fails or returns no response; a down port is currently logged but NOT
    treated as a failure (see TODO below).
    """
    result = api.types.status.SUCCESS
    all_naples = api.GetDeviceNames(node)
    api.Logger.info(f"Working with the following Naples Cards: {all_naples}")
    for naples in api.GetDeviceNames(node):
        req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
        command = "/nic/bin/halctl show port --yaml"
        api.Trigger_AddNaplesCommand(req, node, command, naples)
        tc.resp = api.Trigger(req)
        if tc.resp is None:
            return api.types.status.FAILURE
        api.Logger.info("Verifying Link State for Uplink Ports:")
        for cmd in tc.resp.commands:
            if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
                api.Logger.error(cmd.stderr)
                result = api.types.status.FAILURE
            #split output of halctl. Each entry will hold information about a port
            perPortOutput = cmd.stdout.split("---")
            for portInfo in perPortOutput:
                testobj = yaml.load(portInfo, Loader=yaml.FullLoader)
                #last split is always empty - skip it. Otherwise process it
                if bool(testobj):
                    portId = testobj['spec']['keyorhandle']['keyorhandle'][
                        'portid']
                    portStatus = testobj['status']['linkstatus']['operstate']
                    # operstate other than 1 is treated as not-up here.
                    if portStatus != 1:
                        api.Logger.error(
                            f"ERROR:[{cmd.node_name},{naples}] port:{portId} status:{portStatus}"
                        )
                        #TODO: return success for now. There is an issue with the port stuck in down state
                        result = api.types.status.SUCCESS
                    else:
                        api.Logger.info(
                            f" [{cmd.node_name},{naples}] port:{portId} status:{portStatus}"
                        )
    return result
def ResetNMDState(n):
    """Reset NMD state on every Naples of node n: delete the NMD DB, the
    startup/device configs, the pen-nmd log and the cluster trust roots."""
    api.Logger.info("Resetting NMD State.")
    req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
    for naples in api.GetDeviceNames(n):
        # BUGFIX: the log-file removal previously omitted naples=naples,
        # unlike every sibling command in this loop, so it was not directed
        # at the device being iterated.
        api.Trigger_AddNaplesCommand(
            req, n, "rm -rf /var/log/pensando/pen-nmd.log", naples=naples)
        api.Trigger_AddNaplesCommand(req, n,
                                     "rm -rf /sysconfig/config0/nmd.db",
                                     naples=naples)
        api.Trigger_AddNaplesCommand(
            req, n, "rm -rf /sysconfig/config0/app-start.conf", naples=naples)
        api.Trigger_AddNaplesCommand(
            req, n, "rm -rf /sysconfig/config0/device.conf", naples=naples)
        api.Trigger_AddNaplesCommand(
            req, n, "rm -f /sysconfig/config0/clusterTrustRoots.pem",
            naples=naples)
    resp = api.Trigger(req)
def CreateNaplesCores(n):
    """Create dummy core files under /data/core on every Naples of node n.

    Returns SUCCESS when every touch command succeeds, FAILURE otherwise.
    """
    req = api.Trigger_CreateExecuteCommandsRequest()
    for naples in api.GetDeviceNames(n):
        for core_file in core_file_names:
            api.Trigger_AddNaplesCommand(
                req, n, "touch /data/core/%s" % (core_file), naples=naples)
    resp = api.Trigger(req)
    for cmd_resp in resp.commands:
        api.PrintCommandResults(cmd_resp)
        if cmd_resp.exit_code != 0:
            # BUGFIX: the command was previously passed as a second logger
            # argument ("... %s", cmd_resp.command), leaving the %s
            # placeholder unformatted; interpolate it explicitly to match
            # the formatting convention used elsewhere in this file.
            api.Logger.error("Creating core failed %s" % cmd_resp.command)
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def __get_agent_cfg_nodes(node_names=None, device_names=None):
    """Build agent cfg nodes (hostname, mgmt IP, NIC internal-mgmt IP).

    Args:
        node_names: nodes to include; defaults to all Naples hostnames.
        device_names: devices to include; when omitted, each node's own
            device list is used.

    Returns:
        A list of cfg_api cfg-node objects, one per (node, device).
    """
    agent_node_names = node_names or api.GetNaplesHostnames()
    agent_cfg_nodes = []
    for node_name in agent_node_names:
        assert (api.IsNaplesNode(node_name))
        ip = api.GetNaplesMgmtIpAddress(node_name)
        if not ip:
            assert (0)
        # BUGFIX: previously the device_names parameter itself was
        # overwritten with the first node's device list, so every later
        # node silently reused node 1's devices. Resolve the default per
        # node instead of mutating the parameter.
        node_devices = device_names or api.GetDeviceNames(node_name)
        for device_name in node_devices:
            nic_ip = api.GetNicIntMgmtIP(node_name, device_name)
            agent_cfg_nodes.append(cfg_api.NewCfgNode(node_name, ip, nic_ip))
    return agent_cfg_nodes
def Verify(tc):
    """BC MAC filter verify: check that broadcast packet filters are set on
    all interfaces, that BC traffic stats moved as expected, and that every
    triggered command exited cleanly.

    Returns SUCCESS/FAILURE, or IGNORED when the testcase was skipped.

    #TODO
    1. Check for memleaks, maybe? Can that be made as common verif step?
    """
    api.Logger.verbose("BC MAC filter : Verify")
    result = api.types.status.SUCCESS
    if tc.skip:
        return api.types.status.IGNORED

    # Check if all interfaces have broadcast filter enabled on them
    for device_name in api.GetDeviceNames(tc.naples_node):
        data = tc.device_data[device_name]
        if not filters_utils.verifyPktFilters(data.intf_pktfilter_list,
                                              data.intf_pktfilter_dict, True):
            api.Logger.error(
                "BC MAC filter : Verify failed for verifyBCPktFilters ",
                data.intf_pktfilter_dict)
            result = api.types.status.FAILURE
        else:
            api.Logger.debug(
                "BC MAC filter : Verify - verifyBCPktFilters SUCCESS ")

    # Check broadcast traffic stats
    if not verifyBCTrafficStats(tc):
        api.Logger.error(
            "BC MAC filter : Verify failed for verifyBCTrafficStats ")
        result = api.types.status.FAILURE
    else:
        api.Logger.debug(
            "BC MAC filter : Verify - verifyBCTrafficStats SUCCESS ")

    if tc.resp is None:
        api.Logger.error("BC MAC filter : Verify failed - no response")
        return api.types.status.FAILURE

    # Each response command lines up positionally with the cookie recorded
    # at trigger time; background commands may legitimately exit non-zero.
    cookie_idx = 0
    for cmd in tc.resp.commands:
        #api.Logger.info("BC MAC filter : Results for %s" % (tc.cmd_cookies[cookie_idx]))
        #api.PrintCommandResults(cmd)
        if cmd.exit_code != 0 and not api.Trigger_IsBackgroundCommand(cmd):
            api.Logger.error("BC MAC filter : Verify failed for %s" %
                             (tc.cmd_cookies[cookie_idx]))
            api.PrintCommandResults(cmd)
            result = api.types.status.FAILURE
        cookie_idx += 1

    api.Logger.info("BC MAC filter : Verify final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def triggerArping(w1, w2, tc):
    """Queue an arping from workload w2 towards w1's IP and update the
    expected per-interface BC frame counters in statsCount.

    statsCount entries are mutable [tx, rx] counters keyed by parent
    interface:
      - untagged pair (w1.encap_vlan == 0): every interface of every
        device is expected to receive the broadcast, so rx is bumped
        everywhere;
      - tagged pair: only the pair's parent interface gets the rx bump;
      - when the sender (w2) lives on the Naples node, its parent
        interface also gets the tx bump, and the rx bump added above is
        taken back for that interface.
    """
    arping_count = tc.arping_count
    if w2.node_name == tc.naples_node:
        #get ETH_HOST interface on Naple for this workload pair
        ethIntf = w2.parent_interface
        data = tc.device_data[w2.device_name]
    else:
        ethIntf = w1.parent_interface
        data = tc.device_data[w1.device_name]

    if (w1.encap_vlan == 0):
        #In case of untagged workload, interface being untagged,
        #all untagged interfaces will receive BC traffic. so increment for all
        # NOTE(review): this loop rebinds 'data' to the last device iterated;
        # the ethIntf lookups below then run against that object - confirm
        # ethIntf is expected to exist on that device's statsCount.
        for device_name in api.GetDeviceNames(tc.naples_node):
            data = tc.device_data[device_name]
            for intf in data.statsCount.keys():
                txrxbcframes = data.statsCount[intf]
                #increase rx by arping_count
                txrxbcframes[1] += arping_count
                data.statsCount[intf] = txrxbcframes
    else:
        #In case of tagged workload, increment rx BC only for that parent interface
        txrxbcframes = data.statsCount[ethIntf]
        #increase rx by arping_count
        txrxbcframes[1] += arping_count
        data.statsCount[ethIntf] = txrxbcframes

    if w2.node_name == tc.naples_node:
        #If arping being done on naples node, then increment tx for that parent interface
        txrxbcframes = data.statsCount[ethIntf]
        #increase tx by arping_count
        txrxbcframes[0] += arping_count
        #As arping being done on naples node itself, all other untagged interface
        #except this parent interface must receive BC traffic.
        #So decrease rx by arping_count as we had incremented it already above.
        txrxbcframes[1] -= arping_count
        data.statsCount[ethIntf] = txrxbcframes

    #do arping
    if api.GetNodeOs(tc.naples_node) == "windows":
        cmd_cookie = "%s \"arp-ping -n %d %s\"" % (api.WINDOWS_POWERSHELL_CMD,
                                                   arping_count, w1.ip_address)
    else:
        cmd_cookie = "%sarping -W 0.01 -c %d %s" % (
            tc.ArpingPrefix, arping_count, w1.ip_address)
    api.Trigger_AddHostCommand(tc.req, w2.node_name, cmd_cookie)
    tc.cmd_cookies.append(cmd_cookie)
    return
def __init__(self, wlSpec, default_vlan):
    """Build the per-device host-interface inventory for a workload spec.

    Args:
        wlSpec: workload spec object (provides .node and .count).
        default_vlan: native vlan for the node.
    """
    self.__wlSpec = wlSpec
    self.__node = wlSpec.node
    self.__default_vlan = default_vlan
    # dev_name -> list of per-host-interface descriptor objects
    self.__interfaces = {}
    self.__nic_devices = api.GetDeviceNames(self.__node)
    # Since its multi-Naples scenario, regenerate wl.interfaces
    for idx, dev_name in enumerate(self.__nic_devices):
        iflist = []
        hostIfList = api.GetWorkloadNodeHostInterfaces(
            self.GetNodeName(), dev_name)
        ifType = api.GetWorkloadNodeHostInterfaceType(
            self.GetNodeName(), dev_name)
        for ifnum, hostIntf in enumerate(hostIfList):
            # One descriptor object per host interface of this device.
            obj = parser.Dict2Object({})
            setattr(obj, 'HostInterface', hostIntf)
            setattr(
                obj, 'ParentHostInterface',
                api.GetNodeParentHostInterface(self.GetNodeName(), dev_name,
                                               hostIntf))
            setattr(obj, 'LogicalInterface', 'host_if{0}'.format(ifnum + 1))
            setattr(
                obj, 'InterfaceIndex',
                api.GetNodeParentHostInterfaceIndex(
                    self.GetNodeName(), dev_name, hostIntf))
            setattr(obj, 'InterfaceType', ifType)
            iflist.append(obj)
        if iflist:
            self.__interfaces[dev_name] = iflist
    if self.__wlSpec.count == 'auto':
        # Review following with bug PS-724 is fixed
        # Split the testbed vlan pool evenly across nic devices.
        self.__num_subifs = int((api.Testbed_GetVlanCount() - 2) /
                                len(self.__nic_devices))  # 1 for native
        api.Logger.info(
            "With %d nic-devices, evaluating %d sub-ifs with %d vlans" %
            (len(self.__nic_devices), self.__num_subifs,
             api.Testbed_GetVlanCount()))
        # self.__num_subifs = 32
    else:
        self.__num_subifs = int(self.__wlSpec.count)
    self.portUdpAllocator = resmgr.TestbedPortAllocator(20000)
    self.portTcpAllocator = resmgr.TestbedPortAllocator(30000)
    return
def Trigger(tc):
    """BC MAC filter trigger: collect per-interface packet filters, locate a
    usable arping binary (non-Windows hosts), then drive broadcast traffic
    and capture pre/post stats via triggerBCtraffic().

    Returns SUCCESS/FAILURE, or IGNORED when the testcase was skipped.
    """
    api.Logger.verbose("BC MAC filter : Trigger")
    result = api.types.status.SUCCESS
    if tc.skip:
        return api.types.status.IGNORED

    #get Packet Filters
    for device_name in api.GetDeviceNames(tc.naples_node):
        data = tc.device_data[device_name]
        intf_pktfilter_dict, res = filters_utils.getAllIntfPktFilter(
            tc.naples_node, device_name)
        if not res:
            api.Logger.error(
                "BC MAC filter : Trigger failed for getAllIntfPktFilter ",
                res)
            result = api.types.status.FAILURE
            debug_utils.collect_showtech(result)
            return result
        setattr(data, 'intf_pktfilter_dict', intf_pktfilter_dict)

    if api.GetNodeOs(tc.naples_node) != "windows":
        #first, find the right arp (we rely on -W option)
        tc.req = api.Trigger_CreateExecuteCommandsRequest(serial=False)
        ArpingPrefix = "/usr/sbin/"
        api.Trigger_AddHostCommand(tc.req, tc.naples_node,
                                   "ls %sarping" % ArpingPrefix)
        resp = api.Trigger(tc.req)
        for cmd in resp.commands:
            api.Logger.info("ls %sarping output stdout: %s, stderr: %s" %
                            (ArpingPrefix, cmd.stdout, cmd.stderr))
            # Fall back to whatever arping is on PATH when /usr/sbin has none.
            if cmd.stderr.find("No such file or directory") != -1:
                ArpingPrefix = ""
                api.Logger.info("Using the default arping")
            else:
                api.Logger.info("Using %sarping" % ArpingPrefix)
        api.Logger.info("Using the following: %s arping" % ArpingPrefix)
        tc.ArpingPrefix = ArpingPrefix

    #Trigger arping and get interface BC stats pre & post trigger
    triggerBCtraffic(tc)

    api.Logger.info("BC MAC filter : Trigger final result - ", result)
    debug_utils.collect_showtech(result)
    return result
def __add_workloads(nodes): req = topo_svc.WorkloadMsg() # Create list(s) based on NicMode classic_wload_nodes = [] unified_wload_nodes = [] vmotion_enabled_nodes = [] # Iterate over all devices for node in nodes: for dev_name in api.GetDeviceNames(node.Name()): if api.GetTestbedNicMode(node.Name(), dev_name) in [ 'hostpin', 'hostpin_dvs', 'unified' ]: if node.Name() not in unified_wload_nodes: unified_wload_nodes.append(node.Name()) if api.GetTestbedNicMode(node.Name(), dev_name) == 'hostpin_dvs': if node.Name() not in vmotion_enabled_nodes: vmotion_enabled_nodes.append(node.Name()) elif api.GetTestbedNicMode(node.Name(), dev_name) in ['classic', 'sriov']: if node.Name() not in classic_wload_nodes: classic_wload_nodes.append(node.Name()) else: api.Logger.error( "Unknown NicMode for node %s device %s - Skipping" % (node, dev_name)) ret = api.types.status.SUCCESS if classic_wload_nodes: api.Logger.info("Creating Classic-Workloads for %s" % classic_wload_nodes) ret = wl_orch.AddConfigClassicWorkloads(req, classic_wload_nodes) if unified_wload_nodes: api.Logger.info("Creating hostpin/unified mode workloads for %s" % unified_wload_nodes) ret = wl_orch.AddConfigWorkloads(req, unified_wload_nodes) if ret == api.types.status.SUCCESS and len(req.workloads): resp = api.AddWorkloads(req, skip_bringup=api.IsConfigOnly()) if resp is None: return api.types.status.FAILURE if vmotion_enabled_nodes: ret = __setup_vmotion_on_hosts(vmotion_enabled_nodes) return ret
def Main(tc):
    """Dump (and on BSD clear) per-interface driver packet stats on every
    Naples node/device, after checking the node for ionic errors.

    Returns SUCCESS, or FAILURE when the trigger or any stats command fails.
    """
    for node in api.GetNaplesHostnames():
        ionic_utils.checkForIonicError(node)
        for dev_name in api.GetDeviceNames(node):
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            # Queue one stats command per host interface; the exact command
            # depends on the host OS.
            for i in api.GetNaplesHostInterfaces(node, dev_name):
                os = api.GetNodeOs(node)
                if os == host.OS_TYPE_BSD:
                    api.Trigger_AddHostCommand(
                        req, node, "bash " + IONIC_STATS_FILE +
                        " -i %s -c" % (host.GetNaplesSysctl(i)))
                    # Clear the stats.
                    api.Trigger_AddHostCommand(
                        req, node, 'sysctl dev.%s.reset_stats=1 1>/dev/null' %
                        host.GetNaplesSysctl(i))
                elif os == host.OS_TYPE_WINDOWS:
                    intf = workload_api.GetNodeInterface(node, dev_name)
                    name = intf.WindowsIntName(i)
                    api.Trigger_AddHostCommand(
                        req, node,
                        "/mnt/c/Windows/temp/drivers-windows/IonicConfig.exe portstats -n '%s'"
                        % name)
                else:
                    api.Trigger_AddHostCommand(
                        req, node, 'ethtool -S %s | grep packets' % i)
            resp = api.Trigger(req)
            if resp is None:
                # NOTE(review): 'i' is left over from the loop above, so this
                # message names only the last interface even though the
                # failure is per-device - confirm intended.
                api.Logger.error(
                    "Failed to get stats for %s, is driver loaded?" % i)
                return api.types.status.FAILURE
            for cmd in resp.commands:
                if cmd.exit_code == 0:
                    if cmd.stdout:
                        #Log for debugging for now.
                        api.Logger.info("Stats output for %s: %s" %
                                        (i, cmd.stdout))
                else:
                    api.Logger.error("Command failed to run: %s" % cmd.stderr)
                    return api.types.status.FAILURE
    return api.types.status.SUCCESS
def GetDelphictlNapleStatusJson(n, device=None):
    """Fetch NaplesStatus from delphictl as JSON.

    Queries the given device (or every device on the node when device is
    falsy) and returns the stdout of the first queued command, or None when
    that command exited non-zero.
    """
    status_cmd = "delphictl db get NaplesStatus --json"
    req = api.Trigger_CreateExecuteCommandsRequest()
    for naples in ([device] if device else api.GetDeviceNames(n)):
        api.Trigger_AddNaplesCommand(req, n, status_cmd, naples=naples)
    resp = api.Trigger(req)
    first_cmd = resp.commands[0]
    api.PrintCommandResults(first_cmd)
    if first_cmd.exit_code != 0:
        return None
    return first_cmd.stdout
def Setup(test_case):
    """Record the baseline oper state of every port on every Naples before
    the link-reset trigger runs.

    Creates a Port object per (node, naples, port), queries
    'halctl show port --yaml' for each, and stores the parsed oper states.
    Returns SUCCESS, or FAILURE when a trigger fails.
    """
    api.Logger.info("Link Down count verify after link resets")
    test_case.nodes = api.GetNaplesHostnames()
    # for each node
    for node in test_case.nodes:
        naples_devices = api.GetDeviceNames(node)
        # for each Naples on node
        for naples_device in naples_devices:
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            # for each port on Naples
            # cmds sent follows the ports ordering in ports_ifindex_arr
            for port_id in range(len(ports_ifindex_arr)):
                port_name = get_port_name(port_id)
                ifindex = get_port_ifindex(port_id)
                cmd_str = "/nic/bin/halctl show port --port " + port_name + " --yaml"
                api.Trigger_AddNaplesCommand(req, node, cmd_str, naples_device)
                port = Port(node, naples_device, port_name, ifindex)
                add_to_ports_dict(node, naples_device, port_name, port)
            resp = api.Trigger(req)
            if not api.Trigger_IsSuccess(resp):
                api.Logger.error(
                    "Failed to trigger show port cmd on node %s naples %s" %
                    (node, naples_device))
                return api.types.status.FAILURE
            # responses follow the port ordering in ports_ifindex_arr
            port_id = 0
            for cmd in resp.commands:
                port_name = get_port_name(port_id)
                port = get_port(node, naples_device, port_name)
                oper_state_list = Port.parse_yaml_data(cmd.stdout)
                # set port oper states
                port.set_oper_states(oper_state_list)
                port.print()
                port_id += 1
    return api.types.status.SUCCESS
def check_validate_link_up(test_case):
    """Re-query every port's oper state and verify each link is up and
    consistent with the previously recorded state.

    On success, each Port object's stored oper states are refreshed.
    Returns SUCCESS, or FAILURE on a trigger, linkup, or validation failure.
    """
    # for each node
    for node in test_case.nodes:
        naples_devices = api.GetDeviceNames(node)
        # for each Naples on node
        for naples_device in naples_devices:
            req = api.Trigger_CreateExecuteCommandsRequest(serial=True)
            # for each port on Naples
            # cmds sent follows the ports ordering in ports_ifindex_arr
            for port_id in range(len(ports_ifindex_arr)):
                port_name = get_port_name(port_id)
                api.Trigger_AddNaplesCommand(
                    req, node,
                    "/nic/bin/halctl show port --port " + port_name + " --yaml",
                    naples_device)
            resp = api.Trigger(req)
            if not api.Trigger_IsSuccess(resp):
                api.Logger.error(
                    "Failed to trigger show port cmd on node %s naples %s" %
                    (node, naples_device))
                return api.types.status.FAILURE
            # responses follow the port ordering in ports_ifindex_arr
            port_id = 0
            for cmd in resp.commands:
                port_name = get_port_name(port_id)
                port = get_port(node, naples_device, port_name)
                oper_state_list = Port.parse_yaml_data(cmd.stdout)
                if not port.check_link_up(oper_state_list):
                    api.Logger.error(
                        "Failed to linkup on node %s naples %s port %s" %
                        (node, naples_device, port_name))
                    return api.types.status.FAILURE
                if not port.validate_link_up(oper_state_list):
                    api.Logger.error(
                        "Failed to validate linkup on node %s naples %s port %s"
                        % (node, naples_device, port_name))
                    return api.types.status.FAILURE
                # update port oper states
                port.set_oper_states(oper_state_list)
                port.print()
                port_id += 1
    return api.types.status.SUCCESS
def Main(tc):
    """Add management workloads for every non-ESX workload node/device, plus
    Naples workloads on Naples nodes."""
    for node in api.GetWorkloadNodeHostnames():
        if api.GetNodeOs(node) == "esx":
            continue
        for device in api.GetDeviceNames(node):
            api.Logger.debug("Creating NodeInterface for node: %s device: %s" %
                             (node, device))
            node_if_info = GetNodeInterface(node, device)
            api.Logger.debug("Adding MgmtWorkloads for node: %s device: %s" %
                             (node, device))
            if AddMgmtWorkloads(node_if_info) != api.types.status.SUCCESS:
                api.Logger.debug("Failed to add MgmtWorkloads for node: %s" %
                                 node)
                return api.types.status.FAILURE
            if api.IsNaplesNode(node):
                api.Logger.debug("Adding NaplesWorkloads for node: %s" % node)
                AddNaplesWorkloads(node_if_info)
    return api.types.status.SUCCESS