def ValidateBGPOverlayNeighborship(node):
    """Verify that L2VPN BGP overlay neighborships on *node* are up.

    Pulls the per-address-family peer table, extracts the L2VPN entries and
    cross-checks them against the overall BGP peer status.
    Returns True on success (always True in dryrun), False otherwise.
    """
    if api.GlobalOptions.dryrun:
        return True
    ok, peers_af_json = pdsctl.ExecutePdsctlShowCommand(node, "bgp peers-af",
                                                        "--json", yaml=False)
    if not ok:
        api.Logger.error(" - ERROR: pdstcl show bgp peers-af failed")
        return False
    api.Logger.info("pdstcl show output: %s" % (peers_af_json))
    l2vpn_nbrs = GetBgpNbrEntries(peers_af_json, "L2VPN")
    if not l2vpn_nbrs:
        api.Logger.error(" - ERROR: No L2VPN entries found in "
                         "show bgp peers-af")
        return False
    api.Logger.info("L2VPN Neighbors : %s" % (l2vpn_nbrs))
    ok, peers_json = pdsctl.ExecutePdsctlShowCommand(node, "bgp peers",
                                                     "--json", yaml=False)
    if not ok:
        api.Logger.error(" - ERROR: pdstcl show bgp peers failed")
        return False
    api.Logger.info("pdstcl show output: %s" % (peers_json))
    # Every L2VPN neighbor must show an established peer status.
    if not ValidateBGPPeerNbrStatus(peers_json, l2vpn_nbrs):
        api.Logger.error(" - ERROR: Mismatch in BGP Peer status")
        return False
    return True
def _verifyFlowsAtNode(node_name, src_wl, dst_wl, flowEntries):
    """Check that both iflow and rflow for src_wl->dst_wl exist on node_name.

    Fetches the node's flow table via pdsctl (cached in *flowEntries* keyed
    by node name so each node is queried only once per verification run).
    Returns api.types.status.SUCCESS/FAILURE.
    """
    if node_name not in flowEntries:
        ret, resp = pdsctl.ExecutePdsctlShowCommand(node_name, "flow",
                                                    yaml=False)
        if ret != True:
            api.Logger.error("Failed to execute show flows at node %s : %s" %
                             (node_name, resp))
            return api.types.status.FAILURE
        flowEntries[node_name] = resp
    iflow_found, rflow_found = parseFlowEntries(flowEntries[node_name],
                                                src_wl, dst_wl)
    if iflow_found == False or rflow_found == False:
        api.Logger.error("Flows not found at node %s : %s[iflow %d, rflow %d]" %
                         (node_name, flowEntries[node_name],
                          iflow_found, rflow_found))
        return api.types.status.FAILURE
    return api.types.status.SUCCESS

def verifyFlowTable(af, workload_pairs):
    """Verify flow-table entries for every workload pair on both endpoints.

    For each (w1, w2) pair the iflow/rflow presence is checked on w1's node
    (w1->w2 direction) and on w2's node (w2->w1 direction). The previously
    duplicated per-endpoint code is factored into _verifyFlowsAtNode.

    Args:
        af: address family (currently unused; kept for interface stability).
        workload_pairs: iterable of (workload, workload) tuples.
    Returns api.types.status.SUCCESS/FAILURE.
    """
    if api.IsDryrun():
        return api.types.status.SUCCESS
    flowEntries = {}  # per-node cache of 'show flow' output
    for pair in workload_pairs:
        w1, w2 = pair[0], pair[1]
        if _verifyFlowsAtNode(w1.node_name, w1, w2,
                              flowEntries) != api.types.status.SUCCESS:
            return api.types.status.FAILURE
        if _verifyFlowsAtNode(w2.node_name, w2, w1,
                              flowEntries) != api.types.status.SUCCESS:
            return api.types.status.FAILURE
    return api.types.status.SUCCESS
def setDataPortStatePerUplink(naples_nodes, oper, id):
    """Set admin-state *oper* on selected uplink port(s) of each naples node.

    *id* selects 'Uplink0', 'Uplink1', or (any other value) both uplinks.
    Returns api.types.status.SUCCESS/FAILURE (SUCCESS in dryrun).
    """
    if id == 'Uplink0':
        uplink_list = [UPLINK_PREFIX1]
    elif id == 'Uplink1':
        uplink_list = [UPLINK_PREFIX2]
    else:
        uplink_list = [UPLINK_PREFIX1, UPLINK_PREFIX2]
    if GlobalOptions.dryrun:
        return api.types.status.SUCCESS
    for node in naples_nodes:
        node_uuid = EzAccessStoreClient[node].GetNodeUuid(node)
        for prefix in uplink_list:
            intf_uuid = prefix % node_uuid
            cmd = ("debug update port --admin-state %s --port " + intf_uuid) % oper
            ret, resp = pdsctl.ExecutePdsctlCommand(node, cmd, yaml=False)
            if ret != True:
                api.Logger.error("oper:%s uplink failed at node %s : %s" %
                                 (oper, node, resp))
                return api.types.status.FAILURE
        misc_utils.Sleep(1)  # give a short gap before printing status
        pdsctl.ExecutePdsctlShowCommand(node, "port status", yaml=False)
    return api.types.status.SUCCESS
def dumpPdsAgentInfo(tc, log_string=""):
    """Log subnet and vnic counts from pds-agent on every naples node.

    Only the summary-count lines are of interest, so the full table output
    is filtered through grep on the device.
    """
    api.Logger.debug(log_string)
    for node in api.GetNaplesHostnames():
        pdsctl.ExecutePdsctlShowCommand(
            node, "subnet", " | grep \"No. of subnets :\"",
            yaml=False, print_op=True)
        pdsctl.ExecutePdsctlShowCommand(
            node, "vnic", " | grep \"No. of vnics :\"",
            yaml=False, print_op=True)
def showFlows(tc):
    """Trigger a flow-count dump on every naples node.

    Exits the process if the testcase was canceled; no-op (SUCCESS) in dryrun.
    """
    if tc.cancel:
        api.Logger.info("Canceling showFlows...")
        sys.exit(0)
    api.Logger.info("Running showFlows...")
    if api.GlobalOptions.dryrun:
        return api.types.status.SUCCESS
    for node in api.GetNaplesHostnames():
        # NOTE: printing the full 'show flow' output is deliberately disabled.
        # With ~2M flows it caused timeouts/crashes in pds-agent at times, so
        # only the flow count is fetched; the main objective is to trigger the
        # backend flow dump. Revisit once the agent is tuned for large dumps.
        pdsctl.ExecutePdsctlShowCommand(
            node, "flow", "--summary | grep \"No. of flows :\"",
            yaml=False, print_op=True)
    api.Logger.debug("Completed Running showFlows...")
    return api.types.status.SUCCESS
def UpdateSecurityProfileTimeouts(tc):
    """Apply security-profile timeout settings from tc.args to all nodes.

    No-op (SUCCESS) when the testcase has no 'security_profile' args. After
    the update, the resulting profile is dumped from each naples node.
    Returns api.types.status.SUCCESS/FAILURE.
    """
    if not hasattr(tc.args, 'security_profile'):
        return api.types.status.SUCCESS
    sec_prof_spec = tc.args.security_profile
    oper = __getOperations(['update'])
    api.Logger.verbose("Update Security Profile Spec: ")
    api.Logger.verbose("conntrack : %s "%getattr(sec_prof_spec, 'conntrack', False))
    api.Logger.verbose("tcpidletimeout : %s "%getattr(sec_prof_spec, 'tcpidletimeout', 0))
    api.Logger.verbose("udpidletimeout : %s "%getattr(sec_prof_spec, 'udpidletimeout', 0))
    # Push the timeout update to the security-profile objects on all nodes.
    tc.selected_sec_profile_objs = config_api.SetupConfigObjects(
        'security_profile', allnode=True)
    ret = api.types.status.SUCCESS
    for op in oper:
        res = config_api.ProcessObjectsByOperation(
            op, tc.selected_sec_profile_objs, sec_prof_spec)
        if res == api.types.status.SUCCESS:
            api.Logger.info("Successfully %s Security Profile Spec Timeouts"%op)
        else:
            ret = api.types.status.FAILURE
            api.Logger.error("Failed to %s Security Profile Spec Timeouts"%op)
    # Dump the (possibly updated) profile for debugging reference.
    for node in api.GetNaplesHostnames():
        pdsctl.ExecutePdsctlShowCommand(node, "security-profile", None,
                                        yaml=False, print_op=True)
    return ret
def ExecuteShowLearnStats(node, yaml=True, print_op=False):
    """Run 'pdsctl show learn statistics' on *node*.

    Returns (status, output); logs an error when the command fails.
    """
    ret, resp = pdsctl.ExecutePdsctlShowCommand(node, "learn statistics", "",
                                                yaml, print_op)
    if ret != True:
        api.Logger.error(
            "Failed to execute show learn statistics at node %s : %s" %
            (node, resp))
    return ret, resp
def __getLifInfo(host_name):
    """Return raw 'pdsctl show lif' output for *host_name* ('' on failure)."""
    ok, output = pdsctl.ExecutePdsctlShowCommand(host_name, 'lif', yaml=False)
    if ok:
        return output
    api.Logger.error("Failed to get lif info from pdsctl")
    return ''
def DumpVnicInfo(node):
    """Dump the vnic table on *node* via pdsctl (best effort; logs on error)."""
    show_cmd = "vnic"
    ok, _ = pdsctl.ExecutePdsctlShowCommand(node, show_cmd, None, yaml=False)
    if not ok:
        api.Logger.error(" - ERROR: pdstcl show %s failed" % (show_cmd))
    return
def DumpRemoteMappingInfo(node):
    """Dump remote L3 internal mappings on *node* (best effort; logs on error)."""
    show_cmd = "mapping internal remote --type l3"
    ok, _ = pdsctl.ExecutePdsctlShowCommand(node, show_cmd, None, yaml=False)
    if not ok:
        api.Logger.error(" - ERROR: pdstcl show %s failed" % (show_cmd))
    return
def ExecuteShowLearnIP(node, lmapping=None, yaml=True):
    """Run 'pdsctl show learn ip' on *node*.

    When *lmapping* is given, the query is narrowed to that mapping's IP and
    VPC UUID. Returns (status, output); logs an error on failure.
    """
    args = "--mode auto "
    if lmapping is not None:
        args += "--ip %s --vpc %s" % (lmapping.IP,
                                      lmapping.VNIC.SUBNET.VPC.UUID.UuidStr)
    ret, resp = pdsctl.ExecutePdsctlShowCommand(node, "learn ip", args, yaml)
    if ret != True:
        api.Logger.error("Failed to execute show learn ip at node %s : %s" %
                         (node, resp))
    return ret, resp
def ExecuteShowLearnMAC(node, vnic=None, yaml=True):
    """Run 'pdsctl show learn mac' on *node*.

    When *vnic* is given, the query is narrowed to that vnic's MAC and
    subnet UUID. Returns (status, output); logs an error on failure.
    """
    args = "--mode auto "
    if vnic is not None:
        args += "--mac %s --subnet %s" % (vnic.MACAddr,
                                          vnic.SUBNET.UUID.UuidStr)
    ret, resp = pdsctl.ExecutePdsctlShowCommand(node, "learn mac", args, yaml)
    if ret != True:
        api.Logger.error("Failed to execute show learn mac at node %s : %s" %
                         (node, resp))
    return ret, resp
def ShowFlowSummary(nodes=None):
    """Print the flow count on each of the given naples nodes.

    Args:
        nodes: iterable of node names; defaults to no nodes.
    Returns:
        api.types.status.SUCCESS always (errors are only printed by pdsctl).
    """
    if api.IsDryrun():
        return api.types.status.SUCCESS
    # FIX: avoid the mutable-default-argument pitfall (was nodes=[]);
    # None now stands in for "no nodes", preserving the old behavior.
    for node in (nodes or []):
        pdsctl.ExecutePdsctlShowCommand(node, "flow",
                                        "--summary | grep \"No. of flows :\"",
                                        yaml=False, print_op=True)
    return api.types.status.SUCCESS
def getFirstOperDownPort(node):
    """Return the uplink name (via uplinkDict) of the first port on *node*
    whose 'port status' output contains "UP DOWN" (presumably admin UP with
    oper DOWN - TODO confirm against the pdsctl output format).

    NOTE(review): return types are mixed - dryrun returns
    api.types.status.SUCCESS, the normal path returns an uplinkDict value,
    and when no port matches the function implicitly returns None. Callers
    must tolerate all three; confirm this is intended.
    """
    # Presumably gives port state time to settle before sampling - confirm.
    misc_utils.Sleep(3)
    if GlobalOptions.dryrun:
        return api.types.status.SUCCESS
    node_uuid = EzAccessStoreClient[node].GetNodeUuid(node)
    for uplink in [UPLINK_PREFIX1, UPLINK_PREFIX2]:
        # Build the per-node interface UUID from the uplink prefix template.
        intf_uuid = uplink % node_uuid
        cmd = "port status -p " + intf_uuid
        ret, resp = pdsctl.ExecutePdsctlShowCommand(node, cmd, yaml=False)
        if ret == True and "UP DOWN" in resp:
            return uplinkDict[uplink]
def ValidateBGPUnderlayNeighborship(node):
    """Verify that IPv4 BGP underlay neighborships on *node* are up.

    Pulls the per-address-family peer table, extracts the IPV4 entries and
    cross-checks them against the overall BGP peer status.
    Returns True on success (always True in dryrun), False otherwise.
    """
    if api.IsDryrun():
        return True
    ok, peers_af_json = pdsctl.ExecutePdsctlShowCommand(node, "bgp peers-af",
                                                        "--json", yaml=False)
    if not ok:
        api.Logger.error(" - ERROR: pdstcl show bgp peers-af failed")
        return False
    api.Logger.verbose("pdstcl show output: %s" % (peers_af_json))
    bgp_peers = GetBgpNbrEntries(peers_af_json, "IPV4")
    if not bgp_peers:
        api.Logger.error(" - ERROR: No BGP peer entries found in "
                         "show bgp peers-af")
        return False
    api.Logger.info("BGP peer Neighbors : %s" % (bgp_peers))
    ok, peers_json = pdsctl.ExecutePdsctlShowCommand(node, "bgp peers",
                                                     "--json", yaml=False)
    if not ok:
        api.Logger.error(" - ERROR: pdstcl show bgp peers failed")
        return False
    api.Logger.verbose("pdstcl show output: %s" % (peers_json))
    # Every IPv4 peer must show an established status.
    if not ValidateBGPPeerNbrStatus(peers_json, bgp_peers):
        api.Logger.error(" - ERROR: Validating BGP Peer Underlay status")
        return False
    return True
def getFlowEntries(node):
    """Return (status, flow-entry-lines) from 'pdsctl show flow' on *node*.

    The first 16 lines (legend/header) and the trailing line of the output
    are stripped. In dryrun mode a single dummy entry is returned.

    Returns:
        (api.types.status.SUCCESS, list-of-lines) on success,
        (api.types.status.FAILURE, []) on command failure.
    """
    if api.IsDryrun():
        # Return a dummy entry
        resp = ["256 I/H 3 2.0.0.2 6915 2.0.0.5 2048 ICMP A"]
        return api.types.status.SUCCESS, resp
    ret, entries = pdsctl.ExecutePdsctlShowCommand(node, "flow", yaml=False)
    if ret != True:
        # FIX: this log line previously referenced the undefined name 'resp'
        # (NameError on the failure path); log the actual output 'entries'.
        api.Logger.error("Failed to execute show flows on node %s : %s" %
                         (node, entries))
        # FIX: return a (status, list) pair like the success path so callers
        # that unpack two values do not crash on failure.
        return api.types.status.FAILURE, []
    # Skip first 16 lines as they are part of the legend
    return api.types.status.SUCCESS, entries.splitlines()[16:-1]
def GetUplinkStatus(node_name):
    """Return a list of parsed YAML port-status documents for *node_name*.

    The pdsctl output is a stream of YAML documents separated by "---";
    each chunk is parsed individually. Returns [] if the command fails.
    """
    uplinkStatus = []
    status, resp = pdsctl.ExecutePdsctlShowCommand(node_name, "port status",
                                                   yaml=True, print_op=False)
    if status != True:
        return uplinkStatus
    ptrn = "---"
    for port in resp.split(ptrn):
        try:
            # FIX: pass an explicit Loader - yaml.load() without one is
            # deprecated/unsafe in PyYAML >= 5 and inconsistent with
            # GetLifInfo, which already uses FullLoader.
            port = yaml.load(port, Loader=yaml.FullLoader)
            uplinkStatus.append(port)
        except yaml.YAMLError:
            # FIX: narrow the previous bare 'except:' to parse errors only,
            # so unrelated failures are no longer silently swallowed.
            pass
    return uplinkStatus
def clearFlows(tc):
    """Log flow counts on every naples node, then clear the flow table.

    Exits the process if the testcase was canceled; no-op (SUCCESS) in dryrun.
    """
    if tc.cancel:
        api.Logger.info("Canceling clearFlows...")
        sys.exit(0)
    api.Logger.info("Running clearFlows...")
    if api.GlobalOptions.dryrun:
        return api.types.status.SUCCESS
    for node in api.GetNaplesHostnames():
        # Show the flow count before clearing, for debugging reference.
        pdsctl.ExecutePdsctlShowCommand(
            node, "flow", "--summary | grep \"No. of flows :\"",
            yaml=False, print_op=True)
    flowutils.clearFlowTable(tc.workload_pairs)
    api.Logger.debug("Completed Running clearFlows...")
    return api.types.status.SUCCESS
def verifyDataPortStateHelper(naples_nodes, admin, oper):
    """Verify the admin/oper state of both uplinks on every naples node.

    For each uplink, 'port status' output is checked for the expected
    "ADMIN/OPER" substring: "UP/UP" when oper == operUp, "DOWN/DOWN" when
    admin == operDown, otherwise "UP/DOWN".

    NOTE(review): the first branch tests *oper* while the later ones test
    *admin*, and the first error message labels admin as "oper:%s" - looks
    intentional for the admin-up/oper-down case but confirm with callers.
    Returns api.types.status.SUCCESS/FAILURE.
    """
    ret = api.types.status.SUCCESS
    for node in naples_nodes:
        node_uuid = EzAccessStoreClient[node].GetNodeUuid(node)
        #if not node_uuid:
        #    node_uuid = int(''.join(filter(str.isdigit, node)))
        for uplink in [UPLINK_PREFIX1, UPLINK_PREFIX2]:
            # Build the per-node interface UUID from the uplink prefix.
            intf_uuid = uplink % node_uuid
            cmd = "port status -p " + intf_uuid
            # NOTE: 'ret' is reused here as the pdsctl command status...
            ret, resp = pdsctl.ExecutePdsctlShowCommand(node, cmd, yaml=False)
            if ret != True:
                api.Logger.error(
                    "oper:%s uplink ret verify failed at node %s : %s" %
                    (admin, node, resp))
                return api.types.status.FAILURE
            else:
                # ...and restored to a status code for the final return/log.
                ret = api.types.status.SUCCESS #explicitly mark SUCCESS
                if oper == operUp:
                    # Expect both admin and oper state UP.
                    if "UP/UP" not in resp:
                        api.Logger.error(
                            "oper:%s uplink verify failed at node %s : %s" %
                            (oper, node, resp))
                        return api.types.status.FAILURE
                elif admin == operDown:
                    # Expect both admin and oper state DOWN.
                    if "DOWN/DOWN" not in resp:
                        api.Logger.error(
                            "admin:%s uplink verify failed at node %s : %s" %
                            (admin, node, resp))
                        return api.types.status.FAILURE
                else:
                    # Remaining case: admin UP but oper DOWN.
                    if "UP/DOWN" not in resp:
                        api.Logger.error(
                            "admin:%s uplink verify failed at node %s : %s" %
                            (admin, node, resp))
                        return api.types.status.FAILURE
    api.Logger.info("verifyDataPortState done for %s and result %d..." %
                    (naples_nodes, ret))
    return ret
def GetLifInfo(node, if_name=None):
    """Fetch 'pdsctl show lif' output from *node*.

    When *if_name* is given, only the YAML document whose status.name
    matches is returned; otherwise the full output is returned.
    Returns (status, output); output is '' on failure. In dryrun the call
    is skipped and (1, '') is returned.
    """
    if GlobalOptions.dryrun:
        return 1, ''
    retval, output = pdsctl.ExecutePdsctlShowCommand(node, 'lif', yaml=True)
    if not retval:
        api.Logger.error("Failed to get lif info from pdsctl")
        return retval, ''
    if if_name:
        # Output is a stream of YAML documents separated by "---"; pick the
        # one whose status.name matches the requested interface.
        for lif in output.split("---"):
            yamlOp = yaml.load(lif, Loader=yaml.FullLoader)
            if yamlOp and yamlOp['status']['name'] == if_name:
                output = lif
                break
    return retval, output