def __compare_pktbuffers(self, epktbuf, apktbuf, tc):
    """Compare an expected packet buffer against the actual one.

    Returns True on match (or when comparison is skipped — dry run or no
    expected buffer), False on mismatch or when the actual buffer is absent.
    """
    # Dry runs never carry real packet data to compare.
    if GlobalOptions.dryrun:
        return True
    # No expectation recorded for this step: nothing to validate.
    if epktbuf is None:
        logger.info("Packet Compare: Expected Buffer is None. Skipping")
        return True
    # Expected a buffer but got nothing back: hard failure.
    if apktbuf is None:
        logger.error("Packet Compare: ExpType:%s, ActType:%s" %
                     (type(epktbuf), type(apktbuf)))
        return False
    # Guard against accidentally comparing an object with itself.
    if epktbuf is apktbuf:
        logger.error("ExpBuf and ActBuf are same objects.")
        return False
    logger.verbose("Comparing Packets")
    comparator = comparators.PacketComparator(tc.GetIgnorePacketFields())
    comparator.AddExpected(epktbuf, None, 1)
    comparator.AddReceived(apktbuf, None)
    comparator.Compare()
    comparator.ShowResults()
    return comparator.IsMatch()
def TestCaseStepVerify(tc, step):
    """Verify endpoint IP configuration after a testcase step.

    Steps 5, 6 and 7 expect local endpoints to have had their IP addresses
    removed; every other step expects the originally configured addresses
    to still be present. Returns True when the observed state matches the
    expectation for the step, False otherwise.
    """
    logger.info(
        "SRC EP DST EP %s(%s) %s(%s)" %
        (tc.config.src_endpoints[0], tc.config.src_endpoints[0].macaddr.get(),
         tc.config.dst_endpoints[0], tc.config.dst_endpoints[0].macaddr.get()))
    # Snapshot the configured addresses before Get() refreshes the
    # endpoint objects from the device.
    src_ipaddrs = tc.config.src_endpoints[0].ipaddrs
    dst_ipaddrs = tc.config.dst_endpoints[0].ipaddrs
    dst_ep = tc.config.dst_endpoints[0]
    src_ep = tc.config.src_endpoints[0]
    src_ep.Get()
    dst_ep.Get()
    if (step.step_id in [5, 6, 7]):
        # These steps delete the IP config: a local endpoint that still
        # reports addresses is a failure.
        if ((not src_ep.IsRemote() and len(src_ep.ipaddrs)) or \
                (not dst_ep.IsRemote() and len(dst_ep.ipaddrs))):
            logger.error("Endpoint has been configured with IP address still")
            # NOTE(review): the snapshot is NOT restored on this failure
            # path — confirm later testcases do not depend on it.
            return False
        # Restore values for future testcases.
        tc.config.src_endpoints[0].ipaddrs = src_ipaddrs
        tc.config.dst_endpoints[0].ipaddrs = dst_ipaddrs
    else:
        # All other steps: the first configured address must be unchanged.
        if not src_ep.ipaddrs[0].get() == src_ipaddrs[0].get() or \
                not dst_ep.ipaddrs[0].get() == dst_ipaddrs[0].get():
            logger.error("Endpoint has not been configured with IP address")
            return False
        tc.config.src_endpoints[0].ipaddrs = src_ipaddrs
        tc.config.dst_endpoints[0].ipaddrs = dst_ipaddrs
    return True
def _trigger(self, tc):
    """Execute all trigger steps of a testcase and resolve its final status.

    For each step the sequence is: setup -> trigger -> verify -> teardown.
    Iteration stops early on the first step whose verification returns
    ERROR. The final status combines the last per-step verify status with
    the testcase-level VerifyCallback() result.

    Returns a defs.status value (SUCCESS or ERROR).
    """
    tc.PreTriggerCallback()
    status = defs.status.SUCCESS
    vfstatus = defs.status.SUCCESS
    cbstatus = defs.status.SUCCESS
    try:
        for step in tc.session.steps:
            tc.StepSetupCallback(step)
            tc.StepTriggerCallback(step)
            self._trigger_step(tc, step)
            vfstatus = self.__verify_step(tc, step)
            tc.StepTeardownCallback(step)
            # Abort remaining steps on the first verification error.
            if vfstatus is defs.status.ERROR:
                break
        # NOTE(review): VerifyCallback runs even after a step failed —
        # presumably so the testcase can inspect the failure; confirm.
        cbstatus = tc.VerifyCallback()
        status = self.__resolve_status(cbstatus, vfstatus)
    except TriggerError as e:
        logger.error(f"Testcase raised exception {e}")
        status = defs.status.ERROR
    if status is defs.status.ERROR:
        logger.error("TESTCASE FINAL STATUS = %s(Verify:%s Callback:%s)" %\
                     ('IGNORE' if tc.IsIgnore() else 'FAIL',
                      defs.status.str(vfstatus), defs.status.str(cbstatus)))
    else:
        logger.info("TESTCASE FINAL STATUS = PASS")
    # Teardown always runs, regardless of the resolved status.
    tc.TeardownCallback()
    return status
def __parse(self):
    """Split self.string into an object type key and its parameter list.

    Accepts either a URI-style value ("<scheme>://p1/p2/...") — where the
    whole "<scheme>://" prefix is the type key — or a plain value
    ("type/p1/p2/..."), where the first token is the type key. Validates
    the parameter count against TemplateFieldValueToObject and aborts on
    a mismatch.
    """
    if '://' in self.string:
        # URI-style: keep the scheme plus "://" as the lookup key.
        scheme = self.string.split("://")[0] + '://'
        remainder = self.string.split(scheme)[1]
        self.params = remainder.split("/")
        typ = scheme
    else:
        # Plain style: first "/"-separated token is the lookup key.
        self.params = self.string.split("/")
        typ = self.params.pop(0)
    entry = TemplateFieldValueToObject[typ]
    self.object = entry['object']
    self.pcount = entry['pcount']
    # Optional parameters are allowed only when declared for this type.
    self.opcount = entry.get('opcount', 0)
    self.total_pcount = self.pcount + self.opcount
    if len(self.params) < self.pcount:
        logger.error(
            "Invalid # of params for field: %s [Expected %d params, Got %d params]"
            % (self.string, self.pcount, len(self.params)))
        assert (0)
    if len(self.params) > self.total_pcount:
        logger.error(
            "Invalid # of total params for field: %s [Expected %d params, Got %d params]"
            % (self.string, self.total_pcount, len(self.params)))
        assert (0)
    return
def ValidateGrpcRead(self, node, getResp):
    """Validate a gRPC GET response set against the locally stored config.

    Walks every response, counts the spec entries belonging to this object
    type, and compares each against the config object looked up by key.
    Finally checks that the number of matched objects equals the hardware
    object count for the node.

    Returns True on success (always True in dry-run), False on the first
    validation failure or on a count mismatch.
    """
    if utils.IsDryRun():
        return True
    numObjs = 0
    for obj in getResp:
        if not utils.ValidateGrpcResponse(obj):
            logger.error("GRPC get request failed for ", obj)
            continue
        #TODO handle singleton object
        resps = obj.Response
        for resp in resps:
            # Skip specs that belong to a different object type/scope.
            if not self.IsGrpcSpecMatching(resp.Spec):
                continue
            numObjs += 1
            key = self.GetKeyfromSpec(resp.Spec)
            cfgObj = self.GetObjectByKey(node, key)
            # NOTE(review): cfgObj may be None here; ValidateObject is
            # expected to fail in that case so we bail out below.
            if not utils.ValidateObject(cfgObj, resp):
                logger.error("GRPC read validation failed for ", obj)
                if cfgObj:
                    cfgObj.Show()
                logger.info(f"Key:{key} Spec:{resp.Spec}")
                return False
            # Keep the local status in sync with what was read back.
            if hasattr(cfgObj, 'Status'):
                cfgObj.Status.Update(resp.Status)
    logger.info(f"GRPC read count {numObjs} for {self.ObjType.name} in {node}")
    # Every hardware object must have been matched exactly once.
    return (numObjs == self.GetNumHwObjects(node))
def Create(self, node):
    """Invoke Create() on the generator registered for `node`.

    Logs an error and aborts when no generator exists for the node.
    """
    if node not in self.Objs:
        logger.error('Failed to Create, No generator for node %s' % (node))
        assert (0)
        return
    self.Objs[node].Create()
    return
def ProcessHALGetResponse(self, get_req_spec, get_resp_spec):
    """Populate segment attributes from a HAL GET response.

    On API_STATUS_OK copies the segment id, fabric encapsulation,
    forwarding policies and member lists; otherwise logs the failure and
    clears self.id.
    """
    if get_resp_spec.api_status != haldefs.common.ApiStatus.Value('API_STATUS_OK'):
        logger.error("- Segment %s = %s is missing." %\
                     (self.GID(), haldefs.common.ApiStatus.Name(get_resp_spec.api_status)))
        self.id = None
        return
    spec = get_resp_spec.spec
    self.id = spec.key_or_handle.segment_id
    #self.vlan_id = spec.access_encap.encap_value
    if spec.wire_encap.encap_type == haldefs.common.ENCAP_TYPE_VXLAN:
        self.fabencap = 'VXLAN'
        self.vxlan_id = spec.wire_encap.encap_value
        self.gipo = spec.gipo
    elif spec.wire_encap.encap_type == haldefs.common.ENCAP_TYPE_DOT1Q:
        self.fabencap = 'VLAN'
        self.vlan_id = spec.wire_encap.encap_value
    else:
        self.fabencap = None
    self.multicast_policy = spec.mcast_fwd_policy
    self.broadcast_policy = spec.bcast_fwd_policy
    # NOTE(review): member lists are taken from the *request* spec, not
    # the response — confirm this is intentional.
    self.nw_ids = list(get_req_spec.spec.network_key_handle.nw_key.ip_prefix)
    self.mbrif_ids = [ifkh.interface_id for ifkh in get_req_spec.spec.if_key_handle]
    return
def ProcessHALGetResponse(self, get_req_spec, get_resp_spec):
    """Record the device mode from a HAL GET response, or log the failure."""
    status_ok = haldefs.common.ApiStatus.Value('API_STATUS_OK')
    if get_resp_spec.api_status == status_ok:
        self.device_mode = get_resp_spec.device.device_mode
        return
    logger.error("- NIC = %s is missing." %\
                 (haldefs.common.ApiStatus.Name(get_resp_spec.api_status)))
def Read(self, node):
    """Invoke Read() on the generator registered for `node`; abort if none."""
    if node not in self.Objs:
        logger.error('No generator for node %s' % (node))
        assert (0)
        return
    self.Objs[node].Read()
    return
def SetupCfgFilesForUpgrade(self, spec=None):
    """Prepare upgrade configuration files (DOL simulation only).

    Reads the upgrade mode and optional failure-injection parameters from
    `spec` and, for hitless mode, launches the setup script in the
    background. Returns True on success or when there is nothing to do
    (hardware, dry-run), False when the setup command fails.
    """
    # For hardware nothing to setup specifically
    if not utils.IsDol():
        return True
    if utils.IsDryRun():
        return True
    # getattr() with a default already handles a missing attribute (and a
    # None spec), so the previous hasattr() guards were redundant.
    mode = getattr(spec, "UpgMode", "hitless")
    self.failure_stage = getattr(spec, "failure_stage", None)
    self.failure_reason = getattr(spec, "failure_reason", None)
    args = ""
    logger.info("Setup Upgrade Config Files for %s mode" % mode)
    logger.info("Setup Upgrade failure stage %s, failure reason %s" %
                (self.failure_stage, self.failure_reason))
    # Use identity comparison with None per PEP 8 (was `!= None`).
    if self.failure_stage is not None and self.failure_reason is not None:
        args = "%s %s" % (self.failure_stage, self.failure_reason)
    # For now cfg file setup done only for hitless mode
    if mode == "hitless":
        # setup hitless upgrade config files
        upg_setup_cmds = "apollo/test/tools/apulu/setup_hitless_upgrade_cfg_sim.sh %s" % args
        if not RunCmd(upg_setup_cmds, timeout=20, background=True):
            logger.error("Command Execution Failed: %s" % upg_setup_cmds)
            return False
        utils.Sleep(10)  # setup is executed in the background.
    return True
def IsFilterMatch(self, filters):
    """Check whether this object satisfies every (attr, value) filter pair.

    `filters` is an iterable of (attribute-name, value) tuples whose
    values are strings from the test spec; numeric/boolean/None strings
    are coerced to native types before comparison. A filter naming an
    attribute the object does not have aborts the run. Returns True when
    all filters match (or filters is None), False otherwise.
    """
    logger.verbose("IsFilterMatch(): Object %s" % self.GID())
    if filters == None:
        return True
    for f in filters:
        attr = f[0]
        value = f[1]
        # 'any=None' is a wildcard entry: matches every object.
        if attr == 'any' and value == None:
            continue
        if attr not in self.__dict__:
            logger.error("Attr:%s not present in %s." %\
                         (attr, self.__class__))
            assert (0)
            # Only reachable when asserts are disabled (-O).
            return False
        fvalue = self.__dict__[attr]
        # Framework field objects wrap the real value; unwrap it first.
        if isinstance(fvalue, objects.FrameworkFieldObject):
            fvalue = fvalue.get()
        # Coerce spec strings to native types; order matters — the digit
        # check must run while `value` is still a string.
        if value.isdigit():
            value = int(value)
        if value == 'None':
            value = None
        if value == 'True':
            value = True
        if value == 'False':
            value = False
        logger.verbose(" - %s: object" % attr, fvalue, "filter:", value)
        if fvalue != value:
            return False
    logger.verbose(" - Found Match !!")
    return True
def __verify_pktbuffers(self, epktbuf, apktbuf, tc):
    """Compare two packet buffers and map the result to a status code."""
    result = self.__compare_pktbuffers(epktbuf, apktbuf, tc)
    # Only an explicit False counts as a mismatch (mirrors the comparator
    # contract used elsewhere in this class).
    if result is not False:
        logger.info("PacketBuffer Compare Result = Match")
        return defs.status.SUCCESS
    logger.error("PacketBuffer Compare Result = Mismatch")
    return defs.status.ERROR
def ProcessHALGetResponse(self, get_req_spec, get_resp_spec):
    """Record the router MAC from a HAL GET response; clear handle on failure."""
    api_status = get_resp_spec.api_status
    if api_status == haldefs.common.ApiStatus.Value('API_STATUS_OK'):
        self.rmac = objects.MacAddressBase(integer=get_resp_spec.spec.rmac)
        return
    logger.error("- Network %s = %s is missing." %\
                 (self.GID(), haldefs.common.ApiStatus.Name(api_status)))
    self.hal_handle = None
def ClearOnDevice(ep_mac_ip):
    """Clear the given endpoint entry on the device via pdsctl.

    Returns True on success, False when the clear command fails.
    """
    status_ok, output = pdsctl.ExecutePdsctlClearCommand(ep_mac_ip.CliCmd, \
                                                         ep_mac_ip.ClearCliArgs)
    if not status_ok:
        # Fixed typo in the log message ("pdstcl" -> "pdsctl").
        logger.error(" - ERROR: pdsctl clear failed for cmd %s %s" % \
                     (ep_mac_ip.CliCmd, ep_mac_ip.ClearCliArgs))
        return False
    return True
def __get_pktbuffer(self, buf, tc):
    """Return buf.Read() when a buffer is present.

    A missing buffer is an error outside dry-run mode (logged + abort);
    in dry-run it silently yields None.
    """
    if buf is None:
        if GlobalOptions.dryrun is False:
            logger.error("Trying to GetBuffer() on a None descriptor.")
            assert (0)
        return None
    return buf.Read()
def ValidateResponse(self, resps):
    """Fold upgrade responses into self.Status and return the last status.

    Every response updates the status object; failures are logged but do
    not stop processing. Returns None in dry-run mode.
    """
    if utils.IsDryRun():
        return None
    for resp in resps:
        self.Status.Update(resp)
        if resp.Status != upgrade_pb2.UPGRADE_STATUS_OK:
            logger.error(f"Upgrade request failed with {resp}")
    return self.Status.GetLastUpgradeStatus()
def __get_buffer(self, descr, tc):
    """Fetch the buffer behind a descriptor via a guarded call.

    A missing descriptor is an error outside dry-run mode (logged +
    abort); in dry-run it silently yields None.
    """
    if descr is None:
        if GlobalOptions.dryrun is False:
            logger.error("Trying to GetBuffer() on a None descriptor.")
            assert (0)
        return None
    return utils.SafeFnCall(None, logger, descr.GetBuffer)
def __show_mismatch(self):
    """Log every mismatched expected/actual header pair for this packet pair."""
    logger.error("Packets Mismatch: %s" % self.__get_pair_id())
    for expected_hdr, actual_hdr in self.mismatch_hdrs:
        # Pairs must compare headers of the same protocol type.
        assert(type(expected_hdr) == type(actual_hdr))
        self.__show_hdr_mismatch(expected_hdr, actual_hdr)
    return
def GetNumLocalMapping(node):
    """Return the local-mapping count reported by pdsctl, or -1 on failure."""
    grep_args = ' | grep "No. of mappings:"'
    ok, output = utils.RunPdsctlShowCmd(node, "mapping internal local",
                                        grep_args, yaml=False)
    if not ok:
        logger.error(f"show mapping internal local failed for node {node}")
        return -1
    # The count is the text after the first marker occurrence.
    pieces = output.split("No. of mappings: ", 1)
    return (int(pieces[1]) if len(pieces) > 1 else 0)
def ReadLifs(self, node):
    """Fetch per-LIF YAML blobs from `pdsctl show lif` on the given node.

    Returns a list of YAML document strings on success and False when the
    command fails.
    NOTE(review): the dry-run path returns None (neither False nor a
    list) — confirm callers handle that case.
    """
    if utils.IsDryRun():
        return
    ok, output = utils.RunPdsctlShowCmd(node, "lif", yaml=True)
    if not ok:
        logger.error(f"show lif failed for node {node}")
        return False
    return output.split("---")
def VerifyLinkAlert(self, spec=None):
    """Verify that a link event/alert was raised on this node.

    Pulls the next event from the node's event stream and validates it.
    When no event is available, dry-run mode treats that as success.
    """
    eventObj = OperEventClient.Objects(self.Node)
    try:
        alert = next(eventObj.GetEvents())
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; StopIteration (no event available)
        # still lands here.
        logger.error(f"Got no event from {self.Node} for {self}")
        return True if utils.IsDryRun() else False
    return self.__validate_link_alert(alert)
def SpecUpdate(self, spec):
    """Apply per-attribute update handlers from `spec` to this object.

    Each attribute of `spec` with a registered handler in
    self.Class.UpdateAttrFn is dispatched to it; attributes without a
    handler are logged as missing. Finally marks the object as updated in
    the reconfig state.
    """
    handlers = self.Class.UpdateAttrFn
    for attr in vars(spec):
        if attr in handlers:
            handlers[attr](self, attr, spec)
        else:
            logger.error(f"update for {attr} in obj {self} missing")
    self.AddToReconfigState('update')
    return
def ValidateLearntMacEntries(self, node, ret, cli_op):
    """Validate learnt MAC entries against the config store and pdsctl.

    `ret`/`cli_op` are the status and output of a previously executed
    `pdsctl show learn mac` command. For every learnt MAC entry this
    (1) looks up the corresponding VNIC in the client object store and
    (2) re-reads the VNIC via pdsctl to confirm Learn programmed the
    host interface and MAC correctly.

    Returns True when all entries validate (always True in dry-run),
    False on the first mismatch or command failure.
    """
    if utils.IsDryRun():
        return True
    if not ret:
        logger.error("pdsctl show learn mac cmd failed")
        return False
    # split output per object
    mac_entries = cli_op.split("---")
    for mac in mac_entries:
        yamlOp = utils.LoadYaml(mac)
        # Skip empty fragments produced by the "---" split.
        if not yamlOp:
            continue
        yamlOp = yamlOp['macentry']['entryauto']
        mac_key = yamlOp['key']['macaddr']
        subnet_uuid = utils.GetYamlSpecAttr(yamlOp['key'], 'subnetid')
        vnic_uuid_str = utils.List2UuidStr(
            utils.GetYamlSpecAttr(yamlOp, 'vnicid'))
        # verifying if the info learnt is expected from config
        vnic_obj = self.GetVnicByL2MappingKey(node, mac_key, subnet_uuid, 0)
        if vnic_obj == None:
            logger.error(
                f"vnic not found in client object store for key {mac_key} {subnet_uuid}{0}"
            )
            return False
        # verifying if VNIC has been programmed correctly by Learn
        args = "--id " + vnic_uuid_str
        ret, op = utils.RunPdsctlShowCmd(node, "vnic", args, True)
        if not ret:
            logger.error(f"show vnic failed for vnic id {vnic_uuid_str}")
            return False
        cmdop = op.split("---")
        logger.info("Num entries returned for vnic show id %s is %s" %
                    (vnic_uuid_str, (len(cmdop) - 1)))
        for vnic_entry in cmdop:
            yamlOp = utils.LoadYaml(vnic_entry)
            if not yamlOp:
                continue
            vnic_spec = yamlOp['spec']
            hostif = vnic_spec['hostif']
            # Host interface reported by pdsctl must match the config.
            if utils.PdsUuid(
                    hostif).GetUuid() != vnic_obj.HostIfUuid.GetUuid():
                logger.error(
                    f"host interface did not match for {vnic_uuid_str}")
                return False
            # MAC address reported by pdsctl must match the config.
            if vnic_spec['macaddress'] != vnic_obj.MACAddr.getnum():
                logger.error(
                    f"mac address did not match for {vnic_uuid_str}")
                return False
            logger.info(
                "Found VNIC %s entry for learn MAC MAC:%s, Subnet:%s, VNIC:%s "
                % (utils.List2UuidStr(utils.GetYamlSpecAttr(
                    vnic_spec, 'id')), vnic_obj.MACAddr.get(),
                   vnic_obj.SUBNET.UUID, vnic_uuid_str))
    return True
def Init(self, spec):
    """Initialize the buffer from `spec`; an NVMe session is mandatory.

    Aborts when spec.fields carries no `session` attribute.
    """
    super().Init(spec)
    if hasattr(spec.fields, 'session'):
        self.nvme_session = spec.fields.session
    else:
        logger.error(
            "Error!! nvme session needs to be specified for the buffer")
        # BUG FIX: the original ended with a bare `exit` — an expression
        # that references the builtin without calling it, i.e. a no-op.
        # Abort explicitly, matching this file's error convention.
        assert (0)
def GetMatchingConfigObjects(self, selectors=None):
    """Return config objects (flows or sessions) matching the selectors.

    An unrecognized selector type yields an empty list plus an error log.
    NOTE(review): the default selectors=None would raise AttributeError on
    the first call below — callers appear to always pass a selector.
    """
    if selectors.IsFlowBased():
        return self.__get_matching_flows(selectors)
    if selectors.IsSessionBased():
        return self.__get_matching_sessions(selectors)
    logger.error("INVALID Config Filter in testspec.")
    return []
def ValidateResponse(self, resps):
    """Fold tech-support responses into self.Status; return the result file.

    Failed responses are logged and skipped. Returns None in dry-run mode.
    """
    if utils.IsDryRun():
        return None
    for resp in resps:
        if utils.ValidateGrpcResponse(resp):
            self.Status.Update(resp.Response)
        else:
            logger.error(f"TechSupport request failed with {resp}")
    return self.Status.GetTechSupportFile()
def __validate(node):
    """Run per-object-type scale validation for every API object type on a node."""
    # Validate objects are generated within their scale limit
    for objtype in APIObjTypes:
        client = ObjectInfo.get(objtype.name.lower(), None)
        if not client:
            # No client registered for this type: nothing to validate.
            logger.error(f"Skipping scale validation for {objtype.name}")
            continue
        NodeObject.__validate_object_config(node, client)
    return
def DoRpc(self, rpcname, objs):
    """Invoke the named RPC on the stub for each object; return the responses.

    Returns an empty list when the stub has no such RPC.
    """
    rpc = getattr(self.__stub, rpcname, None)
    if not rpc:
        logger.error(f"Invalid RPC {rpcname}")
        return []
    return [rpc(obj, timeout=MAX_GRPC_WAIT) for obj in objs]
def HttpRead(self, node):
    """Read this object type over HTTP (NetAgent mode) and validate the response.

    Returns True outside NetAgent mode, on an empty response, or when
    validation passes; False when validation fails.
    """
    if not utils.IsNetAgentMode():
        return True
    resp = api.client[node].GetHttp(self.ObjType)
    logger.info("HTTP read:", resp)
    if not resp:
        return True
    if self.ValidateHttpRead(node, resp):
        return True
    logger.error("Http Read validation failed for ", self.ObjType)
    return False
def __get_topo_spec():
    """Parse and return the topology spec; abort when parsing fails."""
    topofile = __get_topo_file()
    path = __get_topo_path()
    topospec = parser.ParseFile(path, topofile)
    if topospec:
        return topospec
    logger.error("Invalid topofile %s" % (topofile))
    assert (0)
    return None