def __build_dpm(self):
    """Populate the degree-of-match map (dpm) for every expected/received packet pair."""
    for exp_id, exp_pkt in self.expkts.items():
        logger.debug("Searching for BestPkt for Expkt:%s" % exp_id)
        for rx_id, rx_pkt in self.rxpkts.items():
            # Record how closely this received packet matches the expected one.
            self.__add_pair_to_dpm(exp_pkt.GetMatchDegree(rx_pkt), exp_id, rx_id)
    return
def CreateCqs(self, spec):
    """Generate the CQ objects for this endpoint and store them in a database."""
    self.cqs = objects.ObjectDatabase()
    self.obj_helper_cq = cq.CqObjectHelper()
    helper = self.obj_helper_cq
    helper.Generate(self, spec)
    # Only populate the database when the helper actually produced CQs.
    if len(helper.cqs):
        self.cqs.SetAll(helper.cqs)
    logger.debug("In CreateCqs, Endpoint %s" % (self.GID()))
def CreateSlabs(self):
    """Allocate and configure the slab objects backing this NVMe LIF."""
    logger.debug("In CreateSlabs, NVMe LIF %s" % (self.GID()))
    self.slab_allocator = objects.TemplateFieldObject("range/0/2048")
    self.slabs = objects.ObjectDatabase()
    self.obj_helper_slab = slab.SlabObjectHelper()
    helper = self.obj_helper_slab
    # NOTE(review): slab size and page size are both host_page_size here —
    # presumably one slab per host page; confirm against Generate2's signature.
    page_size = self.spec.host_page_size
    helper.Generate2(self, self.total_slabs, page_size, page_size)
    helper.Configure()
    self.slabs.SetAll(helper.slabs)
def __add_pair_to_dpm(self, deg, epid, rpid):
    """Record an (expected, received) packet id pairing under its match degree."""
    # Degree 0 means "no match at all" — not worth tracking.
    if deg == 0:
        return
    logger.debug("Adding DPM entry Deg:%d Epid:%s Rpid:%s" %\
                 (deg, epid, rpid))
    self.dpm.setdefault(deg, []).append((epid, rpid))
    return
def CreateSlabs(self, spec):
    """Generate the endpoint's slabs and track them in both slab databases."""
    logger.debug("In CreateSlabs, Endpoint %s" % (self.GID()))
    self.slab_allocator = objects.TemplateFieldObject("range/0/2048")
    self.slabs = objects.ObjectDatabase()
    self.mr_slabs = objects.ObjectDatabase()
    self.obj_helper_slab = slab.SlabObjectHelper()
    helper = self.obj_helper_slab
    helper.Generate(self.intf.lif, spec)
    # The same slab set backs both the general and the MR databases.
    self.slabs.SetAll(helper.slabs)
    self.mr_slabs.SetAll(helper.slabs)
    self.total_mr_slabs += len(self.mr_slabs)
def __parse_drop_stats_get_response(self, resp, pre):
    """Copy drop counters from *resp* into the pre- or post-run stats object."""
    if resp is None:
        return
    # Select which snapshot we are filling in.
    if pre:
        stats = self.pre_stats
    else:
        stats = self.post_stats
    for entry in resp.drop_entries:
        # Each stats attribute name doubles as a drop-reason flag on the entry.
        for reason in stats.__dict__.keys():
            if not getattr(entry.reasons, reason, False):
                continue
            logger.debug("DropStats: %s reason count = %d" %\
                         (reason, entry.drop_count))
            setattr(stats, reason, entry.drop_count)
    return
def __build_hdr_stack(self):
    """Walk the scapy layer chain of spkt, detaching each layer onto self.hdrs."""
    logger.debug("Building HDR stack.")
    current = self.spkt.copy()
    nxt = current.payload
    current.remove_payload()
    # 'NoPayload' is scapy's terminator layer at the end of the chain.
    while current.name != 'NoPayload':
        self.hdrs.append(current)
        name = self.__get_hdr_name(current)
        logger.debug(" - Adding hdr:%s to stack" % name)
        self.hdrnames.append(name)
        # Advance: the old payload becomes the current layer, detached
        # from its own payload.
        current, nxt = nxt, nxt.payload
        current.remove_payload()
    return
def __parse_drop_stats_get_response(self, resp, pre):
    """Fold the ingress/egress drop counters in *resp* into Pre/Post stats."""
    if resp is None:
        return
    stats = self.PreStats if pre else self.PostStats
    # Ingress and egress entries are parsed identically; walk both directions.
    for entries, table in ((resp.Response.Stats.Ingress, stats.Ingress),
                           (resp.Response.Stats.Egress, stats.Egress)):
        for entry in entries:
            assert entry.Name in table, "Drops stats inconsistency"
            logger.debug("DropStats: %s reason count = %d" %\
                         (entry.Name, entry.Count))
            table[entry.Name] = entry.Count
    return
def update(self, spkt, hdr):
    """Overwrite the fields of an existing scapy layer in *spkt* from *hdr*.

    Returns the updated scapy layer. Aborts (assert) if the layer is
    missing from the packet or a field cannot be set.
    """
    if hdr.meta.scapy not in spkt:
        logger.error("SCAPY has no header = %s" % hdr.meta.scapy)
        assert(0)
    logger.debug("Updating Header: %s" % hdr.meta.scapy)
    shdr = spkt[hdr.meta.scapy]
    for key, value in hdr.fields.__dict__.items():
        # Skip framework bookkeeping attributes; they are not packet fields.
        if objects.IsFrameworkObjectInternalAttr(key):
            continue
        logger.debug(" - %-10s =" % key, value)
        try:
            shdr.__setattr__(key, value)
        except Exception:
            # Fix: was a bare 'except:' — narrowed so KeyboardInterrupt /
            # SystemExit are not converted into an assert failure.
            logger.error("ScapyHeaderBuilder: Failed to update %s.%s to" %\
                         (hdr.meta.id, key), value)
            assert(0)
    return shdr
def __update_icrc(self, packet):
    """Compute or inherit the RoCE ICRC and stamp it into the scapy packet."""
    if packet.IsNewIcrcRequired():
        spkt = packet.GetScapyPacket()
        builder = IcrcHeaderBuilder(spkt)
        icrc = builder.GetIcrc()
        # Convert to network byte format (32-bit byte swap).
        icrc = (((icrc << 24) & 0xFF000000) |\
                ((icrc << 8) & 0x00FF0000) |\
                ((icrc >> 8) & 0x0000FF00) |\
                ((icrc >> 24) & 0x000000FF))
    elif packet.IsInheritIcrcRequired():
        # No need to byte swap - already swapped when icrc header constructed.
        icrc = packet.GetBasePacketIcrc()
    else:
        # Fix: previously 'icrc' was left unbound when neither mode applied,
        # raising NameError at the debug/stamp lines below. Nothing to update.
        return
    logger.debug("ICRC after byte swap: 0x%x" % icrc)
    self.spkt[penscapy.ICRC].icrc = icrc
    return
def __build_pairs(self):
    """Pair up expected/received packets, best match degree first."""
    # Highest degree (closest match) wins; walk degrees in descending order.
    for deg in sorted(self.dpm, reverse=True):
        for epid, rpid in self.dpm[deg]:
            logger.debug("Processing pair: Deg:%d Epid:%s Rpid:%s" %\
                         (deg, epid, rpid))
            self.__add_dpm_pair(epid, rpid)
    # Add all missing packets (expected but never received).
    for epkt in self.expkts.values():
        self.__add_pair(epkt, None)
    # Add all excess packets (received but never expected).
    for rpkt in self.rxpkts.values():
        self.__add_pair(None, rpkt)
    return
def IsFilterMatch(self, selectors):
    """Check this NVMe session against the testcase selector filters.

    Returns True when every applicable filter group matches, otherwise
    the failing match value.
    """
    logger.debug('Matching Nvme Session: %s' % self.GID())
    #TBD: add nvme specific filters here
    # Fix: leftover debugging 'print(str(selectors))' wrote to stdout;
    # routed through the logger instead.
    logger.debug(str(selectors))
    # Base and session filter groups are optional on the selector.
    if hasattr(selectors.nvmesession, 'base'):
        match = super().IsFilterMatch(selectors.nvmesession.base.filters)
        if match == False:
            return match
    if hasattr(selectors.nvmesession, 'session'):
        match = super().IsFilterMatch(
            selectors.nvmesession.session.filters)
        # NOTE(review): label looks copy-pasted from an IPV6 filter check;
        # message kept as-is to preserve log output.
        logger.debug("- IsIPV6 Filter Match =", match)
        if match == False:
            return match
    match = super().IsFilterMatch(selectors.nvmesession.filters)
    if match == False:
        return match
    return True
def build(self, hdr):
    """Create a new scapy layer of type hdr.meta.scapy and populate its fields.

    Returns the new scapy header. Aborts (assert) when the scapy class is
    unknown or a field cannot be set.
    """
    # Fix: 'hasattr(...) == False' replaced with the idiomatic 'not hasattr'.
    if not hasattr(penscapy, hdr.meta.scapy):
        logger.error("SCAPY has no header = %s" % hdr.meta.scapy)
        assert(0)
    logger.debug("Adding Header: %s" % hdr.meta.scapy)
    scapyhdl = getattr(penscapy, hdr.meta.scapy)
    shdr = scapyhdl()
    for key, value in hdr.fields.__dict__.items():
        # Skip framework bookkeeping attributes; they are not packet fields.
        if objects.IsFrameworkObjectInternalAttr(key):
            continue
        logger.verbose(" - %-10s =" % key, value)
        try:
            shdr.__setattr__(key, value)
        except Exception:
            # Fix: was a bare 'except:' — narrowed so KeyboardInterrupt /
            # SystemExit are not converted into an assert failure.
            logger.error("ScapyHeaderBuilder: Failed to set %s.%s to" %\
                         (hdr.meta.id, key), value)
            assert(0)
    return shdr
def __add_dpm_pair(self, epid, rpid):
    """Consume epid/rpid from the unmatched pools and emit them as a pair.

    Either id may already have been claimed by a higher-degree pairing,
    in which case this pairing is skipped entirely.
    """
    if epid not in self.expkts:
        # Expkt is already added by a pair with better degree.
        logger.debug("- Epid:%s used already..skipping." % epid)
        return
    if rpid not in self.rxpkts:
        logger.debug("- Rpid:%s used already..skipping." % rpid)
        return
    # Claim both packets so later (worse) pairings cannot reuse them.
    epkt = self.expkts.pop(epid)
    rpkt = self.rxpkts.pop(rpid)
    self.__add_pair(epkt, rpkt)
    return
def __build_from_packet_meta(self, packet):
    """Assemble the scapy packet from packet metadata and produce raw bytes."""
    logger.debug("Generating SCAPY Packet.")
    # Add headers in the order recorded by the packet meta.
    for hdrid in packet.hdrsorder:
        hdrdata = packet.headers.__dict__[hdrid]
        if objects.IsFrameworkObject(hdrdata):
            self.__add_header(hdrdata)
    if packet.IsDolHeaderRequired():
        self.__add_dol_header(packet.GetTcId(), packet.GetStepId())
    if packet.IsIcrcEnabled():
        self.__update_icrc(packet)
    serialized = bytes(self.spkt)
    self.pktbytes = serialized
    self.rawbytes = serialized
    #self.__add_crc_header()
    padsize = packet.GetPaddingSize()
    if padsize >= 0:
        # Pad the wire copy out with PAD_BYTEs.
        self.rawbytes += bytes([defs.PAD_BYTE] * padsize)
    else:
        # Negative padding means truncate the wire copy.
        self.rawbytes = self.rawbytes[:len(self.rawbytes) + padsize]
def CreatePds(self, spec):
    """Generate PD objects for this endpoint and collect their UD QPs."""
    self.pds = objects.ObjectDatabase()
    self.obj_helper_pd = pd.PdObjectHelper()
    helper = self.obj_helper_pd
    helper.Generate(self, spec)
    self.pds.SetAll(helper.pds)
    logger.debug("In CreatePds, Endpoint %s" % (self.GID()))
    for eppd in helper.pds:
        logger.debug(" Adding QPs for PD %s, Num of Qps %d" % (eppd.GID(), len(eppd.udqps)))
        # Flatten every PD's UD QPs into the endpoint-level list.
        for qp in eppd.udqps.GetAll():
            logger.debug(" Adding QP: PD %s, QP %s" % (eppd.GID(), qp.GID()))
            self.udqps.append(qp)
    logger.debug(" Total UDQPs in this endpoint: Qps %d" % (len(self.udqps)))
def IsFilterMatch(self, selectors):
    """Match this session's iflow, rflow and base filters against selectors."""
    logger.debug("Matching Session %s" % self.GID())
    # The initiator flow is matched with the selectors as-is.
    selectors.SetFlow(selectors.session.iflow)
    iflow_match = self.iflow.IsFilterMatch(selectors)
    logger.debug("- IFlow Filter Match =", iflow_match)
    if iflow_match == False:
        return iflow_match
    # The responder flow sees the reverse direction: swap src/dst for the
    # duration of the rflow match, then restore.
    selectors.SwapSrcDst()
    selectors.SetFlow(selectors.session.rflow)
    rflow_match = self.rflow.IsFilterMatch(selectors)
    selectors.SwapSrcDst()
    logger.debug("- RFlow Filter Match =", rflow_match)
    if rflow_match == False:
        return rflow_match
    # Finally match the session-level base filters.
    sess_match = super().IsFilterMatch(selectors.session.base.filters)
    logger.debug("- Session Filter Match =", sess_match)
    return sess_match
def IsFilterMatch(self, selectors):
    """Match local/remote QPs plus the optional base/session filter groups."""
    logger.debug('Matching RDMA Session: %s' % self.GID())
    rs = selectors.rdmasession
    lqp_match = self.lqp.IsFilterMatch(rs.lqp)
    logger.debug("- LQP Filter Match =", lqp_match)
    if lqp_match == False:
        return lqp_match
    rqp_match = self.rqp.IsFilterMatch(rs.rqp)
    logger.debug("- RQP Filter Match =", rqp_match)
    if rqp_match == False:
        return rqp_match
    # Base and session filter groups are optional on the selector.
    if hasattr(rs, 'base'):
        base_match = super().IsFilterMatch(rs.base.filters)
        if base_match == False:
            return base_match
    if hasattr(rs, 'session'):
        sess_match = super().IsFilterMatch(
            rs.session.filters)
        logger.debug("- IsIPV6 Filter Match =", sess_match)
        if sess_match == False:
            return sess_match
    return True
def IsFilterMatch(self, spec):
    """Delegate queue filter matching to the base class after logging."""
    logger.debug("Matching QID %d svc %d" % (self.id, self.svc))
    return super().IsFilterMatch(spec.filters)
def __add_pair(self, epkt, rpkt):
    """Allocate the next pair id and register a CrPacketPair under it."""
    self.pid += 1
    new_id = self.pid
    logger.debug("- Adding new pair id = %s" % new_id)
    self.pairs[new_id] = CrPacketPair(epkt, rpkt, new_id,
                                      self.ignore_fields)
    return
def __parse_file(self, filename):
    """Parse *filename* with the dyml parser and return the parsed result."""
    logger.debug("Parsing file =", filename)
    parsed = dyml.main(filename)
    return parsed
def PopulatePreQStates(tc):
    """Snapshot all NVMe queue/control-block state before the testcase runs.

    Reads the global NVMe object, the namespace control block (nscb), the
    session round-robin bitmap, and the per-session queue states, stashing
    each into tc.pvtdata as *_pre_state for post-run comparison.
    """
    if GlobalOptions.dryrun: return
    #variable used for scale related test cases
    tc.pvtdata.curr_pkt_id = 0
    # Global NVMe object and its control-block snapshots.
    tc.pvtdata.gbl = Store.objects.GetAllByClass(NvmeGlobalObject)[0]
    tc.pvtdata.resourcecb_pre_state = tc.pvtdata.gbl.ResourcecbRead()
    tc.pvtdata.hwxtstxcb_pre_state = tc.pvtdata.gbl.HwxtstxcbRead()
    tc.pvtdata.hwdgsttxcb_pre_state = tc.pvtdata.gbl.HwdgsttxcbRead()
    #get the cmdid
    tc.pvtdata.backend_cid = tc.pvtdata.gbl.CmdIdRead(tc.pvtdata.resourcecb_pre_state.cmdid_ring_proxy_ci)
    nscb = tc.config.nvmens.NscbRead()
    tc.pvtdata.nscb_pre_state = nscb
    start_session_id = getattr(nscb, "rr_session_id_to_be_served")
    nscb_bytes = bytes(nscb)
    # get the last 32 bytes
    bmap = nscb_bytes[32:]
    # and reverse the bytes
    bmap = bmap[::-1]
    bmap_size = len(bmap)
    loop_count = bmap_size
    # Starting position of the scan within the bitmap.
    byte_num = start_session_id//8
    bit_num = start_session_id%8
    # if we are starting with a partial byte, give us one more chance to
    # revisit this byte after all bytes are inspected.
    if bit_num:
        loop_count +=1
    found = False
    logger.debug('bmap_len: %d start_session_id: %d byte_num: %d bit_num: %d' \
        %(len(bmap), start_session_id, byte_num, bit_num))
    # Scan the bitmap (wrapping around) for the first set bit at or after
    # start_session_id — that bit identifies the session to be served next
    # by the round-robin scheduler.
    for _ in range(loop_count):
        byte = bmap[byte_num]
        for bit_num in range(bit_num, 8):
            bmask = 1 << bit_num
            logger.debug('exploring byte: 0x%x byte_num: %d bit_num: %d mask: 0x%x' \
                %(byte, byte_num, bit_num, bmask))
            if byte & bmask:
                found = True
                break
        if found == True:
            break
        # Continue from bit 0 of the next byte (modulo wrap-around).
        bit_num = 0
        byte_num = (byte_num + 1)%bmap_size
    assert found == True, "could not find rr_session_id"
    tc.pvtdata.rr_session_id = (byte_num*8)+bit_num
    tc.pvtdata.rr_session_id_nxt = (tc.pvtdata.rr_session_id+1)%(bmap_size*8)
    tc.config.nvmesession = tc.config.nvmens.SessionGet(tc.pvtdata.rr_session_id)
    logger.info('found rr_session_id: %d byte_num: %d bit_num: %d nvme_session: %s ' \
        'rr_session_id_nxt: %d' \
        %(tc.pvtdata.rr_session_id, byte_num, bit_num, tc.config.nvmesession.GID(), \
        tc.pvtdata.rr_session_id_nxt))
    # TCP control block of the chosen session; deep-copied so the snapshot
    # is insulated from later updates to the live object.
    tcb = Store.objects.Get(tc.config.nvmesession.tcp_other_cbid)
    tcb.GetObjValPd()
    tc.pvtdata.tcb_pre_state = copy.deepcopy(tcb)
    logger.info('pretcb: snd_nxt: %d snd_una: %d rcv_nxt: %d ' \
        %(tcb.snd_nxt, tcb.snd_una, tcb.rcv_nxt))
    # Per-session queue-state snapshots (SQ/CQ plus XTS/DGST tx & rx rings).
    tc.pvtdata.sqcb_pre_state = tc.config.nvmesession.sq.qstate.Read()
    tc.pvtdata.cqcb_pre_state = tc.config.nvmesession.cq.qstate.Read()
    tc.pvtdata.sessprodtxcb_pre_state = tc.config.nvmesession.NvmesessprodtxcbRead()
    tc.pvtdata.sessxtstxcb_pre_state = tc.config.nvmesession.tx_xtsq.qstate.Read()
    tc.pvtdata.sessdgsttxcb_pre_state = tc.config.nvmesession.tx_dgstq.qstate.Read()
    tc.pvtdata.sessprodrxcb_pre_state = tc.config.nvmesession.NvmesessprodrxcbRead()
    tc.pvtdata.sessxtsrxcb_pre_state = tc.config.nvmesession.rx_xtsq.qstate.Read()
    tc.pvtdata.sessdgstrxcb_pre_state = tc.config.nvmesession.rx_dgstq.qstate.Read()
    return
def IsFilterMatch(self, selectors):
    """Run the full chain of flow filter checks against *selectors*.

    Checks tenants, segments, endpoints, interfaces and lifs on both the
    source and destination sides, then the flow's own filters. Returns the
    first failing match value, or True when everything matches.
    """
    logger.debug("Matching %s Flow:%s, Session:%s" %\
                 (self.direction, self.GID(), self.__session.GID()))

    def _check(obj, sel, label):
        # One filter probe: delegate to the object and log the outcome.
        m = obj.IsFilterMatch(sel)
        logger.debug("- %s Filter Match =" % label, m)
        return m

    match = _check(self.__sten, selectors.src.tenant, "Source Tenant")
    if match == False:
        return match
    match = _check(self.__dten, selectors.dst.tenant, "Destination Tenant")
    if match == False:
        return match
    match = _check(self.__sseg, selectors.src.segment, "Source Segment")
    if match == False:
        return match
    match = _check(self.__dseg, selectors.dst.segment, "Destination Segment")
    if match == False:
        return match
    # Endpoint / interface / lif checks apply only when the endpoint exists;
    # lif checks additionally require a local (non-remote) endpoint.
    if self.__sep:
        match = _check(self.__sep, selectors.src.endpoint, "Source Endpoint")
        if match == False:
            return match
    if self.__dep:
        match = _check(self.__dep, selectors.dst.endpoint, "Destination Endpoint")
        if match == False:
            return match
    if self.__sep:
        match = _check(self.__sep.intf, selectors.src.interface, "Source Interface")
        if match == False:
            return match
    if self.__dep:
        match = _check(self.__dep.intf, selectors.dst.interface, "Destination Interface")
        if match == False:
            return match
    if self.__sep and self.__sep.remote == False:
        match = _check(self.__sep.intf.lif, selectors.src.lif, "Source Lif")
        if match == False:
            return match
    if self.__dep and self.__dep.remote == False:
        match = _check(self.__dep.intf.lif, selectors.dst.lif, "Destination Lif")
        if match == False:
            return match
    # Finally, the flow's own filters via the base class.
    match = super().IsFilterMatch(selectors.flow.filters)
    logger.debug("- Flow Filter Match =", match)
    if match == False:
        return match
    return True