def _impl(self):
    """
    Elaborate the filtered out-of-order FIFO: the base class builds the
    item-state vectors and pointers, this adds the tag CAM lookups.
    """
    # base class provides item state flags and the write/read pointer regs
    item_valid, item_write_lock, (_, write_ptr), (_, read_ptr) = super(
        FifoOutOfOrderReadFiltered, self)._impl()
    self.item_valid(item_valid)
    self.item_write_lock(item_write_lock)
    tc = self.tag_cam
    if self.HAS_READ_LOOKUP:
        # two CAM ports: [0] write pre-lookup, [1] read lookup
        tc.match[0](self.write_pre_lookup)
        # a hit counts only for items which are valid and write-locked
        self.write_pre_lookup_res.data(tc.out[0].data & item_valid & item_write_lock)
        StreamNode([tc.out[0]], [self.write_pre_lookup_res]).sync()
        tc.match[1](self.read_lookup)
        # read lookup matches any valid item
        self.read_lookup_res.data(tc.out[1].data & item_valid)
        StreamNode([tc.out[1]], [self.read_lookup_res]).sync()
    else:
        # single CAM port used for the write pre-lookup only
        tc.match(self.write_pre_lookup)
        self.write_pre_lookup_res.data(tc.out.data & item_valid & item_write_lock)
        StreamNode([tc.out], [self.write_pre_lookup_res]).sync()
    write_execute = self.write_execute
    tc.read.addr(read_ptr)
    # the tag of the executed write is stored at the write pointer position
    for dst in [write_execute.index, tc.write.addr]:
        dst(write_ptr)
    tc.write.data(write_execute.key)
    StreamNode([], [tc.write, write_execute]).sync()
    self.read_execute.key(tc.read.data)
def axiWHandler(self, wErrFlag):
    """
    Drive the AXI W (write data) channel from the driver's w channel and
    push the burst-boundary info for the B channel handler.

    :param wErrFlag: when set, the w channel is stalled
        (NOTE(review): presumably an error-freeze condition - confirm)
    """
    w = self.w
    wIn = self.driver.w
    wInfo = self.writeInfoFifo.dataOut
    bInfo = self.bInfoFifo.dataIn
    if hasattr(w, "id"):
        # AXI3 has, AXI4 does not
        w.id(wInfo.id)
    w.data(wIn.data)
    w.strb(wIn.strb)
    if self.useTransSplitting():
        # count beats so that over-long transactions are cut into
        # maximal-length AXI bursts
        wordCntr = self._reg("wWordCntr", self.a.len._dtype, 0)
        doSplit = wordCntr._eq(self.getAxiLenMax()) | wIn.last
        If(StreamNode([wInfo, wIn], [bInfo, w]).ack(),
           If(doSplit,
              wordCntr(0)
           ).Else(
              wordCntr(wordCntr + 1)
           ))
    else:
        doSplit = wIn.last
    # burst info channels advance only on a burst boundary
    extraConds = {wInfo: doSplit,
                  bInfo: doSplit,
                  w: ~wErrFlag}
    w.last(doSplit)
    # mark whether this burst ends the original (unsplit) transaction
    bInfo.isLast(wIn.last)
    StreamNode(masters=[wIn, wInfo], slaves=[bInfo, w],
               extraConds=extraConds).sync()
def select_data_word_from_ouput_word(self, m, s):
    """
    Build the data re-alignment between the narrower side ``m`` and the
    wider side ``s``: the alignment offset recorded with each address is
    used to shift read/write data words.
    """
    # FIFOs carrying the word-alignment offset of each accepted address
    w_align_fifo = self.propagate_addr(m.aw, s.aw)
    r_align_fifo = self.propagate_addr(m.ar, s.ar)
    AL_IN_W = self.ALIGN_BITS_IN
    AL_OUT_W = self.ALIGN_BITS_OUT
    ALIG_W = AL_OUT_W - AL_IN_W
    assert ALIG_W > 0, ALIG_W
    r_align_cases = []
    w_align_cases = []
    # one case per possible shift of an input word inside an output word
    for i in range(2**ALIG_W):
        r_align_cases.append((i, self.connect_shifted(s.r, m.r, i)))
        w_align_cases.append((i, self.connect_shifted(m.w, s.w, i)))
    s.w(m.w, exclude={m.w.data, m.w.strb, m.w.ready, m.w.valid})
    StreamNode(masters=[m.w, w_align_fifo.dataOut], slaves=[s.w]).sync()
    Switch(w_align_fifo.dataOut.data).add_cases(w_align_cases)\
        .Default(
            # case which was unexpected or was filtered out by IN_ADDR_GRANULARITY
            s.w.data(None),
            s.w.strb(None),
        )
    m.r(s.r, exclude={s.r.data, s.r.ready, s.r.valid})
    StreamNode(masters=[s.r, r_align_fifo.dataOut], slaves=[m.r]).sync()
    Switch(r_align_fifo.dataOut.data).add_cases(r_align_cases)\
        .Default(
            # case which was unexpected or was filtered out by IN_ADDR_GRANULARITY
            m.r.data(None),
        )
    # write responses pass through unchanged
    m.b(s.b)
def lookupLogic(self, ramR):
    """
    Build the lookup pipeline: hash the key, read the table RAM at the
    hashed address and compare the stored key with the original key.

    :param ramR: read port of the hash table RAM
    """
    h = self.hash
    lookup = self.lookup
    res = self.lookupRes
    # tmp storage for original key and hash for later check
    origKeyReg = HandshakedReg(LookupKeyIntf)
    origKeyReg.KEY_WIDTH.set(self.KEY_WIDTH)
    self.origKeyReg = origKeyReg
    origKeyReg.dataIn.key(lookup.key)
    if lookup.LOOKUP_ID_WIDTH:
        origKeyReg.dataIn.lookupId(lookup.lookupId)
    origKeyReg.clk(self.clk)
    origKeyReg.rst_n(self.rst_n)
    origKey = origKeyReg.dataOut
    # hash the key and address the table RAM with the hash
    h.dataIn(lookup.key)
    # hash can be wider than the RAM address
    connect(h.dataOut, ramR.addr.data, fit=True)
    inputSlaves = [ramR.addr, origKeyReg.dataIn]
    outputMasters = [origKey, ramR.data, ]
    if self.LOOKUP_HASH:
        # keep the hash itself so it can be returned with the result
        origHashReg = HandshakedReg(Handshaked)
        origHashReg.DATA_WIDTH.set(self.HASH_WITH)
        self.origHashReg = origHashReg
        origHashReg.clk(self.clk)
        origHashReg.rst_n(self.rst_n)
        connect(h.dataOut, origHashReg.dataIn.data, fit=True)
        inputSlaves.append(origHashReg.dataIn)
        outputMasters.append(origHashReg.dataOut)
    StreamNode(masters=[lookup], slaves=inputSlaves).sync()
    # propagate loaded data
    StreamNode(masters=outputMasters, slaves=[res]).sync()
    key, data, vldFlag = self.parseItem(ramR.data.data)
    if self.LOOKUP_HASH:
        res.hash(origHashReg.dataOut.data)
    if self.LOOKUP_KEY:
        res.key(origKey.key)
    if self.LOOKUP_ID_WIDTH:
        res.lookupId(origKey.lookupId)
    if self.DATA_WIDTH:
        res.data(data)
    res.occupied(vldFlag)
    # found only if the slot is occupied and the stored key matches
    res.found(origKey.key._eq(key) & vldFlag)
def lookupResOfTablesDriver(self, resRead, resAck):
    """
    Synchronize the lookupRes channels of all tables and select the slot
    (one-hot in ``targetOH``) where the inserted item should go: a table
    where the key was found, else an empty slot, else a rotating victim.

    Fix: the original built two identical StreamNodes over the same set of
    lookupRes interfaces (one via list, one via ``map``/lambda); a single
    node is now reused for both ``sync`` and ``ack``.

    :param resRead: signal, 1 when the lookup result is being consumed
    :param resAck: extra ack condition for the lookupRes synchronization
    :return: tuple (lookupResAck, insertFinal, insertFoundOH, targetOH)
    """
    tables = self.tables
    # one hot encoded index where item should be stored (where was found
    # or where is place)
    targetOH = self._reg("targetOH", Bits(self.TABLE_CNT))
    res = [t.lookupRes for t in tables]
    # synchronize all lookupRes from all tables
    resNode = StreamNode(masters=res)
    resNode.sync(resAck)
    insertFinal = self._reg("insertFinal")
    # all tables have a lookup result ready
    lookupResAck = resNode.ack()
    insertFoundOH = [t.lookupRes.found for t in tables]
    isEmptyOH = [~t.lookupRes.occupied for t in tables]
    # insert can finish if the key was found or there is an empty slot
    _insertFinal = Or(*insertFoundOH, *isEmptyOH)
    If(resRead & lookupResAck,
       If(Or(*insertFoundOH),
          # key already present: overwrite it in place
          targetOH(Concat(*reversed(insertFoundOH)))
       ).Else(
          # pick the first empty slot; if none, rotate to choose a victim
          SwitchLogic(
              [(empty, targetOH(1 << i))
               for i, empty in enumerate(isEmptyOH)],
              default=If(targetOH,
                         targetOH(ror(targetOH, 1))
                      ).Else(
                         targetOH(1 << (self.TABLE_CNT - 1))
                      ))
       ),
       insertFinal(_insertFinal)
    )
    return lookupResAck, insertFinal, insertFoundOH, targetOH
def axiAwHandler(self, wErrFlag):
    """
    Drive the AXI AW (write address) channel from the driver request;
    requests longer than the maximal AXI burst are split into several
    bursts whose remaining length is tracked in ``lenDebth``.

    :param wErrFlag: when set, the aw channel is stalled
    """
    req = self.driver.req
    aw = self.a
    r = self._reg
    self.axiAddrDefaults()
    wInfo = self.writeInfoFifo.dataIn
    if self.useTransSplitting():
        # maximal encodable aw.len value (burst length - 1)
        LEN_MAX = mask(aw.len._dtype.bit_length())
        lastReqDispatched = r("lastReqDispatched", defVal=1)
        # remaining length of a request currently being split
        lenDebth = r("lenDebth", req.len._dtype)
        addrBackup = r("addrBackup", req.addr._dtype)
        req_idBackup = r("req_idBackup", req.id._dtype)
        _id = self._sig("id", aw.id._dtype)
        requiresSplit = req.len > LEN_MAX
        requiresDebtSplit = lenDebth > LEN_MAX
        If(lastReqDispatched,
           # take a fresh request from the driver
           _id(req.id),
           aw.addr(req.addr),
           If(requiresSplit,
              aw.len(LEN_MAX)
           ).Else(
              connect(req.len, aw.len, fit=True),
           ),
           req_idBackup(req.id),
           addrBackup(req.addr + self.getBurstAddrOffset()),
           lenDebth(req.len - (LEN_MAX + 1)),
           If(wInfo.rd & aw.ready & req.vld,
              If(requiresSplit,
                 lastReqDispatched(0)
              ).Else(
                 lastReqDispatched(1))),
           StreamNode(masters=[req], slaves=[aw, wInfo],
                      extraConds={
                          aw: ~wErrFlag
                      }).sync(),
        ).Else(
           # continue dispatching bursts of the previously split request
           _id(req_idBackup),
           aw.addr(addrBackup),
           If(requiresDebtSplit,
              aw.len(LEN_MAX)
           ).Else(
              connect(lenDebth, aw.len, fit=True)),
           StreamNode(slaves=[aw, wInfo],
                      extraConds={
                          aw: ~wErrFlag
                      }).sync(),
           # do not consume a new driver request while splitting
           req.rd(0),
           If(StreamNode(slaves=[wInfo, aw]).ack(),
              addrBackup(addrBackup + self.getBurstAddrOffset()),
              lenDebth(lenDebth - (LEN_MAX + 1)),
              If(lenDebth <= LEN_MAX,
                 lastReqDispatched(1))))
        aw.id(_id)
        wInfo.id(_id)
    else:
        # no splitting required: plain pass-through of the request
        aw.id(req.id)
        wInfo.id(req.id)
        aw.addr(req.addr)
        connect(req.len, aw.len, fit=True)
        StreamNode(masters=[req], slaves=[aw, wInfo]).sync()
def _impl(self):
    """
    Measure the length (in words) and the byte remainder of the last word
    of each incoming frame; frames longer than MAX_LEN words are split.
    """
    propagateClkRstn(self)
    dIn = AxiSBuilder(self, self.dataIn).buff().end
    sb = self.sizesBuff
    db = self.dataBuff
    # counts the words of the frame currently passing through
    wordCntr = self._reg("wordCntr",
                         Bits(log2ceil(self.MAX_LEN) + 1),
                         def_val=0)
    overflow = wordCntr._eq(self.MAX_LEN)
    # frame end is forced when MAX_LEN is reached
    last = dIn.last | overflow
    If(StreamNode(masters=[dIn],
                  slaves=[sb.dataIn, db.dataIn]).ack(),
       If(last,
          wordCntr(0)
       ).Else(
          wordCntr(wordCntr + 1)
       ))
    length = self._sig("length", wordCntr._dtype)
    BYTE_CNT = dIn.data._dtype.bit_length() // 8
    if dIn.USE_STRB:
        # compress strb mask as binary number
        rem = self._sig("rem", Bits(log2ceil(BYTE_CNT)))
        SwitchLogic(cases=[(dIn.strb[i],
                            rem(0 if i == BYTE_CNT - 1 else i + 1))
                           for i in reversed(range(BYTE_CNT))],
                    default=[
                        rem(0),
                    ])
        if self.EXPORT_ALIGNMENT_ERROR:
            # sticky flag: strb may be partial only in the last word
            errorAlignment = self._reg("errorAlignment_reg", def_val=0)
            self.errorAlignment(errorAlignment)
            If(dIn.valid & (dIn.strb != mask(BYTE_CNT)) & ~dIn.last,
               errorAlignment(1))
        # a partial last word does not count as a full word
        If(last & (dIn.strb != mask(BYTE_CNT)),
           length(wordCntr)
        ).Else(
           length(wordCntr + 1))
    else:
        length(wordCntr + 1)
        rem = Bits(log2ceil(BYTE_CNT)).from_py(0)
    sb.dataIn.data(Concat(length, rem))
    db.dataIn(dIn, exclude=[dIn.valid, dIn.ready, dIn.last])
    db.dataIn.last(last)
    # size record is pushed only together with the last word of a frame
    StreamNode(masters=[dIn],
               slaves=[sb.dataIn, db.dataIn],
               extraConds={
                   sb.dataIn: last
               }).sync()
    self.sizes(sb.dataOut)
    self.dataOut(db.dataOut)
def _impl(self):
    """
    Gate a frame stream by ``self.en``: once a frame transfer is in
    progress it stays enabled until its last word passes.
    """
    in_s = self.dataIn
    out_s = self.dataOut

    # 1 while a previously started frame is still in flight
    framePending = self._reg("framePending", defVal=False)
    word_ack = StreamNode([in_s], [out_s]).ack()
    # NOTE(review): framePending is only updated while already pending -
    # confirm that self.en is held high by the caller for the whole frame
    If(framePending & word_ack,
       framePending(~in_s.last))

    # pass words if explicitly enabled or a frame is already pending
    transfer_en = self.en | framePending
    StreamNode(masters=[in_s], slaves=[out_s]).sync(transfer_en)
    connect(in_s, out_s, exclude=[in_s.ready, in_s.valid])
def connect_tag_lookup(self):
    """
    Drive the lookup ports of the tag array from the AXI AR/AW address
    channels; AW lookups additionally notify the read-cancel channel.
    """
    tags = self.tag_array
    addr_channels = (self.s.ar, self.s.aw)
    for a, tag_lookup in zip(addr_channels, tags.lookup):
        tag_lookup.addr(a.addr)
        tag_lookup.id(a.id)
        lookup_slaves = [tag_lookup]
        if a is self.s.aw:
            # a write may invalidate data a pending read depends on
            rc = self.read_cancel
            rc.addr(a.addr)
            lookup_slaves.append(rc)
        StreamNode([a], lookup_slaves).sync()
def _impl(self):
    """
    Compare the input stream against the constant VAL and emit a 1-bit
    match flag; values wider than one data word are compared word by word
    with a small FSM.

    Fix: ``offset`` was never advanced inside the word loop, so every word
    was compared against the lowest VAL slice; it now moves by one data
    word per iteration.
    """
    V = self.VAL
    VAL_W = self.VAL._dtype.bit_length()
    D_W = self.DATA_WIDTH
    if not V._is_full_valid():
        raise NotImplementedError()
    din = self.dataIn
    dout = self.dataOut
    if VAL_W <= D_W:
        # do comparison in single word
        dout.data(din.data[VAL_W:]._eq(V))
        StreamNode([din], [dout]).sync()
    else:
        # build fsm for comparing
        word_cnt = ceil(VAL_W / D_W)
        # NOTE(review): log2ceil(word_cnt - 1) may be one bit short for
        # indexing 0..word_cnt-1 - confirm the log2ceil convention used
        word_index = self._reg("word_index",
                               Bits(log2ceil(word_cnt - 1)),
                               def_val=0)
        # true if all previous words were matching
        state = self._reg("state", def_val=1)
        offset = 0
        word_cases = []
        for is_last_word, i in iter_with_last(range(word_cnt)):
            val_low = offset
            val_high = min(offset + D_W, VAL_W)
            # advance to the VAL slice of the next word
            offset = val_high
            in_high = val_high - val_low
            state_update = din.data[in_high:]._eq(V[val_high:val_low])
            if is_last_word:
                # result = all previous words matched and this one matches
                dout.data(state & state_update)
            else:
                word_cases.append((i, state(state & state_update)))
        If(StreamNode([din], [dout]).ack(),
           If(din.last,
              word_index(0),
              state(1),
           ).Else(
              word_index(word_index + 1),
              Switch(word_index)\
                  .add_cases(word_cases)
           ))
        # the result is produced only with the last word of the input
        StreamNode([din], [dout],
                   extraConds={
                       dout: din.valid & din.last
                   },
                   skipWhen={
                       dout: ~(din.valid & din.last)
                   }).sync()
def handshakeLogicForWord(self,
                          inPorts: List[Union[Handshaked, StreamNode]],
                          lastInPorts: List[Union[Handshaked, StreamNode]],
                          en: Union[bool, RtlSignal]):
    """
    Build the handshake for one output word.

    :param inPorts: ports whose combined ack forms the word's ack
    :param lastInPorts: ports which consume their input on this word
    :param en: enable for the rd/consume logic of lastInPorts
    :return: ack signal of this word (True when there is nothing to wait on)
    """
    if lastInPorts:
        # instantiate rd logic of input streams
        StreamNode(masters=lastInPorts).sync(en)

    return StreamNode(masters=inPorts).ack() if inPorts else True
def propagateRequest(frame, indx):
    """
    Build the statements which dispatch a datapump request for a single
    frame and record whether its acknowledge should be propagated.

    Closure over ``req``, ``ackPropageteInfo``, ``_set``, ``SKIP`` and
    ``PROPAGATE`` from the enclosing scope.

    :param frame: frame template to dispatch
    :param indx: index of the frame in the template
    :return: tuple (statements, ack of this stage)
    """
    inNode = StreamNode(slaves=[req, ackPropageteInfo.dataIn])
    ack = inNode.ack()
    isLastFrame = indx == len(self._frames) - 1
    statements = [
        req.addr(_set.data + frame.startBitAddr // 8),
        req.len(frame.getWordCnt() - 1),
        self.driveReqRem(
            req, frame.parts[-1].endOfPart - frame.startBitAddr),
        # only the ack of the first frame is propagated to the user
        ackPropageteInfo.dataIn.data(SKIP if indx != 0 else PROPAGATE),
        inNode.sync(_set.vld),
        # the input address is consumed with the last frame's request
        _set.rd(ack if isLastFrame else 0),
    ]
    return statements, ack & _set.vld
def _impl(self):
    """
    Dispatch one write request per frame of the template over the write
    datapump and merge the per-frame acknowledges into one user ack.
    """
    req = self.wDatapump.req
    w = self.wDatapump.w
    ack = self.wDatapump.ack
    # multi frame
    ackPropageteInfo = HandshakedFifo(Handshaked)
    ackPropageteInfo.DATA_WIDTH.set(1)
    ackPropageteInfo.DEPTH.set(self.MAX_OVERLAP)
    self.ackPropageteInfo = ackPropageteInfo
    propagateClkRstn(self)
    if self.WRITE_ACK:
        _set = self.set
    else:
        # buffer the set channel so requests can be pipelined
        _set = HsBuilder(self, self.set).buff().end
    req.id(self.ID)
    req.rem(0)

    def propagateRequests(frame, indx):
        # dispatch a request for one frame; remember whether its ack
        # should be propagated to the user or skipped
        ack = StreamNode(slaves=[req, ackPropageteInfo.dataIn]).ack()
        statements = [req.addr(_set.data + frame.startBitAddr // 8),
                      req.len(frame.getWordCnt() - 1),
                      StreamNode(slaves=[req, ackPropageteInfo.dataIn],
                                 ).sync(_set.vld)
                      ]
        if indx != 0:
            prop = SKIP
        else:
            prop = PROPAGATE
        statements.append(ackPropageteInfo.dataIn.data(prop))
        isLastFrame = indx == len(self._frames) - 1
        if isLastFrame:
            statements.append(_set.rd(ack))
        else:
            statements.append(_set.rd(0))
        return statements, ack & _set.vld

    StaticForEach(self, self._frames, propagateRequests)
    # connect write channel
    w(self.frameAssember.dataOut)
    # propagate ack
    StreamNode(masters=[ack, ackPropageteInfo.dataOut],
               slaves=[self.writeAck],
               skipWhen={
                   self.writeAck: ackPropageteInfo.dataOut.data._eq(PROPAGATE)
               }).sync()
    # connect fields to assembler
    for _, transTmpl in self._tmpl.walkFlatten():
        f = transTmpl.origin
        intf = self.frameAssember.dataIn._fieldsToInterfaces[f]
        intf(self.dataIn._fieldsToInterfaces[f])
def lookupOfTablesDriver(self, state, tableKey):
    """
    Feed the same key to the lookup port of every table; all lookups are
    enabled only while the FSM is in the lookup state.
    """
    fsm_t = state._dtype
    lookup_ports = [t.lookup for t in self.tables]
    for port in lookup_ports:
        port.key(tableKey)
    # activate lookup only in lookup state
    lookup_en = state._eq(fsm_t.lookup)
    StreamNode(slaves=lookup_ports).sync(lookup_en)
def _impl(self):
    """
    Dispatch one write request per frame of the template over the write
    datapump and merge the per-frame acknowledges into one user ack.
    """
    req = self.wDatapump.req
    w = self.wDatapump.w
    ack = self.wDatapump.ack
    # multi frame
    if self.MAX_OVERLAP > 1:
        ackPropageteInfo = HandshakedFifo(Handshaked)
        ackPropageteInfo.DEPTH = self.MAX_OVERLAP
    else:
        ackPropageteInfo = HandshakedReg(Handshaked)
    ackPropageteInfo.DATA_WIDTH = 1
    self.ackPropageteInfo = ackPropageteInfo
    if self.WRITE_ACK:
        _set = self.set
    else:
        # buffer the set channel so requests can be pipelined
        _set = HsBuilder(self, self.set).buff().end
    if self.ID_WIDTH:
        req.id(self.ID)

    def propagateRequest(frame, indx):
        # dispatch a request for one frame; remember whether its ack
        # should be propagated to the user or skipped
        inNode = StreamNode(slaves=[req, ackPropageteInfo.dataIn])
        ack = inNode.ack()
        isLastFrame = indx == len(self._frames) - 1
        statements = [
            req.addr(_set.data + frame.startBitAddr // 8),
            req.len(frame.getWordCnt() - 1),
            self.driveReqRem(
                req, frame.parts[-1].endOfPart - frame.startBitAddr),
            ackPropageteInfo.dataIn.data(SKIP if indx != 0 else PROPAGATE),
            inNode.sync(_set.vld),
            _set.rd(ack if isLastFrame else 0),
        ]
        return statements, ack & _set.vld

    StaticForEach(self, self._frames, propagateRequest)
    # connect write channel
    w(self.frameAssember.dataOut)
    # propagate ack
    StreamNode(masters=[ack, ackPropageteInfo.dataOut],
               slaves=[self.writeAck],
               skipWhen={
                   self.writeAck: ackPropageteInfo.dataOut.data._eq(PROPAGATE)
               }).sync()
    # connect fields to assembler
    for _, transTmpl in self._tmpl.walkFlatten():
        f = transTmpl.getFieldPath()
        intf = self.frameAssember.dataIn._fieldsToInterfaces[f]
        intf(self.dataIn._fieldsToInterfaces[f])
    propagateClkRstn(self)
def axiBHandler(self):
    """
    Join the AXI B (write response) channel with the per-burst info FIFO
    and forward the ack to the driver only for the last burst of a
    (possibly split) transaction.
    """
    ack = self.driver.ack
    lastFlags = self.bInfoFifo.dataOut
    node = StreamNode(
        masters=[self.axi.b, lastFlags],
        slaves=[ack],
        # intermediate bursts consume the B beat without acking the driver
        extraConds={ack: lastFlags.isLast},
    )
    node.sync()
def tables_lookup_driver(self, state: RtlSignal, tableKey: RtlSignal,
                         lookup_en: RtlSignal):
    """
    Connect a lookup ports of all tables

    :param state: FSM state signal (kept for interface compatibility;
        not read here - the caller precomputes ``lookup_en``)
    :param tableKey: key driven to the lookup port of every table
    :param lookup_en: enable of all lookups
    """
    lookups = [t.lookup for t in self.tables]
    for lookup in lookups:
        lookup.key(tableKey)
    # activate lookup only in lookup state (for insert/delete)
    # or if idle and processing lookups
    StreamNode(slaves=lookups).sync(lookup_en)
def _impl(self):
    """
    Read a single array item over the read datapump: translate the item
    index into a byte address; when several items share one data word,
    the sub-word index is carried through a FIFO and used to select the
    item from the returned word.
    """
    propagateClkRstn(self)
    ITEM_WIDTH = int(self.ITEM_WIDTH)
    DATA_WIDTH = int(self.DATA_WIDTH)
    ITEMS_IN_DATA_WORD = self.ITEMS_IN_DATA_WORD
    ITEM_SIZE_IN_WORDS = 1
    # only byte-aligned items fully packing the data word are supported
    if ITEM_WIDTH % 8 != 0 or ITEM_SIZE_IN_WORDS * DATA_WIDTH != ITEMS_IN_DATA_WORD * ITEM_WIDTH:
        raise NotImplementedError(ITEM_WIDTH)
    req = self.rDatapump.req
    req.id(self.ID)
    req.len(ITEM_SIZE_IN_WORDS - 1)
    req.rem(0)
    if ITEMS_IN_DATA_WORD == 1:
        # byte address = index * item size
        addr = Concat(self.index.data, vec(0, log2ceil(ITEM_WIDTH // 8)))
        req.addr(self.base + fitTo(addr, req.addr))
        StreamNode(masters=[self.index], slaves=[req]).sync()
        self.item.data(self.rDatapump.r.data)
        StreamNode(masters=[self.rDatapump.r], slaves=[self.item]).sync()
    else:
        r = self.rDatapump.r.data
        f = self.itemSubIndexFifo
        subIndexBits = f.dataIn.data._dtype.bit_length()
        itemAlignBits = log2ceil(ITEM_WIDTH // 8)
        # word address from the upper index bits, zero in-word offset
        addr = Concat(self.index.data[:subIndexBits],
                      vec(0, itemAlignBits + subIndexBits))
        req.addr(self.base + fitTo(addr, req.addr))
        # remember which item of the word was requested
        f.dataIn.data(self.index.data[subIndexBits:])
        StreamNode(masters=[self.index], slaves=[req, f.dataIn]).sync()
        # NOTE(review): case values are reversed relative to slice order
        # (sub-index 0 selects the highest item slice) - confirm ordering
        Switch(f.dataOut.data).addCases([
            (ITEMS_IN_DATA_WORD - i - 1,
             self.item.data(r[(ITEM_WIDTH * (i + 1)):(ITEM_WIDTH * i)]))
            for i in range(ITEMS_IN_DATA_WORD)
        ])
        StreamNode(masters=[self.rDatapump.r, f.dataOut],
                   slaves=[self.item]).sync()
def insertLogic(self, ramW):
    """
    Write the insert record (key [+ data] + valid flag) into the hash
    table RAM at the precomputed hash address.
    """
    insert = self.insert
    if self.DATA_WIDTH:
        record = Concat(insert.key, insert.data, insert.vldFlag)
    else:
        record = Concat(insert.key, insert.vldFlag)
    ramW.addr(insert.hash)
    ramW.data(record)
    StreamNode(masters=[insert], slaves=[ramW]).sync()
def connectPhyout(self, segfaultFlag):
    """
    Assemble the physical address from the looked-up page base and the
    stored page offset; the output is blocked on a segfault.
    """
    phyAddrBase = self.lvl2get.item
    pageOffset = self.pageOffsetFifo.dataOut
    # either an earlier stage flagged an error or the entry is invalid
    segfault = segfaultFlag | phyAddrBase.data[0]._eq(FLAG_INVALID)
    self.physOut.data(
        Concat(phyAddrBase.data[:self.PAGE_OFFSET_WIDTH], pageOffset.data))
    StreamNode(masters=[phyAddrBase, pageOffset],
               slaves=[self.physOut],
               extraConds={self.physOut: ~segfault}).sync()
def connectL1Load(self, lvl1readAddr):
    """
    Split the incoming virtual address into the level-1 read address, the
    level-2 index and the in-page offset, and distribute the pieces.
    """
    virtIn = self.virtIn
    vaddr = virtIn.data
    OFFSET_W = self.PAGE_OFFSET_WIDTH
    LVL2_W = self.LVL2_PAGE_TABLE_INDX_WIDTH

    lvl2indx = self.lvl2indxFifo.dataIn
    pageOffset = self.pageOffsetFifo
    # bits just above the page offset index the level-2 table
    lvl2indx.data(vaddr[(LVL2_W + OFFSET_W):OFFSET_W])
    pageOffset.dataIn.data(vaddr, fit=True)
    # the topmost bits address the level-1 table
    lvl1readAddr.data(vaddr[:(LVL2_W + OFFSET_W)])
    StreamNode(masters=[virtIn],
               slaves=[lvl2indx, lvl1readAddr, pageOffset.dataIn]).sync()
def propagateRequest(frame, indx):
    """
    Build the statements which dispatch a read request for a single frame.

    Closure over ``get``, ``req`` and ``self`` from the enclosing scope.

    :param frame: frame template to dispatch
    :param indx: index of the frame in the template
    :return: tuple (statements, ack of this stage)
    """
    isLastFrame = indx == len(self._frames) - 1
    s = [
        req.addr(get.data + frame.startBitAddr // 8),
        req.len(frame.getWordCnt() - 1),
        self.driveReqRem(req,
                         frame.parts[-1].endOfPart - frame.startBitAddr),
        req.vld(get.vld),
        # the input address is consumed with the last frame's request
        get.rd(req.rd if isLastFrame else 0)
    ]
    ack = StreamNode(masters=[get], slaves=[self.rDatapump.req]).ack()
    return s, ack
def reqHandler(self, dpReq, orderFifoIn):
    """
    Round-robin arbitration of the drivers' request channels; the index
    of the selected driver is pushed to the order FIFO so responses can
    later be routed back (this component is just a proxy).
    """
    driversReq = [d.req for d in self.drivers]
    builder = HsBuilder.join_fair(self, driversReq, exportSelected=True)
    req = builder.end
    reqJoin = builder.lastComp

    connect(req, dpReq, exclude=[dpReq.vld, dpReq.rd])
    # record which driver won the arbitration as a binary index
    orderFifoIn.data(oneHotToBin(self, reqJoin.selectedOneHot.data))
    StreamNode(masters=[req], slaves=[dpReq, orderFifoIn]).sync()
def propagateRequests(frame, indx):
    """
    Build the statements which dispatch a datapump request for a single
    frame and record whether its acknowledge should be propagated.

    Closure over ``req``, ``ackPropageteInfo``, ``_set``, ``SKIP`` and
    ``PROPAGATE`` from the enclosing scope.

    :param frame: frame template to dispatch
    :param indx: index of the frame in the template
    :return: tuple (statements, ack of this stage)
    """
    ack = StreamNode(slaves=[req, ackPropageteInfo.dataIn]).ack()
    statements = [req.addr(_set.data + frame.startBitAddr // 8),
                  req.len(frame.getWordCnt() - 1),
                  StreamNode(slaves=[req, ackPropageteInfo.dataIn],
                             ).sync(_set.vld)
                  ]
    # only the ack of the first frame is propagated to the user
    if indx != 0:
        prop = SKIP
    else:
        prop = PROPAGATE
    statements.append(ackPropageteInfo.dataIn.data(prop))
    isLastFrame = indx == len(self._frames) - 1
    # the input address is consumed with the last frame's request
    if isLastFrame:
        statements.append(_set.rd(ack))
    else:
        statements.append(_set.rd(0))
    return statements, ack & _set.vld
def read_data_section(self, read_ack: RtlSignal,
                      waiting_transaction_id: RtlSignal,
                      waiting_transaction_vld: RtlSignal,
                      data_copy_override: VldSynced):
    """
    Connect the frame buffer between the master read channel and the
    slave read channel; on the last beat of a frame the stored id may be
    replaced so that a waiting (deduplicated) transaction is served by
    re-reading the same buffered frame.
    """
    s = self.s
    m = self.m
    fb = self.frame_buff
    data_out_node = StreamNode([fb.dataOut], [s.r])
    data_out_node.sync()
    read_ack(data_out_node.ack())
    # replay the frame if another transaction waits for the same data,
    # or when an explicit override requests the copy
    fb.dataOut_copy_frame((fb.dataOut.valid
                           & fb.dataOut.last
                           & waiting_transaction_vld[fb.dataOut.id])
                          | data_copy_override.vld)
    If(data_copy_override.vld,
       fb.dataOut_replacement_id(data_copy_override.data)
    ).Else(
       fb.dataOut_replacement_id(
           waiting_transaction_id[fb.dataOut.id]))
    s.r(fb.dataOut, exclude={s.r.valid, s.r.ready})
    # fill the frame buffer from the master read channel
    StreamNode(
        [m.r],
        [fb.dataIn],
    ).sync()
    fb.dataIn(m.r, exclude={m.r.valid, m.r.ready})
def matchHandler(self, mem, key: Handshaked, match_res: Handshaked):
    """
    Compare every stored CAM word with the incoming key and emit a
    one-hot vector of the matches.
    """
    key_data = key.data
    if self.USE_VLD_BIT:
        # stored items carry an extra valid bit; only valid items can match
        key_data = Concat(key.data, BIT.from_py(1))

    # bit i of the result = (item i equals the key); Concat is MSB-first
    hit_bits = [mem[i]._eq(key_data) for i in range(self.ITEMS)]
    match_res.data(Concat(*reversed(hit_bits)))
    StreamNode([key], [match_res]).sync()
def connectL2Load(self, lvl2base, segfaultFlag):
    """
    Start the level-2 page table load: the index comes from the lvl2indx
    FIFO and the base from the level-1 entry; blocked while a segfault
    is flagged.
    """
    loader = self.lvl2get
    indx_out = self.lvl2indxFifo.dataOut
    self.rDatapump(loader.rDatapump)
    loader.base(lvl2base.data)
    loader.index.data(indx_out.data)
    StreamNode(masters=[lvl2base, indx_out],
               slaves=[loader.index],
               extraConds={loader.index: ~segfaultFlag}).sync()
def upscale(self, IN_DW, OUT_DW):
    """
    Widen the data path: collect ITEMS = OUT_DW // IN_DW input words and
    release them together as a single output word.

    Fix: the register branch used ``wordIndx <= ITEMS - 1`` which is true
    for every index in ``range(ITEMS)``, so the documented register-less
    path for the last item was unreachable; with ``<`` the last item is
    passed through directly (output values are unchanged - the register
    it removes could never hold a value the output observed).

    :param IN_DW: input data width in bits
    :param OUT_DW: output data width in bits, must be a multiple of IN_DW
    """
    if OUT_DW % IN_DW != 0:
        raise NotImplementedError()

    ITEMS = OUT_DW // IN_DW
    dIn = self.getDataWidthDependent(self.dataIn)
    dataOut = self.dataOut
    dOut = self.getDataWidthDependent(dataOut)

    itemCntr = self._reg("itemCntr", Bits(log2ceil(ITEMS + 1)), def_val=0)
    hs = StreamNode([self.dataIn], [dataOut]).ack()
    # output word completes on the ITEMS-th input word or an early frame end
    isLastItem = (itemCntr._eq(ITEMS - 1) | self.dataIn.last)
    vld = self.get_valid_signal(self.dataIn)

    outputs = {outp: [] for outp in dOut}
    for wordIndx in range(ITEMS):
        for inp, outp in zip(dIn, dOut):
            s = self._sig(f"item_{wordIndx:d}_{inp._name:s}", inp._dtype)
            if wordIndx < ITEMS - 1:
                # intermediate items are held in a register until the
                # whole output word can be released
                r = self._reg(f"reg_{inp._name:s}_{wordIndx:d}",
                              inp._dtype, def_val=0)
                If(hs & isLastItem,
                   r(0)
                ).Elif(vld & itemCntr._eq(wordIndx),
                   r(inp)
                )
                If(itemCntr._eq(wordIndx),
                   s(inp)
                ).Else(
                   s(r)
                )
            else:
                # last item does not need a register, it is passed through
                If(itemCntr._eq(wordIndx),
                   s(inp)
                ).Else(
                   s(0)
                )
            outputs[outp].append(s)

    # dataIn/dataOut handshake
    self.get_ready_signal(self.dataIn)(self.get_ready_signal(dataOut))
    self.get_valid_signal(dataOut)(vld & (isLastItem))

    # pass through signals which do not depend on the data width
    for inp, outp in zip(self.get_data(self.dataIn), self.get_data(dataOut)):
        if inp not in dIn:
            outp(inp)

    # concatenate the collected items into the output word (MSB-first)
    for outp, outItems in outputs.items():
        outp(Concat(*reversed(outItems)))

    # itemCntr next logic
    If(hs,
       If(isLastItem,
          itemCntr(0)
       ).Else(
          itemCntr(itemCntr + 1)
       ))
def downscale(self, IN_DW, OUT_DW):
    """
    Narrow the data path: split each input word into ITEMS = IN_DW // OUT_DW
    output words emitted in the order selected by an item counter.

    :param IN_DW: input data width in bits, must be a multiple of OUT_DW
    :param OUT_DW: output data width in bits
    """
    if IN_DW % OUT_DW != 0:
        raise NotImplementedError()
    dOut = self.getDataWidthDependent(self.dataOut)
    # instantiate AxiSReg, AxiSBuilder is not used to avoid dependencies
    inReg = AxiSReg(self.intfCls)
    inReg._updateParamsFrom(self.dataIn)
    self.inReg = inReg
    inReg.clk(self.clk)
    inReg.rst_n(self.rst_n)
    inReg.dataIn(self.dataIn)
    dataIn = inReg.dataOut
    dIn = self.getDataWidthDependent(dataIn)
    ITEMS = IN_DW // OUT_DW
    itemCntr = self._reg("itemCntr", Bits(log2ceil(ITEMS + 1)), def_val=0)
    hs = StreamNode([dataIn], [self.dataOut]).ack()
    isLastItem = itemCntr._eq(ITEMS - 1)
    # an input word may carry invalid tail items (per strb): finish early
    strbLastOverride = self.nextAreNotValidLogic(dataIn.strb,
                                                 itemCntr,
                                                 ITEMS,
                                                 OUT_DW)
    if strbLastOverride is not True:
        isLastItem = isLastItem | strbLastOverride
    # connected item selected by itemCntr to output
    for inp, outp in zip(dIn, dOut):
        w = outp._dtype.bit_length()
        Switch(itemCntr)\
            .add_cases([
                (wordIndx, outp(inp[((wordIndx + 1) * w):(w * wordIndx)]))
                for wordIndx in range(ITEMS)
            ])\
            .Default(
                outp(None)
            )
    # connect others signals directly
    for inp, outp in zip(self.get_data(dataIn), self.get_data(self.dataOut)):
        if inp not in dIn and inp is not dataIn.last:
            outp(inp)
    # output "last" only on the last item of the last input word
    self.dataOut.last(dataIn.last & isLastItem)
    # consume the input word only after its last item was sent
    self.get_ready_signal(dataIn)(self.get_ready_signal(self.dataOut)
                                  & isLastItem
                                  & dataIn.valid)
    self.get_valid_signal(self.dataOut)(self.get_valid_signal(dataIn))
    If(hs,
       If(isLastItem,
          itemCntr(0)
       ).Else(
          itemCntr(itemCntr + 1)
       ))
def gen_b_or_r_logic(self, inp, outp, fifo_out, propagete_only_on_last):
    """
    Use counter to skip intermediate generated transactions and pass only
    confirmation from last beat of the original transaction.

    :param inp: input (master) response channel
    :param outp: output (slave) response channel
    :param fifo_out: FIFO output with the (id, len) of each original
        transaction
    :param propagete_only_on_last: if True, outp fires only on the last
        beat; otherwise every beat is propagated
    """
    name_prefix = outp._name
    # remaining beats of the currently tracked transaction
    rem = self._reg(name_prefix + "rem", self.s.aw.len._dtype)
    # id of the original transaction, restored on the output
    id_tmp = self._reg(name_prefix + "id_tmp", outp.id._dtype)
    rem_vld = self._reg(name_prefix + "rem_vld", def_val=0)
    StreamNode(
        [inp], [outp],
        extraConds={
            outp: rem_vld & rem._eq(0) if propagete_only_on_last else rem_vld,
            inp: rem_vld,
        }).sync()
    If(rem_vld,
       # pop the next transaction record when the current one finishes
       fifo_out.rd(inp.valid & outp.ready & rem._eq(0)),
       If(inp.valid & outp.ready,
          # now processing next beat
          If(rem != 0,
             # this was not the last beat
             rem(rem - 1)
          ).Elif(fifo_out.vld,
             # this was the last beat and we can directly start new one
             rem(fifo_out.len),
             id_tmp(fifo_out.id),
          ).Else(
             # this was the last beat and there is no next transaction
             rem_vld(0),
          )),
    ).Else(
       # in idle store the information from b_fifo
       rem(fifo_out.len),
       id_tmp(fifo_out.id),
       rem_vld(fifo_out.vld),
       fifo_out.rd(1),
    )
    outp.id(id_tmp)
    already_connected = {outp.valid, outp.ready, outp.id}
    if hasattr(outp, "last"):
        outp.last(rem._eq(0) & rem_vld)
        already_connected.add(outp.last)
    outp(inp, exclude=already_connected)