def _impl(self):
    """
    Proxy read data from the shared datapump to the driver whose index
    was stored into orderInfoFifo when its request was dispatched.
    """
    assert int(self.DRIVER_CNT) > 1, "It makes no sense to use interconnect in this case"
    propagateClkRstn(self)
    self.reqHandler(self.rDatapump.req, self.orderInfoFifo.dataIn)

    fifoOut = self.orderInfoFifo.dataOut
    r = self.rDatapump.r
    driversR = list(map(lambda d: d.r, self.drivers))

    # ready flag of the driver currently selected by the order fifo
    selectedDriverReady = self._sig("selectedDriverReady")
    selectedDriverReady(Or(*map(lambda d: fifoOut.data._eq(d[0]) & d[1].ready,
                                enumerate(driversR))))

    # extra enable signals based on selected driver from orderInfoFifo
    # extraHsEnableConds = {
    #                       r : fifoOut.vld  # on end of frame pop new item
    #                      }
    for i, d in enumerate(driversR):
        # extraHsEnableConds[d]
        d.valid(r.valid & fifoOut.vld & fifoOut.data._eq(i))
        connect(r, d, exclude=[d.valid, d.ready])

    r.ready(fifoOut.vld & selectedDriverReady)
    # pop the next driver index at the end of each read frame
    fifoOut.rd(r.valid & r.last & selectedDriverReady)
def test_IntfDirections_multistream_setSrc2(self):
    """Check interface directions after per-channel connect and after reversing."""
    def m(intf):
        return self.assertEqual(intf._direction, INTF_DIRECTION.MASTER)

    def s(intf):
        return self.assertEqual(intf._direction, INTF_DIRECTION.SLAVE)

    def checkChannel(ch, dirFn):
        # data/last/valid follow the channel direction, ready is inverted
        other = m if dirFn is s else s
        for sub in (ch.data, ch.last, ch.valid):
            dirFn(sub)
        other(ch.ready)

    i, i2 = createTwoAxiDuplexStreams()
    connect(i2.rx, i.rx)
    connect(i.tx, i2.tx)

    m(i)
    s(i.rx)
    checkChannel(i.rx, s)
    checkChannel(i.tx, m)
    s(i2)
    checkChannel(i2.rx, m)
    checkChannel(i2.tx, s)

    i._reverseDirection()
    i2._reverseDirection()

    s(i)
    m(i.rx)
    checkChannel(i.rx, m)
    checkChannel(i.tx, s)
    m(i2)
    checkChannel(i2.rx, s)
    checkChannel(i2.tx, m)
def lookupLogic(self, ramR):
    """
    Build the lookup datapath: hash the key into a table address, keep the
    original key (and optionally hash / lookupId) in a register while the
    RAM read is in flight, then compare the loaded item with it.
    """
    h = self.hash
    lookup = self.lookup
    res = self.lookupRes

    # tmp storage for original key and hash for later check
    origKeyReg = HandshakedReg(LookupKeyIntf)
    origKeyReg.KEY_WIDTH.set(self.KEY_WIDTH)
    self.origKeyReg = origKeyReg
    origKeyReg.dataIn.key(lookup.key)
    if lookup.LOOKUP_ID_WIDTH:
        origKeyReg.dataIn.lookupId(lookup.lookupId)
    origKeyReg.clk(self.clk)
    origKeyReg.rst_n(self.rst_n)
    origKey = origKeyReg.dataOut

    # hash key and address with has in table
    h.dataIn(lookup.key)
    # has can be wider
    connect(h.dataOut, ramR.addr.data, fit=True)

    inputSlaves = [ramR.addr, origKeyReg.dataIn]
    outputMasters = [origKey, ramR.data, ]

    if self.LOOKUP_HASH:
        origHashReg = HandshakedReg(Handshaked)
        # NOTE(review): "HASH_WITH" looks like a typo of HASH_WIDTH —
        # confirm against the parameter declaration of this unit
        origHashReg.DATA_WIDTH.set(self.HASH_WITH)
        self.origHashReg = origHashReg
        origHashReg.clk(self.clk)
        origHashReg.rst_n(self.rst_n)
        connect(h.dataOut, origHashReg.dataIn.data, fit=True)
        inputSlaves.append(origHashReg.dataIn)
        outputMasters.append(origHashReg.dataOut)

    StreamNode(masters=[lookup], slaves=inputSlaves).sync()
    # propagate loaded data
    StreamNode(masters=outputMasters, slaves=[res]).sync()

    key, data, vldFlag = self.parseItem(ramR.data.data)

    if self.LOOKUP_HASH:
        res.hash(origHashReg.dataOut.data)
    if self.LOOKUP_KEY:
        res.key(origKey.key)
    if self.LOOKUP_ID_WIDTH:
        res.lookupId(origKey.lookupId)
    if self.DATA_WIDTH:
        res.data(data)
    res.occupied(vldFlag)
    # item matches only when the key compares equal and the slot is valid
    res.found(origKey.key._eq(key) & vldFlag)
def _impl(self):
    """
    Proxy read data from the shared datapump to the driver whose index
    was stored into orderInfoFifo when its request was dispatched.
    """
    assert int(self.DRIVER_CNT) > 1, "It makes no sense to use interconnect in this case"
    propagateClkRstn(self)
    self.reqHandler(self.rDatapump.req, self.orderInfoFifo.dataIn)

    fifoOut = self.orderInfoFifo.dataOut
    r = self.rDatapump.r
    driversR = list(map(lambda d: d.r, self.drivers))

    # ready flag of the driver currently selected by the order fifo
    selectedDriverReady = self._sig("selectedDriverReady")
    selectedDriverReady(Or(*map(lambda d: fifoOut.data._eq(d[0]) & d[1].ready,
                                enumerate(driversR))))

    # extra enable signals based on selected driver from orderInfoFifo
    # extraHsEnableConds = {
    #                       r : fifoOut.vld  # on end of frame pop new item
    #                      }
    for i, d in enumerate(driversR):
        # extraHsEnableConds[d]
        d.valid(r.valid & fifoOut.vld & fifoOut.data._eq(i))
        connect(r, d, exclude=[d.valid, d.ready])

    r.ready(fifoOut.vld & selectedDriverReady)
    # pop the next driver index at the end of each read frame
    fifoOut.rd(r.valid & r.last & selectedDriverReady)
def _impl(self):
    """
    Per-channel transaction counters for an AXI bus pass-through; counter
    values are readable over the cntrl endpoint and all counters are
    cleared by a write to the "control" register.
    """
    propagateClkRstn(self)
    self.axi_ep.bus(self.cntrl)
    ep = self.axi_ep.decoded
    # any bus write to the control register clears every counter
    doClr = ep.control.dout.vld
    ep.control.din(1)

    self.master(self.slave)
    s, m = self.slave, self.master
    for dir_, name in [(1, "ar"), (1, "aw"), (1, "w"),
                       (0, "r"), (0, "b")]:
        sCh = getattr(s, name)
        mCh = getattr(m, name)
        if not dir_:
            # response channels flow in the opposite direction
            sCh, mCh = mCh, sCh
        cntrl = getattr(ep, name)

        # count completed handshakes on this channel
        ack = StreamNode(masters={sCh}, slaves={mCh}).ack()
        cntr = self._reg("cntr_" + name, Bits(self.CNTR_WIDTH), defVal=0)
        If(doClr,
           cntr(0)
        ).Elif(ack,
           cntr(cntr + 1)
        )
        connect(cntr, cntrl.din, fit=True)
def test_IntfDirections_multistream_setSrc(self):
    """Check interface directions after a whole-interface connect."""
    def m(intf):
        return self.assertEqual(intf._direction, INTF_DIRECTION.MASTER)

    def s(intf):
        return self.assertEqual(intf._direction, INTF_DIRECTION.SLAVE)

    def checkChannel(ch, dirFn):
        # data/last/valid follow the channel direction, ready is inverted
        other = m if dirFn is s else s
        for sub in (ch.data, ch.last, ch.valid):
            dirFn(sub)
        other(ch.ready)

    i, i2 = createTwoAxiDuplexStreams()
    n = RtlNetlist()
    i._signalsForInterface(n)
    i2._signalsForInterface(n)
    connect(i, i2)

    m(i)
    checkChannel(i.rx, s)
    checkChannel(i.tx, m)
    checkChannel(i2.rx, m)
    checkChannel(i2.tx, s)
def _impl(self):
    """Route inputs a/b through a 2-bit internal signal out to c/d."""
    tmp = self._sig("internSig", Bits(2))
    # drive each bit of the internal vector from its input ...
    for bit, src in ((0, self.a), (1, self.b)):
        tmp[bit](src)
    # ... then fan the bits back out to the outputs
    for bit, dst in ((0, self.c), (1, self.d)):
        connect(tmp[bit], dst)
def axiAwHandler(self, wErrFlag):
    """
    Drive the AXI aw channel from the driver's request channel, splitting
    requests longer than the maximum AXI burst length into several bursts
    (remaining length is kept in lenDebth between bursts).

    :param wErrFlag: when set, aw dispatching is blocked
    """
    req = self.driver.req
    aw = self.a
    r = self._reg

    self.axiAddrDefaults()
    wInfo = self.writeInfoFifo.dataIn

    if self.useTransSplitting():
        # maximum len encodable in the aw.len field
        LEN_MAX = mask(aw.len._dtype.bit_length())
        lastReqDispatched = r("lastReqDispatched", defVal=1)
        lenDebth = r("lenDebth", req.len._dtype)
        addrBackup = r("addrBackup", req.addr._dtype)
        req_idBackup = r("req_idBackup", req.id._dtype)
        _id = self._sig("id", aw.id._dtype)
        requiresSplit = req.len > LEN_MAX
        requiresDebtSplit = lenDebth > LEN_MAX

        If(lastReqDispatched,
           # first burst of a (possibly split) request
           _id(req.id),
           aw.addr(req.addr),
           If(requiresSplit,
              aw.len(LEN_MAX)
           ).Else(
              connect(req.len, aw.len, fit=True),
           ),
           req_idBackup(req.id),
           addrBackup(req.addr + self.getBurstAddrOffset()),
           lenDebth(req.len - (LEN_MAX + 1)),
           If(wInfo.rd & aw.ready & req.vld,
              If(requiresSplit,
                 lastReqDispatched(0)
              ).Else(
                 lastReqDispatched(1)
              )
           ),
           StreamNode(masters=[req],
                      slaves=[aw, wInfo],
                      extraConds={aw: ~wErrFlag}).sync(),
        ).Else(
           # continuation bursts generated from the backup registers
           _id(req_idBackup),
           aw.addr(addrBackup),
           If(requiresDebtSplit,
              aw.len(LEN_MAX)
           ).Else(
              connect(lenDebth, aw.len, fit=True)
           ),
           StreamNode(slaves=[aw, wInfo],
                      extraConds={aw: ~wErrFlag}).sync(),
           # no new request is consumed while a split is in progress
           req.rd(0),
           If(StreamNode(slaves=[wInfo, aw]).ack(),
              addrBackup(addrBackup + self.getBurstAddrOffset()),
              lenDebth(lenDebth - (LEN_MAX + 1)),
              If(lenDebth <= LEN_MAX,
                 lastReqDispatched(1)
              )
           )
        )
        aw.id(_id)
        wInfo.id(_id)
    else:
        # no splitting: forward the request 1:1
        aw.id(req.id)
        wInfo.id(req.id)
        aw.addr(req.addr)
        connect(req.len, aw.len, fit=True)
        StreamNode(masters=[req], slaves=[aw, wInfo]).sync()
def test_IntfDirections_multistream_setSrc2(self):
    """Check interface directions after per-channel connect and after reversing."""
    def m(i):
        # expect MASTER direction
        return self.assertEqual(i._direction, INTF_DIRECTION.MASTER)

    def s(i):
        # expect SLAVE direction
        return self.assertEqual(i._direction, INTF_DIRECTION.SLAVE)

    i, i2 = createTwoAxiDuplexStreams()
    connect(i2.rx, i.rx)
    connect(i.tx, i2.tx)

    m(i)
    s(i.rx)
    s(i.rx.data)
    s(i.rx.last)
    s(i.rx.valid)
    m(i.rx.ready)

    m(i.tx.data)
    m(i.tx.last)
    m(i.tx.valid)
    s(i.tx.ready)

    s(i2)
    m(i2.rx.data)
    m(i2.rx.last)
    m(i2.rx.valid)
    s(i2.rx.ready)

    s(i2.tx.data)
    s(i2.tx.last)
    s(i2.tx.valid)
    m(i2.tx.ready)

    # after reversing, every direction flips
    i._reverseDirection()
    i2._reverseDirection()

    s(i)
    m(i.rx)
    m(i.rx.data)
    m(i.rx.last)
    m(i.rx.valid)
    s(i.rx.ready)

    s(i.tx.data)
    s(i.tx.last)
    s(i.tx.valid)
    m(i.tx.ready)

    m(i2)
    s(i2.rx.data)
    s(i2.rx.last)
    s(i2.rx.valid)
    m(i2.rx.ready)

    m(i2.tx.data)
    m(i2.tx.last)
    m(i2.tx.valid)
    s(i2.tx.ready)
def connectDrivers(self, drivers, datapump):
    """
    Connect drivers to datapump using this component
    """
    for index, drv in enumerate(drivers):
        # the widest driver configures the widths of this component;
        # narrower drivers (e.g. smaller id) are adapted by fit=True
        connect(self.getDpIntf(drv), self.drivers[index], fit=True)
    datapump.driver(self.getDpIntf(self))
def connectDrivers(self, drivers, datapump):
    """
    Connect drivers to datapump using this component
    """
    for i, driver in enumerate(drivers):
        # width of signals should be configured by the widest
        # others drivers can have smaller widths of some signals for example id
        connect(self.getDpIntf(driver), self.drivers[i], fit=True)

    # this component itself acts as the single driver of the datapump
    datapump.driver(self.getDpIntf(self))
def _impl(self):
    """
    AXI-stream frame generator controlled over a register endpoint:
    "enable" starts/stops the output stream, "len" sets the number of
    words per frame; the word counter counts down and asserts `last`
    when it reaches 0.
    """
    propagateClkRstn(self)
    cntr = self._reg("wordCntr", Bits(log2ceil(self.MAX_LEN)), defVal=0)
    en = self._reg("enable", defVal=0)
    # bugfix: this register was also named "wordCntr" (copy-paste from
    # `cntr` above), colliding with the counter register's HDL name;
    # the frame-length register now has its own name
    _len = self._reg("len_reg", Bits(log2ceil(self.MAX_LEN)), defVal=0)

    self.conv.bus(self.cntrl)

    cEn = self.conv.decoded.enable
    If(cEn.dout.vld,
       connect(cEn.dout.data, en, fit=True)
    )
    connect(en, cEn.din, fit=True)

    cLen = self.conv.decoded.len
    If(cLen.dout.vld,
       connect(cLen.dout.data, _len, fit=True)
    )
    connect(_len, cLen.din, fit=True)

    out = self.axis_out
    connect(cntr, out.data, fit=True)
    if self.USE_STRB:
        # all byte lanes always valid
        out.strb(mask(self.axis_out.strb._dtype.bit_length()))
    out.last(cntr._eq(0))
    out.valid(en)

    # a bus write to "len" reloads the counter immediately; otherwise
    # count down while the stream is enabled and the word is accepted
    If(cLen.dout.vld,
       connect(cLen.dout.data, cntr, fit=True)
    ).Else(
       If(out.ready & en,
          If(cntr._eq(0),
             cntr(_len)
          ).Else(
             cntr(cntr - 1)
          )
       )
    )
def _impl(self):
    """
    Fork dataIn to outputs with priority: an output may take the data
    only when no output later in the reversed list is ready.
    """
    rd = self.getRd
    vldOf = self.getVld
    outs = list(reversed(self.dataOut))

    # input is consumed whenever any output can take the data
    rd(self.dataIn)(Or(*[rd(o) for o in outs]))

    for idx, out in enumerate(outs):
        en = vldOf(self.dataIn)
        # mask valid while any output later in `outs` is ready
        for later in outs[idx + 1:]:
            en = en & ~rd(later)
        connect(self.dataIn, out, exclude={rd(out), vldOf(out)})
        vldOf(out)(en)
def _impl(self):
    """
    Broadcast dataIn payload to every output; the input rd flag is the
    or-reduction of the per-output rd signals from the selection logic.
    """
    rdList = self.isSelectedLogic()

    for out in self.dataOut:
        skip = {self.getRd(out), self.getVld(out)}
        connect(self.dataIn, out, exclude=skip)

    anyRd = Or(*rdList)
    if self.EXPORT_SELECTED:
        # selection consumer must also be ready
        anyRd = anyRd & self.selectedOneHot.rd
    self.getRd(self.dataIn)(anyRd)
def _impl(self):
    """
    Measure frame lengths of the input stream: data words are buffered in
    dataBuff and, for every frame end (dIn.last or MAX_LEN overflow), a
    record Concat(word count, byte remainder of last word) is pushed to
    sizesBuff.
    """
    propagateClkRstn(self)
    dIn = AxiSBuilder(self, self.dataIn).buff().end

    sb = self.sizesBuff
    db = self.dataBuff

    wordCntr = self._reg("wordCntr",
                         Bits(log2ceil(self.MAX_LEN) + 1),
                         defVal=0)
    # frames longer than MAX_LEN are split
    overflow = wordCntr._eq(self.MAX_LEN)
    last = dIn.last | overflow

    If(StreamNode(masters=[dIn],
                  slaves=[sb.dataIn, db.dataIn]).ack(),
       If(last,
          wordCntr(0)
       ).Else(
          wordCntr(wordCntr + 1)
       )
    )

    length = self._sig("length", wordCntr._dtype)
    BYTE_CNT = dIn.data._dtype.bit_length() // 8
    if dIn.USE_STRB:
        # compress strb mask as binary number
        rem = self._sig("rem", Bits(log2ceil(BYTE_CNT)))
        SwitchLogic(cases=[(dIn.strb[i],
                            rem(0 if i == BYTE_CNT - 1 else i + 1))
                           for i in reversed(range(BYTE_CNT))],
                    default=[
                        rem(0),
                    ])
        if self.EXPORT_ALIGNMENT_ERROR:
            errorAlignment = self._reg("errorAlignment_reg", defVal=0)
            self.errorAlignment(errorAlignment)
            # an incomplete strb is only legal in the last word of a frame
            If(dIn.valid & (dIn.strb != mask(BYTE_CNT)) & ~dIn.last,
               errorAlignment(1)
            )

        If(last & (dIn.strb != mask(BYTE_CNT)),
           length(wordCntr)
        ).Else(
           length(wordCntr + 1)
        )
    else:
        length(wordCntr + 1)
        rem = vec(0, log2ceil(BYTE_CNT))

    sb.dataIn.data(Concat(length, rem))

    connect(dIn, db.dataIn, exclude=[dIn.valid, dIn.ready, dIn.last])
    db.dataIn.last(last)

    # size record is pushed only on the frame's last word
    StreamNode(masters=[dIn],
               slaves=[sb.dataIn, db.dataIn],
               extraConds={
                   sb.dataIn: last
               }).sync()

    self.sizes(sb.dataOut)
    connect(db.dataOut, self.dataOut)
def connected_reg(name, input_=None, inputEn=None, fit=False):
    """
    Create a register exposed on endpoint port `name`: bus writes win;
    otherwise an optional hardware input (gated by inputEn) updates it.
    The value is always readable back over the bus.

    NOTE: `ep` and `self` come from the enclosing scope (nested helper).
    """
    if input_ is not None:
        assert inputEn is not None

    port = getattr(ep, name)
    reg = self._reg(name, port.din._dtype)

    # bus write has priority over the hardware input
    e = If(port.dout.vld,
           connect(port.dout.data, reg, fit=fit)
        )
    if input_ is not None:
        e.Elif(inputEn,
               connect(input_, reg, fit=fit)
        )
    port.din(reg)
    return reg
def _tryConnect(src, unit, intfName):
    """
    Try connect src to interface of specified name on unit.
    Ignore if interface is not present or if it already has driver.
    """
    try:
        dst = getattr(unit, intfName)
    except AttributeError:
        # no interface of this name on unit
        return
    if dst._sig.drivers:
        # already driven elsewhere, keep the existing driver
        return
    connect(src, dst)
def reqHandler(self, dpReq, orderFifoIn):
    """
    Join requests from all drivers (fair round-robin) into the datapump
    request channel and push the index of the winning driver to the
    order fifo so responses can be routed back.
    """
    # join with roundrobin on requests form drivers and selected index
    # is stored into orderFifo because it is just proxy
    driversReq = list(map(lambda d: d.req, self.drivers))
    b = HsBuilder.join_fair(self, driversReq, exportSelected=True)
    req = b.end
    reqJoin = b.lastComp

    StreamNode(masters=[req],
               slaves=[dpReq, orderFifoIn]).sync()
    connect(req, dpReq, exclude=[dpReq.vld, dpReq.rd])
    orderFifoIn.data(oneHotToBin(self, reqJoin.selectedOneHot.data))
def connectL1Load(self, lvl1readAddr):
    """
    Split the incoming virtual address into level-2 index, page offset
    and level-1 read address; all three consumers are synchronized with
    the virtual-address input stream.
    """
    virtIn = self.virtIn
    lvl2indx = self.lvl2indxFifo.dataIn
    pageOffset = self.pageOffsetFifo
    # middle bits: index into the level-2 page table
    lvl2indx.data(virtIn.data[(self.LVL2_PAGE_TABLE_INDX_WIDTH
                               + self.PAGE_OFFSET_WIDTH):self.PAGE_OFFSET_WIDTH])
    # low bits of the address (fit=True narrows to the offset width)
    connect(virtIn.data, pageOffset.dataIn.data, fit=True)
    # high bits: address into the level-1 table
    lvl1readAddr.data(virtIn.data[:(self.LVL2_PAGE_TABLE_INDX_WIDTH
                                    + self.PAGE_OFFSET_WIDTH)])
    StreamNode(masters=[virtIn],
               slaves=[lvl2indx, lvl1readAddr, pageOffset.dataIn]).sync()
def connectL1Load(self, lvl1readAddr):
    """
    Split the incoming virtual address into level-2 index, page offset
    and level-1 read address; all three consumers are synchronized with
    the virtual-address input stream.
    """
    virtIn = self.virtIn
    lvl2indx = self.lvl2indxFifo.dataIn
    pageOffset = self.pageOffsetFifo
    # middle bits: index into the level-2 page table
    lvl2indx.data(
        virtIn.data[(self.LVL2_PAGE_TABLE_INDX_WIDTH
                     + self.PAGE_OFFSET_WIDTH):self.PAGE_OFFSET_WIDTH])
    # low bits of the address (fit=True narrows to the offset width)
    connect(virtIn.data, pageOffset.dataIn.data, fit=True)
    # high bits: address into the level-1 table
    lvl1readAddr.data(virtIn.data[:(self.LVL2_PAGE_TABLE_INDX_WIDTH
                                    + self.PAGE_OFFSET_WIDTH)])
    StreamNode(masters=[virtIn],
               slaves=[lvl2indx, lvl1readAddr,
                       pageOffset.dataIn]).sync()
def _impl(self):
    """
    Broadcast dataIn payload to every output; the input rd flag is the
    or-reduction of the per-output rd signals from the selection logic.
    """
    rdSignals = self.isSelectedLogic()

    for dout in self.dataOut:
        connect(self.dataIn, dout, exclude={self.getRd(dout), self.getVld(dout)})

    if self.EXPORT_SELECTED:
        # selection consumer must also be ready
        self.getRd(self.dataIn)(Or(*rdSignals) & self.selectedOneHot.rd)
    else:
        self.getRd(self.dataIn)(Or(*rdSignals))
def _impl(self):
    """
    Gate the stream by `en`, but once a frame has started let it finish:
    framePending keeps the path open until the word with `last` passes.
    """
    din = self.dataIn
    dout = self.dataOut
    framePending = self._reg("framePending", defVal=False)

    ack = StreamNode([din], [dout]).ack()
    # NOTE(review): framePending only updates while it is already set and
    # resets to False, so it can apparently never become true — confirm
    # whether the condition was meant to be e.g. `ack` alone
    If(framePending & ack,
       framePending(~din.last)
    )

    dataEn = self.en | framePending
    StreamNode(masters=[din], slaves=[dout]).sync(dataEn)
    connect(din, dout, exclude=[din.ready, din.valid])
def reqHandler(self, dpReq, orderFifoIn):
    """
    Join requests from all drivers (fair round-robin) into the datapump
    request channel and push the index of the winning driver to the
    order fifo so responses can be routed back.
    """
    # join with roundrobin on requests form drivers and selected index
    # is stored into orderFifo because it is just proxy
    driversReq = list(map(lambda d: d.req, self.drivers))
    b = HsBuilder.join_fair(self, driversReq, exportSelected=True)
    req = b.end
    reqJoin = b.lastComp

    StreamNode(masters=[req],
               slaves=[dpReq, orderFifoIn]).sync()
    connect(req, dpReq, exclude=[dpReq.vld, dpReq.rd])
    orderFifoIn.data(oneHotToBin(self, reqJoin.selectedOneHot.data))
def _impl(self):
    """
    Fork dataIn to outputs with priority: an output may take the data
    only when no output later in the reversed list is ready.
    """
    dataOut = list(reversed(self.dataOut))
    # input is consumed whenever any output can take the data
    self.getRd(self.dataIn)(Or(*map(lambda x: self.getRd(x), dataOut)))

    for i, out in enumerate(dataOut):
        allWitLowerPriority = dataOut[i + 1:]
        vld = self.getVld(self.dataIn)
        # mask valid while any output later in the list is ready
        for _vld in map(lambda x: ~self.getRd(x), allWitLowerPriority):
            vld = vld & _vld
        connect(self.dataIn, out, exclude={self.getRd(out), self.getVld(out)})
        self.getVld(out)(vld)
def connected_reg(name, input_=None, inputEn=None, fit=False):
    """
    Create a register exposed on endpoint port `name`: bus writes win;
    otherwise an optional hardware input (gated by inputEn) updates it.
    The value is always readable back over the bus.

    NOTE: `ep` and `self` come from the enclosing scope (nested helper).
    """
    if input_ is not None:
        assert inputEn is not None

    port = getattr(ep, name)
    reg = self._reg(name, port.din._dtype)

    # bus write has priority over the hardware input
    e = If(port.dout.vld,
           connect(port.dout.data, reg, fit=fit)
        )
    if input_ is not None:
        e.Elif(inputEn,
               connect(input_, reg, fit=fit)
        )
    port.din(reg)
    return reg
def _impl(self):
    """Two 32-bit registers writable and readable over the bus converter."""
    propagateClkRstn(self)
    connect(self.axi, self.conv.bus, fit=True)

    reg0 = self._reg("reg0", Bits(32), defVal=0)
    reg1 = self._reg("reg1", Bits(32), defVal=1)

    for port, backingReg in ((self.conv.decoded.reg0, reg0),
                             (self.conv.decoded.reg1, reg1)):
        # a bus write updates the register; its value is always readable
        If(port.dout.vld,
           backingReg(port.dout.data)
        )
        port.din(backingReg)
def _impl(self):
    """
    Gate the stream by `en`, but once a frame has started let it finish:
    framePending keeps the path open until the word with `last` passes.
    """
    din = self.dataIn
    dout = self.dataOut
    framePending = self._reg("framePending", defVal=False)

    ack = StreamNode([din], [dout]).ack()
    # NOTE(review): framePending only updates while it is already set and
    # resets to False, so it can apparently never become true — confirm
    # whether the condition was meant to be e.g. `ack` alone
    If(framePending & ack,
       framePending(~din.last)
    )

    dataEn = self.en | framePending
    StreamNode(masters=[din], slaves=[dout]).sync(dataEn)
    connect(din, dout, exclude=[din.ready, din.valid])
def wReqDriver(self, en, baseIndex, lenByPtrs, inBlockRemain):
    """
    Drive the write-datapump request channel: the request length is
    limited both by the space remaining in the current block and by
    BURST_LEN.

    :return: signal with the length of the issued request
    """
    s = self._sig
    wReq = self.wDatapump.req
    BURST_LEN = self.BUFFER_CAPACITY // 2 - 1
    inBlockRemain_asPtrSize = fitTo(inBlockRemain, lenByPtrs)

    # wReq driver
    ringSpace_t = Bits(self.PTR_WIDTH)
    # first constraint: do not cross the current block boundary
    constraingLen = s("constraingSpace", ringSpace_t)
    If(inBlockRemain_asPtrSize < lenByPtrs,
       constraingLen(inBlockRemain_asPtrSize)
    ).Else(
       constraingLen(lenByPtrs)
    )

    # second constraint: cap at maximum burst length
    reqLen = s("reqLen", wReq.len._dtype)
    If(constraingLen > BURST_LEN,
       reqLen(BURST_LEN)
    ).Else(
       connect(constraingLen, reqLen, fit=True)
    )

    wReq.id(self.ID)
    wReq.addr(self.indexToAddr(baseIndex))
    wReq.rem(0)
    wReq.len(reqLen)
    wReq.vld(en)

    return reqLen
def wHandler(self):
    """
    Multiplex write-data channels of all drivers into the datapump w
    channel according to the order fifo; at the end of each burst the
    driver index is forwarded to the ack order fifo.
    """
    w = self.wDatapump.w
    fWOut = self.orderInfoFifoW.dataOut
    fAckIn = self.orderInfoFifoAck.dataIn
    driversW = list(map(lambda d: d.w, self.drivers))

    # valid / last of the driver currently selected by the order fifo
    selectedDriverVld = self._sig("selectedDriverWVld")
    selectedDriverVld(Or(*map(lambda d: fWOut.data._eq(d[0]) & d[1].valid,
                              enumerate(driversW))))
    selectedDriverLast = self._sig("selectedDriverLast")
    selectedDriverLast(Or(*map(lambda d: fWOut.data._eq(d[0]) & d[1].last,
                               enumerate(driversW))))

    Switch(fWOut.data).addCases(
        [(i, connect(d, w, exclude=[d.valid, d.ready]))
         for i, d in enumerate(driversW)]
    ).Default(
        w.data(None),
        w.strb(None),
        w.last(None)
    )

    fAckIn.data(fWOut.data)

    # handshake logic
    fWOut.rd(selectedDriverVld & selectedDriverLast & w.ready & fAckIn.rd)
    for i, d in enumerate(driversW):
        d.ready(fWOut.data._eq(i) & w.ready & fWOut.vld & fAckIn.rd)
    w.valid(selectedDriverVld & fWOut.vld & fAckIn.rd)
    fAckIn.vld(selectedDriverVld & selectedDriverLast & w.ready & fWOut.vld)
def logChange(self, nowTime, sig, nextVal):
    """
    This method is called for every value change of any signal.

    Appends a wait statement covering the elapsed simulation time and the
    assignment of the new value to the hardware process registered for sig.
    """
    try:
        hwProc = self.registered[sig]
    except KeyError:
        # not every signal has to be registered
        return

    if hwProc.actualTime < nowTime:
        a = hwProc.actualTime
        # clamp negative (pre-start) time to 0
        if a < 0:
            a = 0
        delay = int(nowTime - a)
        if delay > 0:
            # NOTE(review): // 1000 presumably converts simulator time
            # units to the wait-statement time base — confirm
            hwProc.statements.append(
                WaitStm(int(delay) // 1000)
            )
        hwProc.actualTime = nowTime

    try:
        # SimBits type does not have forceVector flag,
        # but serializer requires it
        nextVal._dtype.forceVector = hwProc.driverFor._dtype.forceVector
    except AttributeError:
        pass

    hwProc.statements.extend(
        connect(nextVal, hwProc.driverFor)
    )
def _impl(self, clks: Optional[Tuple[Clk, Clk]]=None):
    """
    Wrap the internal fifo with a handshaked interface; an output valid
    register converts the fifo's wait flags to valid/ready handshakes.

    :clks: optional tuple (inClk, outClk)
    """
    rd = self.getRd
    vld = self.getVld

    # connect clock and resets
    if clks is None:
        propagateClkRstn(self)
        inClk, outClk = (None, None)
    else:
        propagateRstn(self)
        inClk, outClk = clks
        self.fifo.dataIn_clk(inClk)
        self.fifo.dataOut_clk(outClk)

    # to fifo
    fIn = self.fifo.dataIn
    din = self.dataIn
    wr_en = ~fIn.wait
    rd(din)(wr_en)
    fIn.data(packIntf(din, exclude=[vld(din), rd(din)]))
    fIn.en(vld(din) & wr_en)

    # from fifo
    fOut = self.fifo.dataOut
    dout = self.dataOut
    out_vld = self._reg("out_vld", defVal=0, clk=outClk)
    vld(dout)(out_vld)
    connectPacked(fOut.data, dout, exclude=[vld(dout), rd(dout)])

    # prefetch next item whenever the output slot is (or becomes) free
    fOut.en((rd(dout) | ~out_vld) & ~fOut.wait)
    If(rd(dout) | ~out_vld,
       out_vld(~fOut.wait)
    )

    if self.EXPORT_SIZE:
        sizeTmp = self._sig("sizeTmp", self.size._dtype)
        connect(self.fifo.size, sizeTmp, fit=True)

        # item held in out_vld register counts into the size as well
        If(out_vld,
           self.size(sizeTmp + 1)
        ).Else(
           connect(self.fifo.size, self.size, fit=True)
        )
def ackHandler(self):
    """
    Route write acknowledges from the datapump back to the driver whose
    index is at the front of the ack order fifo.
    """
    ack = self.wDatapump.ack
    fAckOut = self.orderInfoFifoAck.dataOut
    driversAck = list(map(lambda d: d.ack, self.drivers))

    # rd flag of the driver currently selected by the ack order fifo
    selectedDriverAckReady = self._sig("selectedDriverAckReady")
    selectedDriverAckReady(Or(*map(lambda d: fAckOut.data._eq(d[0]) & d[1].rd,
                                   enumerate(driversAck))))

    # consume ack and fifo item together
    ack.rd(fAckOut.vld & selectedDriverAckReady)
    fAckOut.rd(ack.vld & selectedDriverAckReady)

    for i, d in enumerate(driversAck):
        connect(ack, d, exclude=[d.vld, d.rd])
        d.vld(ack.vld & fAckOut.vld & fAckOut.data._eq(i))
def writePart(self):
    """
    Write half of the AXI-lite slave: a 3-state FSM (idle -> data -> resp)
    latches the write address and dispatches the data beat to directly
    mapped registers and BRAM ports.

    :return: (latched write address, write handshake signal)
    """
    sig = self._sig
    reg = self._reg

    wSt_t = HEnum('wSt_t', ['wrIdle', 'wrData', 'wrResp'])
    aw = self.bus.aw
    w = self.bus.w
    b = self.bus.b

    # write fsm
    wSt = FsmBuilder(self, wSt_t, "wSt")\
        .Trans(wSt_t.wrIdle,
               (aw.valid, wSt_t.wrData)
        ).Trans(wSt_t.wrData,
               (w.valid, wSt_t.wrResp)
        ).Trans(wSt_t.wrResp,
               (b.ready, wSt_t.wrIdle)
        ).stateReg

    awAddr = reg('awAddr', aw.addr._dtype)
    w_hs = sig('w_hs')

    # address accepted only in idle state
    awRd = wSt._eq(wSt_t.wrIdle)
    aw.ready(awRd)
    aw_hs = awRd & aw.valid

    # data accepted only in data state
    wRd = wSt._eq(wSt_t.wrData)
    w.ready(wRd)
    w_hs(w.valid & wRd)

    # save aw addr
    If(aw_hs,
       awAddr(aw.addr)
    ).Else(
       awAddr(awAddr)
    )

    self.connect_directly_mapped_write(awAddr, w.data, w_hs)

    for (_, _), t in self._bramPortMapped:
        # en, we handled in readPart
        din = self.getPort(t).din
        connect(w.data, din, fit=True)

    self.writeRespPart(awAddr, wSt._eq(wSt_t.wrResp))

    return awAddr, w_hs
def _impl(self):
    """
    On a "get" trigger issue one read request per frame and feed the
    returned data into the frame parser; parsed fields drive dataOut.
    """
    propagateClkRstn(self)
    req = self.rDatapump.req
    req.rem(0)

    if self.READ_ACK:
        get = self.get
    else:
        # register the trigger so the address stays stable
        get = HsBuilder(self, self.get).buff().end

    def f(frame, indx):
        # request one frame worth of words at its byte offset
        s = [
            req.addr(get.data + frame.startBitAddr // 8),
            req.len(frame.getWordCnt() - 1),
            req.vld(get.vld)
        ]
        # trigger is consumed only after the last frame's request
        isLastFrame = indx == len(self._frames) - 1
        if isLastFrame:
            rd = req.rd
        else:
            rd = 0
        s.append(get.rd(rd))

        ack = StreamNode(masters=[get],
                         slaves=[self.rDatapump.req]).ack()
        return s, ack

    StaticForEach(self, self._frames, f)

    r = self.rDatapump.r
    data_sig_to_exclude = []
    req.id(self.ID)
    if hasattr(r, "id"):
        data_sig_to_exclude.append(r.id)
    if hasattr(r, "strb"):
        data_sig_to_exclude.append(r.strb)
    connect(r, self.parser.dataIn, exclude=data_sig_to_exclude)

    # wire every parsed field to the matching dataOut sub-interface
    for _, field in self._tmpl.walkFlatten():
        myIntf = self.dataOut._fieldsToInterfaces[field.origin]
        parserIntf = self.parser.dataOut._fieldsToInterfaces[field.origin]
        myIntf(parserIntf)
def _impl(self):
    """
    On a "get" trigger issue one read request per frame and feed the
    returned data into the frame parser; parsed fields drive dataOut.
    """
    propagateClkRstn(self)
    req = self.rDatapump.req
    req.rem(0)

    if self.READ_ACK:
        get = self.get
    else:
        # register the trigger so the address stays stable
        get = HsBuilder(self, self.get).buff().end

    def f(frame, indx):
        # request one frame worth of words at its byte offset
        s = [req.addr(get.data + frame.startBitAddr // 8),
             req.len(frame.getWordCnt() - 1),
             req.vld(get.vld)
             ]
        # trigger is consumed only after the last frame's request
        isLastFrame = indx == len(self._frames) - 1
        if isLastFrame:
            rd = req.rd
        else:
            rd = 0
        s.append(get.rd(rd))

        ack = StreamNode(masters=[get],
                         slaves=[self.rDatapump.req]).ack()
        return s, ack

    StaticForEach(self, self._frames, f)

    r = self.rDatapump.r
    data_sig_to_exclude = []
    req.id(self.ID)
    if hasattr(r, "id"):
        data_sig_to_exclude.append(r.id)
    if hasattr(r, "strb"):
        data_sig_to_exclude.append(r.strb)
    connect(r, self.parser.dataIn, exclude=data_sig_to_exclude)

    # wire every parsed field to the matching dataOut sub-interface
    for _, field in self._tmpl.walkFlatten():
        myIntf = self.dataOut._fieldsToInterfaces[field.origin]
        parserIntf = self.parser.dataOut._fieldsToInterfaces[field.origin]
        myIntf(parserIntf)
def _impl(self) -> None:
    """
    Simple 1:N IPIF interconnect: an address-prefix compare decodes a
    chip-select per slave; slave responses are or-reduced (gated by cs)
    back to the single master.
    """
    if len(self._masters) > 1:
        raise NotImplementedError()
    m_offset, _ = self._masters[0]
    if m_offset != 0:
        raise NotImplementedError()

    m = self.s[0]
    err = hBit(0)
    rdack = hBit(0)
    wrack = hBit(0)
    AW = int(self.ADDR_WIDTH)
    wdata = []
    for i, (s, (s_offset, s_size, _)) in enumerate(zip(self.m, self._slaves)):
        # request signals are broadcast to every slave
        connect(m.bus2ip_addr, s.bus2ip_addr, fit=True)
        s.bus2ip_be(m.bus2ip_be)
        s.bus2ip_rnw(m.bus2ip_rnw)
        s.bus2ip_data(m.bus2ip_data)

        # chip-select from the high address bits above the slave's span
        bitsOfSubAddr = int(log2ceil(s_size - 1))
        prefix = selectBitRange(
            s_offset, bitsOfSubAddr, AW - bitsOfSubAddr)
        cs = self._sig("m_cs_%d" % i)
        cs(m.bus2ip_addr[AW:bitsOfSubAddr]._eq(prefix))
        s.bus2ip_cs(m.bus2ip_cs & cs)

        # responses count only for the selected slave
        err = err | (cs & s.ip2bus_error)
        rdack = rdack | (cs & s.ip2bus_rdack)
        wrack = wrack | (cs & s.ip2bus_wrack)
        wdata.append((cs, s.ip2bus_data))

    m.ip2bus_error(err)
    m.ip2bus_rdack(rdack)
    m.ip2bus_wrack(wrack)
    SwitchLogic(
        [(sel, m.ip2bus_data(data)) for sel, data in wdata],
        default=m.ip2bus_data(None)
    )
def _impl(self):
    """
    Demo unit: uart loopback, ram0 -> ram1 copy, handshake output either
    forwarded from hsIn or forced valid with r0 as data, plus two AXI3
    pass-throughs.
    """
    r0 = self._reg("r0", defVal=0)
    self.uart.tx(self.uart.rx)
    self.ram1(self.ram0)
    # sample the differential input while hsIn is valid
    If(self.hsIn.vld,
       r0(self.difIn.p & ~self.difIn.n)
    )
    If(r0,
       self.hsOut(self.hsIn)
    ).Else(
       connect(r0, self.hsOut.data, fit=True),
       self.hsOut.vld(1)
    )
    self.axi3m0(self.axi3s0)
    self.axi3m1(self.axi3s1)
def _impl(self):
    """
    Per-channel transaction counters for an AXI bus pass-through; counter
    values are readable over the cntrl endpoint and all counters are
    cleared by a write to the "control" register.
    """
    propagateClkRstn(self)
    self.axi_ep.bus(self.cntrl)
    ep = self.axi_ep.decoded
    # any bus write to the control register clears every counter
    doClr = ep.control.dout.vld
    ep.control.din(1)

    self.master(self.slave)
    s, m = self.slave, self.master
    for dir_, name in [(1, "ar"), (1, "aw"), (1, "w"),
                       (0, "r"), (0, "b")]:
        sCh = getattr(s, name)
        mCh = getattr(m, name)
        if not dir_:
            # response channels flow in the opposite direction
            sCh, mCh = mCh, sCh
        cntrl = getattr(ep, name)

        # count completed handshakes on this channel
        ack = StreamNode(masters={sCh}, slaves={mCh}).ack()
        cntr = self._reg("cntr_" + name, Bits(self.CNTR_WIDTH), defVal=0)
        If(doClr,
           cntr(0)
        ).Elif(ack,
           cntr(cntr + 1)
        )
        connect(cntr, cntrl.din, fit=True)
def _impl(self, clks: Optional[Tuple[Clk, Clk]] = None):
    """
    Wrap the internal fifo with a handshaked interface; an output valid
    register converts the fifo's wait flags to valid/ready handshakes.

    :clks: optional tuple (inClk, outClk)
    """
    rd = self.getRd
    vld = self.getVld

    # connect clock and resets
    if clks is None:
        propagateClkRstn(self)
        inClk, outClk = (None, None)
    else:
        propagateRstn(self)
        inClk, outClk = clks
        self.fifo.dataIn_clk(inClk)
        self.fifo.dataOut_clk(outClk)

    # to fifo
    fIn = self.fifo.dataIn
    din = self.dataIn
    wr_en = ~fIn.wait
    rd(din)(wr_en)
    fIn.data(packIntf(din, exclude=[vld(din), rd(din)]))
    fIn.en(vld(din) & wr_en)

    # from fifo
    fOut = self.fifo.dataOut
    dout = self.dataOut
    out_vld = self._reg("out_vld", defVal=0, clk=outClk)
    vld(dout)(out_vld)
    connectPacked(fOut.data, dout, exclude=[vld(dout), rd(dout)])

    # prefetch next item whenever the output slot is (or becomes) free
    fOut.en((rd(dout) | ~out_vld) & ~fOut.wait)
    If(rd(dout) | ~out_vld,
       out_vld(~fOut.wait))

    if self.EXPORT_SIZE:
        sizeTmp = self._sig("sizeTmp", self.size._dtype)
        connect(self.fifo.size, sizeTmp, fit=True)

        # item held in out_vld register counts into the size as well
        If(out_vld,
           self.size(sizeTmp + 1)).Else(
            connect(self.fifo.size, self.size, fit=True))
def _impl(self) -> None:
    """
    Simple 1:N IPIF interconnect: an address-prefix compare decodes a
    chip-select per slave; slave responses are or-reduced (gated by cs)
    back to the single master.
    """
    if len(self._masters) > 1:
        raise NotImplementedError()
    m_offset, _ = self._masters[0]
    if m_offset != 0:
        raise NotImplementedError()

    m = self.s[0]
    err = hBit(0)
    rdack = hBit(0)
    wrack = hBit(0)
    AW = int(self.ADDR_WIDTH)
    wdata = []
    for i, (s, (s_offset, s_size, _)) in enumerate(zip(self.m, self._slaves)):
        # request signals are broadcast to every slave
        connect(m.bus2ip_addr, s.bus2ip_addr, fit=True)
        s.bus2ip_be(m.bus2ip_be)
        s.bus2ip_rnw(m.bus2ip_rnw)
        s.bus2ip_data(m.bus2ip_data)

        # chip-select from the high address bits above the slave's span
        bitsOfSubAddr = int(log2ceil(s_size - 1))
        prefix = selectBitRange(s_offset, bitsOfSubAddr, AW - bitsOfSubAddr)
        cs = self._sig("m_cs_%d" % i)
        cs(m.bus2ip_addr[AW:bitsOfSubAddr]._eq(prefix))
        s.bus2ip_cs(m.bus2ip_cs & cs)

        # responses count only for the selected slave
        err = err | (cs & s.ip2bus_error)
        rdack = rdack | (cs & s.ip2bus_rdack)
        wrack = wrack | (cs & s.ip2bus_wrack)
        wdata.append((cs, s.ip2bus_data))

    m.ip2bus_error(err)
    m.ip2bus_rdack(rdack)
    m.ip2bus_wrack(wrack)
    SwitchLogic([(sel, m.ip2bus_data(data)) for sel, data in wdata],
                default=m.ip2bus_data(None))
def connectDp(parent, controller, datapump, axi, exclude=None):
    """
    Connect datapump with it's controller(s) and axi

    :param controller: (controller compatible with Axi_wDatapump
        or Axi_rDatapump) or list/tuple/generator of them
    :param datapump: Axi_wDatapump or Axi_rDatapump
    :param axi: axi(3/4) interface which datapump should use
    """
    if isgenerator(controller):
        controller = list(controller)
    # a single-item collection behaves like a single controller
    if isinstance(controller, (list, tuple)) and len(controller) == 1:
        controller = controller[0]

    if isinstance(datapump, Axi_rDatapump):
        connect(datapump.a, axi.ar, exclude=exclude)
        datapump.r(axi.r)
        if isinstance(controller, (list, tuple)):
            # multiple controllers share the datapump over an interconnect
            interconnect = RStrictOrderInterconnect()
            # @for cntrl, reqIn in zip(controller, req_join.dataIn):
            # @    reqIn(HsBuilder(parent, cntrl.rDatapump.req).reg().end)
            # datapump.driver.req(req_join.dataOut)
        else:
            datapump.driver(controller.rDatapump)
            return
    elif isinstance(datapump, Axi_wDatapump):
        connect(datapump.a, axi.aw, exclude=exclude)
        # axi3/4 connection
        if not hasattr(axi.w, "id") and hasattr(datapump.w, "id"):
            exclude_ = [datapump.w.id]
        else:
            exclude_ = []
        if exclude:
            exclude_.extend(exclude)
        connect(datapump.w, axi.w, exclude=exclude_)
        datapump.b(axi.b)
        if isinstance(controller, (list, tuple)):
            # multiple controllers share the datapump over an interconnect
            interconnect = WStrictOrderInterconnect()
        else:
            datapump.driver(controller.wDatapump)
            return
    else:
        raise TypeError("Unsupported datapump type %r" % (datapump.__class__))

    # reached only for the multi-controller cases above
    interconnect.configureFromDrivers(controller, datapump, byInterfaces=True)
    setattr(parent, datapump._name + "_interconnect", interconnect)
    interconnect.clk(parent.clk)
    interconnect.rst_n(parent.rst_n)
    interconnect.connectDrivers(controller, datapump)
def lookupResDriver(self, state, lookupOrigin, lookupAck, insertFoundOH):
    """
    If lookup request comes from external interface "lookup"
    propagate results from tables to "lookupRes".
    """
    fsm_t = state._dtype
    lookupRes = self.lookupRes
    lookupRes.vld(state._eq(fsm_t.lookupResAck)
                  & lookupOrigin._eq(ORIGIN_TYPE.LOOKUP)
                  & lookupAck)

    # route lookupRes of the table flagged by the one-hot vector;
    # fall back to table 0 when no bit is set
    SwitchLogic([(insertFoundOH[i],
                  connect(t.lookupRes, lookupRes,
                          exclude={lookupRes.vld, lookupRes.rd}))
                 for i, t in enumerate(self.tables)],
                default=[
                    connect(self.tables[0].lookupRes, lookupRes,
                            exclude={lookupRes.vld, lookupRes.rd})]
    )
def test_IntfDirections_multistream_setSrc(self):
    """Check interface directions after a whole-interface connect."""
    def m(i):
        # expect MASTER direction
        return self.assertEqual(i._direction, INTF_DIRECTION.MASTER)

    def s(i):
        # expect SLAVE direction
        return self.assertEqual(i._direction, INTF_DIRECTION.SLAVE)

    i, i2 = createTwoAxiDuplexStreams()
    n = RtlNetlist()
    i._signalsForInterface(n)
    i2._signalsForInterface(n)
    connect(i, i2)

    m(i)
    s(i.rx.data)
    s(i.rx.last)
    s(i.rx.valid)
    m(i.rx.ready)

    m(i.tx.data)
    m(i.tx.last)
    m(i.tx.valid)
    s(i.tx.ready)

    m(i2.rx.data)
    m(i2.rx.last)
    m(i2.rx.valid)
    s(i2.rx.ready)

    s(i2.tx.data)
    s(i2.tx.last)
    s(i2.tx.valid)
    m(i2.tx.ready)
def _impl(self):
    """
    Demo unit: uart loopback, ram0 -> ram1 copy, handshake output either
    forwarded from hsIn or forced valid with r0 as data, plus two AXI3
    pass-throughs.
    """
    r0 = self._reg("r0", defVal=0)
    self.uart.tx(self.uart.rx)
    self.ram1(self.ram0)
    # sample the differential input while hsIn is valid
    If(self.hsIn.vld,
       r0(self.difIn.p & ~self.difIn.n)
    )
    If(r0,
       self.hsOut(self.hsIn)
    ).Else(
       connect(r0, self.hsOut.data, fit=True),
       self.hsOut.vld(1)
    )
    self.axi3m0(self.axi3s0)
    self.axi3m1(self.axi3s1)
def _impl(self):
    """
    AXI-stream frame generator controlled over a register endpoint:
    "enable" starts/stops the output stream, "len" sets the number of
    words per frame; the word counter counts down and asserts `last`
    when it reaches 0.
    """
    propagateClkRstn(self)
    cntr = self._reg("wordCntr", Bits(log2ceil(self.MAX_LEN)), defVal=0)
    en = self._reg("enable", defVal=0)
    # bugfix: this register was also named "wordCntr" (copy-paste from
    # `cntr` above), colliding with the counter register's HDL name;
    # the frame-length register now has its own name
    _len = self._reg("len_reg", Bits(log2ceil(self.MAX_LEN)), defVal=0)

    self.conv.bus(self.cntrl)

    cEn = self.conv.decoded.enable
    If(cEn.dout.vld,
       connect(cEn.dout.data, en, fit=True)
    )
    connect(en, cEn.din, fit=True)

    cLen = self.conv.decoded.len
    If(cLen.dout.vld,
       connect(cLen.dout.data, _len, fit=True)
    )
    connect(_len, cLen.din, fit=True)

    out = self.axis_out
    connect(cntr, out.data, fit=True)
    if self.USE_STRB:
        # all byte lanes always valid
        out.strb(mask(self.axis_out.strb._dtype.bit_length()))
    out.last(cntr._eq(0))
    out.valid(en)

    # a bus write to "len" reloads the counter immediately; otherwise
    # count down while the stream is enabled and the word is accepted
    If(cLen.dout.vld,
       connect(cLen.dout.data, cntr, fit=True)
    ).Else(
       If(out.ready & en,
          If(cntr._eq(0),
             cntr(_len)
          ).Else(
             cntr(cntr - 1)
          )
       )
    )
def _impl(self):
    """
    Wrap two dual-port BRAMs that share clk/en/we/addr; din/dout of each
    of the four ports stays individually exposed.
    """
    allPorts = [self.bramR.a, self.bramR.b, self.bramW.a, self.bramW.b]

    # the shared control signals drive every port
    for ctrlName in ("clk", "en", "we", "addr"):
        connect(getattr(self, ctrlName),
                *[getattr(p, ctrlName) for p in allPorts])

    portIo = [(self.bramW.a, self.in_w_a, self.out_w_a),
              (self.bramW.b, self.in_w_b, self.out_w_b),
              (self.bramR.a, self.in_r_a, self.out_r_a),
              (self.bramR.b, self.in_r_b, self.out_r_b)]
    # per-port data inputs ...
    for port, din, _ in portIo:
        port.din(din)
    # ... and data outputs
    for port, _, dout in portIo:
        dout(port.dout)
def _impl(self):
    """
    Wrap two dual-port BRAMs that share clk/en/we/addr; din/dout of each
    of the four ports stays individually exposed.
    """
    s = self
    bramR = s.bramR
    bramW = s.bramW
    all_bram_ports = [bramR.a, bramR.b, bramW.a, bramW.b]

    # shared control signals drive every port
    connect(s.clk, *[i.clk for i in all_bram_ports])
    connect(s.en, *[i.en for i in all_bram_ports])
    connect(s.we, *[i.we for i in all_bram_ports])
    connect(s.addr, *[i.addr for i in all_bram_ports])

    # per-port data inputs
    bramW.a.din(s.in_w_a)
    bramW.b.din(s.in_w_b)
    bramR.a.din(s.in_r_a)
    bramR.b.din(s.in_r_b)

    # per-port data outputs
    s.out_w_a(bramW.a.dout)
    s.out_w_b(bramW.b.dout)
    s.out_r_a(bramR.a.dout)
    s.out_r_b(bramR.b.dout)
def axiWAddrHandler(self, st, baseAddr, actualAddr, lenRem):
    """
    AXI write addr logic

    Issues aw transactions in the writeAddr state; transfers longer than
    the maximum burst length are split and actualAddr/lenRem track the
    progress.
    """
    axi = self.axi
    st_t = st._dtype

    axi.aw.valid(st._eq(st_t.writeAddr))
    axi.aw.addr(actualAddr)
    axi.aw.id(0)
    axi.aw.burst(BURST_INCR)
    axi.aw.cache(CACHE_DEFAULT)
    # NOTE(review): "MAX_BUTST_LEN" looks like a typo of MAX_BURST_LEN,
    # but it is presumably the attribute's real name on this class — confirm
    If(lenRem > self.MAX_BUTST_LEN,
       axi.aw.len(self.MAX_BUTST_LEN - 1)
    ).Else(
       connect(lenRem - 1, axi.aw.len, fit=True)
    )
    axi.aw.lock(LOCK_DEFAULT)
    axi.aw.prot(PROT_DEFAULT)
    axi.aw.size(BYTES_IN_TRANS(self.DATA_WIDTH // 8))
    axi.aw.qos(QOS_DEFAULT)

    # lenRem, actualAddr logic
    Switch(st)\
        .Case(st_t.fullIdle,
              # (re)load the transfer parameters
              lenRem(self.DATA_LEN),
              actualAddr(baseAddr)
        ).Case(st_t.writeAddr,
              If(axi.aw.ready,
                 If(lenRem > self.MAX_BUTST_LEN,
                    actualAddr(actualAddr + (self.MAX_BUTST_LEN * self.DATA_WIDTH // 8)),
                    lenRem(lenRem - self.MAX_BUTST_LEN)
                 ).Else(
                    actualAddr(actualAddr + fitTo(lenRem, actualAddr)),
                    lenRem(0)
                 )
              )
        )
def dataWFeed(self, st, lenRem, actualLenRem):
    """
    Connection between din and axi.w channel

    Streams dataIn words onto w while the FSM is in a write-data state;
    actualLenRem counts words remaining in the current burst.
    """
    w = self.axi.w
    din = self.dataIn
    st_t = st._dtype

    last = st._eq(st_t.writeDataLast)
    w_en = st._eq(st_t.writeData) | last

    w.valid(din.vld & w_en)
    w.data(din.data)
    # all byte lanes always valid
    w.strb(mask(w.strb._dtype.bit_length()))
    w.last(last)
    din.rd(w_en & w.ready)

    w_allAck = self.w_allAck(st)

    # actualLenRem driver
    Switch(st)\
        .Case(st_t.writeData,
              If(w_allAck,
                 actualLenRem(actualLenRem - 1)
              )
        ).Case(st_t.writeDataLast,
              If(w_allAck,
                 actualLenRem(0)
              )
        ).Default(
              # outside the data states preload the next burst's length
              If(lenRem > self.MAX_BUTST_LEN,
                 actualLenRem(self.MAX_BUTST_LEN)
              ).Else(
                 connect(lenRem, actualLenRem, fit=True)
              )
        )
def dataWFeed(self, st, lenRem, actualLenRem):
    """
    Connection between din and axi.w channel

    NOTE(review): byte-identical duplicate of the previous ``dataWFeed``
    — likely a copy/paste; confirm which class each belongs to.

    :param st: FSM state register (states used: writeData, writeDataLast)
    :param lenRem: words remaining in the whole transfer
    :param actualLenRem: register, words remaining in the current burst
    """
    w = self.axi.w
    din = self.dataIn
    st_t = st._dtype

    last = st._eq(st_t.writeDataLast)
    # data may flow in either write-data state
    w_en = st._eq(st_t.writeData) | last
    w.valid(din.vld & w_en)
    w.data(din.data)
    # all byte lanes always valid (full-word writes only)
    w.strb(mask(w.strb._dtype.bit_length()))
    w.last(last)
    din.rd(w_en & w.ready)

    # handshake of the whole w transfer for this state (helper defined elsewhere)
    w_allAck = self.w_allAck(st)

    # actualLenRem driver
    Switch(st)\
    .Case(st_t.writeData,
        If(w_allAck,
           actualLenRem(actualLenRem - 1)
        )
    ).Case(st_t.writeDataLast,
        If(w_allAck,
           actualLenRem(0)
        )
    ).Default(
        # outside the data states preload the counter for the next burst
        If(lenRem > self.MAX_BUTST_LEN,
           actualLenRem(self.MAX_BUTST_LEN)
        ).Else(
           connect(lenRem, actualLenRem, fit=True)
        )
    )
def writePart(self):
    """Build the write half of an AXI4-Lite-style register map endpoint.

    A 3-state FSM accepts one address (aw), then one data beat (w), then
    issues the response (b). The latched address selects which directly
    mapped register receives the write data; BRAM-mapped regions get the
    data bus fanned out (their write enables are presumably driven
    elsewhere — TODO confirm).

    :return: tuple (awAddr register, w_hs write-handshake signal)
    """
    DW = int(self.DATA_WIDTH)
    sig = self._sig
    reg = self._reg
    addrWidth = int(self.ADDR_WIDTH)
    # bits of address per addressable unit
    ADDR_STEP = self._getAddrStep()
    wSt_t = HEnum('wSt_t', ['wrIdle', 'wrData', 'wrResp'])
    aw = self.bus.aw
    w = self.bus.w
    b = self.bus.b

    # write fsm
    # wrIdle --aw.valid--> wrData --w.valid--> wrResp --b.ready--> wrIdle
    wSt = FsmBuilder(self, wSt_t, "wSt")\
        .Trans(wSt_t.wrIdle,
            (aw.valid, wSt_t.wrData)
        ).Trans(wSt_t.wrData,
            (w.valid, wSt_t.wrResp)
        ).Trans(wSt_t.wrResp,
            (b.ready, wSt_t.wrIdle)
        ).stateReg

    awAddr = reg('awAddr', aw.addr._dtype)
    w_hs = sig('w_hs')

    # address channel is ready only while idle
    awRd = wSt._eq(wSt_t.wrIdle)
    aw.ready(awRd)
    aw_hs = awRd & aw.valid

    # data channel is ready only in the data state
    wRd = wSt._eq(wSt_t.wrData)
    w.ready(wRd)
    w_hs(w.valid & wRd)

    # save aw addr
    If(aw_hs,
       awAddr(aw.addr)
    ).Else(
       awAddr(awAddr)
    )

    # output vld
    for t in self._directlyMapped:
        out = self.getPort(t).dout
        try:
            # array-typed field: width of one item
            width = t.getItemWidth()
        except TypeError:
            # scalar field: width from its bit-address span
            width = t.bitAddrEnd - t.bitAddr

        if width > DW:
            raise NotImplementedError("Fields wider than DATA_WIDTH not supported yet", t)

        offset = t.bitAddr % DW
        # slice the field's lanes out of the data word
        out.data(w.data[(offset + width): offset])
        # strobe the field only when the latched address matches its word address
        out.vld(w_hs & (awAddr._eq(vec(t.bitAddr // DW * (DW // ADDR_STEP), addrWidth))))

    for t in self._bramPortMapped:
        # BRAM-mapped regions just get the data bus (width-adjusted)
        din = self.getPort(t).din
        connect(w.data, din, fit=True)

    self.writeRespPart(awAddr, wSt._eq(wSt_t.wrResp))

    return awAddr, w_hs
def _impl(self):
    """Measure frame length (words + bytes in last word) of an AxiStream.

    Counts data words per frame; a frame ends on ``last`` or when the
    word counter hits MAX_LEN (forced split). The size word pushed into
    ``sizesBuff`` is Concat(word count, valid-bytes-in-last-word), while
    the data itself is forwarded unchanged into ``dataBuff``.
    """
    propagateClkRstn(self)
    # input stream behind a skid buffer
    dIn = AxiSBuilder(self, self.dataIn).buff().end
    sb = self.sizesBuff
    db = self.dataBuff

    # +1 bit so the counter can represent MAX_LEN itself
    wordCntr = self._reg("wordCntr",
                         Bits(log2ceil(self.MAX_LEN) + 1),
                         defVal=0)
    overflow = wordCntr._eq(self.MAX_LEN)
    # frame boundary: real end-of-frame or forced split on overflow
    last = dIn.last | overflow

    # count only when the word is actually accepted by both buffers
    If(StreamNode(masters=[dIn],
                  slaves=[sb.dataIn, db.dataIn]).ack(),
       If(last,
          wordCntr(0)
       ).Else(
          wordCntr(wordCntr + 1)
       )
    )

    length = self._sig("length", wordCntr._dtype)
    BYTE_CNT = dIn.data._dtype.bit_length() // 8

    if dIn.USE_STRB:
        # compress strb mask as binary number
        # rem = number of valid bytes in the last word (0 means full word)
        rem = self._sig("rem", Bits(log2ceil(BYTE_CNT)))
        # priority-decode the highest set strb bit
        SwitchLogic(
            cases=[(dIn.strb[i], rem(0 if i == BYTE_CNT - 1 else i + 1))
                   for i in reversed(range(BYTE_CNT))],
            default=[
                rem(0),
            ]
        )

        if self.EXPORT_ALIGNMENT_ERROR:
            # sticky flag: partial strb on a non-last word is an alignment error
            errorAlignment = self._reg("errorAlignment_reg", defVal=0)
            self.errorAlignment(errorAlignment)
            If(dIn.valid & (dIn.strb != mask(BYTE_CNT)) & ~dIn.last,
               errorAlignment(1)
            )

        # a partial last word does not count as a full extra word
        If(last & (dIn.strb != mask(BYTE_CNT)),
           length(wordCntr)
        ).Else(
           length(wordCntr + 1)
        )
    else:
        # no strb: every word is full
        length(wordCntr + 1)
        rem = vec(0, log2ceil(BYTE_CNT))

    sb.dataIn.data(Concat(length, rem))

    # forward payload; valid/ready/last are driven separately below
    connect(dIn, db.dataIn, exclude=[dIn.valid, dIn.ready, dIn.last])
    db.dataIn.last(last)

    # size word is pushed only at frame boundaries
    StreamNode(masters=[dIn],
               slaves=[sb.dataIn, db.dataIn],
               extraConds={sb.dataIn: last
                           }).sync()

    self.sizes(sb.dataOut)
    connect(db.dataOut, self.dataOut)
def axiAwHandler(self, wErrFlag):
    """Translate datapump write requests to the AXI ``aw`` channel.

    If the requested length can exceed what AXI ``len`` can encode
    (``useTransSplitting``), a long request is split into multiple
    bursts: the first burst is issued from the request itself, the rest
    from ``addrBackup``/``lenDebth`` registers until the debt is paid.

    :param wErrFlag: when set, aw dispatch is stalled (error state)
    """
    req = self.driver.req
    aw = self.a
    r = self._reg

    self.axiAddrDefaults()
    wInfo = self.writeInfoFifo.dataIn

    if self.useTransSplitting():
        # largest burst length encodable in aw.len (all-ones)
        LEN_MAX = mask(aw.len._dtype.bit_length())
        # 1 = next request starts fresh; 0 = continuing a split request
        lastReqDispatched = r("lastReqDispatched", defVal=1)
        # words still owed after the bursts issued so far
        lenDebth = r("lenDebth", req.len._dtype)
        addrBackup = r("addrBackup", req.addr._dtype)
        req_idBackup = r("req_idBackup", req.id._dtype)
        _id = self._sig("id", aw.id._dtype)
        requiresSplit = req.len > LEN_MAX
        requiresDebtSplit = lenDebth > LEN_MAX

        If(lastReqDispatched,
           # first burst of a (possibly long) request
           _id(req.id),
           aw.addr(req.addr),
           If(requiresSplit,
              aw.len(LEN_MAX)
           ).Else(
              connect(req.len, aw.len, fit=True),
           ),
           # snapshot for potential continuation bursts
           req_idBackup(req.id),
           addrBackup(req.addr + self.getBurstAddrOffset()),
           lenDebth(req.len - (LEN_MAX + 1)),
           # on full handshake decide whether a continuation is needed
           If(wInfo.rd & aw.ready & req.vld,
              If(requiresSplit,
                 lastReqDispatched(0)
              ).Else(
                 lastReqDispatched(1)
              )
           ),
           StreamNode(masters=[req],
                      slaves=[aw, wInfo],
                      extraConds={aw: ~wErrFlag}).sync(),
        ).Else(
           # continuation burst, driven from the backup registers
           _id(req_idBackup),
           aw.addr(addrBackup),
           If(requiresDebtSplit,
              aw.len(LEN_MAX)
           ).Else(
              connect(lenDebth, aw.len, fit=True)
           ),
           # no master here: req is not consumed during continuation
           StreamNode(slaves=[aw, wInfo],
                      extraConds={aw:~wErrFlag}).sync(),
           req.rd(0),
           If(StreamNode(slaves=[wInfo, aw]).ack(),
              addrBackup(addrBackup + self.getBurstAddrOffset()),
              lenDebth(lenDebth - (LEN_MAX+1)),
              # debt fits in one more burst -> that burst finishes the request
              If(lenDebth <= LEN_MAX,
                 lastReqDispatched(1)
              )
           )
        )
        aw.id(_id)
        wInfo.id(_id)
    else:
        # aw.len is wide enough: pass requests through 1:1
        aw.id(req.id)
        wInfo.id(req.id)
        aw.addr(req.addr)
        connect(req.len, aw.len, fit=True)
        StreamNode(masters=[req],
                   slaves=[aw, wInfo]).sync()
def _impl(self):
    """Prefetch items from a linked-list ring buffer in memory into a local fifo.

    Issues read requests through ``rDatapump`` between ``rdPtr`` and
    ``wrPtr``; the last word of each memory block holds the address of
    the next block (requested with ``ID_LAST``), which reloads
    ``baseIndex``. Downloaded payload words are pushed into ``dataFifo``.
    """
    propagateClkRstn(self)
    r, s = self._reg, self._sig

    req = self.rDatapump.req
    f = self.dataFifo
    dIn = self.rDatapump.r
    dBuffIn = f.dataIn

    # low address bits implied zero by alignment
    ALIGN_BITS = self.addrAlignBits()
    ID = self.ID
    BUFFER_CAPACITY = self.BUFFER_CAPACITY
    BURST_LEN = BUFFER_CAPACITY // 2
    # id marking the burst which contains the next-block pointer
    ID_LAST = self.ID_LAST

    # fifo can absorb at least one more full burst
    bufferHasSpace = s("bufferHasSpace")
    bufferHasSpace(f.size < (BURST_LEN + 1))
    # we are counting base next addr as item as well
    inBlock_t = Bits(log2ceil(self.ITEMS_IN_BLOCK + 1))
    ringSpace_t = Bits(self.PTR_WIDTH)

    # a read request is in flight
    downloadPending = r("downloadPending", defVal=0)

    # current block base address (word index, alignment bits stripped)
    baseIndex = r("baseIndex", Bits(self.ADDR_WIDTH - ALIGN_BITS))
    # items left in the current block before the next-block pointer
    inBlockRemain = r("inBlockRemain_reg", inBlock_t, defVal=self.ITEMS_IN_BLOCK)
    self.inBlockRemain(inBlockRemain)

    # Logic of tail/head
    rdPtr = r("rdPtr", ringSpace_t, defVal=0)
    wrPtr = r("wrPtr", ringSpace_t, defVal=0)
    # wrPtr is written externally; rdPtr/wrPtr are exported back
    If(self.wrPtr.dout.vld,
       wrPtr(self.wrPtr.dout.data)
    )
    self.wrPtr.din(wrPtr)
    self.rdPtr.din(rdPtr)

    # this means items are present in memory
    hasSpace = s("hasSpace")
    hasSpace(wrPtr != rdPtr)

    doReq = s("doReq")
    doReq(bufferHasSpace & hasSpace & ~downloadPending & req.rd)
    req.rem(0)

    self.dataOut(f.dataOut)

    # logic of baseAddr and baseIndex
    baseAddr = Concat(baseIndex, vec(0, ALIGN_BITS))
    req.addr(baseAddr)
    self.baseAddr.din(baseAddr)

    # a word of ours was accepted by the fifo this cycle
    dataAck = dIn.valid & In(dIn.id, [ID, ID_LAST]) & dBuffIn.rd

    If(self.baseAddr.dout.vld,
       # external override of the base address
       baseIndex(self.baseAddr.dout.data[:ALIGN_BITS])
    ).Elif(dataAck & downloadPending,
       If(dIn.last & dIn.id._eq(ID_LAST),
          # last word of the block: follow the next-block pointer
          baseIndex(dIn.data[self.ADDR_WIDTH:ALIGN_BITS])
       ).Else(
          baseIndex(baseIndex + 1)
       )
    )

    # items available between the pointers
    sizeByPtrs = s("sizeByPtrs", ringSpace_t)
    sizeByPtrs(wrPtr - rdPtr)

    inBlockRemain_asPtrSize = fitTo(inBlockRemain, sizeByPtrs)
    # space constrained by both pointer distance and block remainder
    constraingSpace = s("constraingSpace", ringSpace_t)
    If(inBlockRemain_asPtrSize < sizeByPtrs,
       constraingSpace(inBlockRemain_asPtrSize)
    ).Else(
       constraingSpace(sizeByPtrs)
    )

    # the block remainder (not the pointers) is the binding constraint
    constrainedByInBlockRemain = s("constrainedByInBlockRemain")
    constrainedByInBlockRemain(fitTo(sizeByPtrs, inBlockRemain) >= inBlockRemain)

    If(constraingSpace > BURST_LEN,
       # download full burst
       req.id(ID),
       req.len(BURST_LEN - 1),
       If(doReq,
          inBlockRemain(inBlockRemain - BURST_LEN)
       )
    ).Elif(constrainedByInBlockRemain & (inBlockRemain < BURST_LEN),
       # we know that sizeByPtrs <= inBlockRemain thats why we can resize it
       # we will download next* as well
       req.id(ID_LAST),
       connect(constraingSpace, req.len, fit=True),
       If(doReq,
          # block exhausted: next request starts a fresh block
          inBlockRemain(self.ITEMS_IN_BLOCK)
       )
    ).Else(
       # download data leftover
       req.id(ID),
       connect(constraingSpace - 1, req.len, fit=True),
       If(doReq,
          inBlockRemain(inBlockRemain - fitTo(constraingSpace, inBlockRemain))
       )
    )

    # logic of req dispatching
    If(downloadPending,
       # only one request outstanding at a time
       req.vld(0),
       If(dataAck & dIn.last,
          downloadPending(0)
       )
    ).Else(
       req.vld(bufferHasSpace & hasSpace),
       If(req.rd & bufferHasSpace & hasSpace,
          downloadPending(1)
       )
    )

    # into buffer pushing logic
    dBuffIn.data(dIn.data)

    # payload word (next-block pointer words with ID_LAST+last are not payload)
    isMyData = s("isMyData")
    isMyData(dIn.id._eq(ID) | (~dIn.last & dIn.id._eq(ID_LAST)))

    If(self.rdPtr.dout.vld,
       # external override of the read pointer
       rdPtr(self.rdPtr.dout.data)
    ).Else(
       If(dIn.valid & downloadPending & dBuffIn.rd & isMyData,
          rdPtr(rdPtr + 1)
       )
    )

    # push data into buffer and increment rdPtr
    StreamNode(masters=[dIn],
               slaves=[dBuffIn],
               extraConds={dIn: downloadPending,
                           dBuffIn: (dIn.id._eq(ID) | (dIn.id._eq(ID_LAST) & ~dIn.last)) & downloadPending
                           }).sync()
def addrHandler(self, addRmSize, rErrFlag):
    """Translate datapump read requests to the AXI ``ar`` channel.

    Mirrors ``axiAwHandler`` for the read side: when ``ar.len`` cannot
    encode the requested length, the request is split into multiple
    bursts tracked by ``lenDebth``/``rAddr`` registers; per-burst size
    info is forwarded through ``addRmSize`` (rem/propagateLast).

    :param addRmSize: handshaked channel carrying rem + propagateLast per burst
    :param rErrFlag: when set, ar dispatch is stalled (error state)
    """
    ar = self.a
    req = self.driver.req
    r, s = self._reg, self._sig

    self.axiAddrDefaults()

    # if axi len is smaller we have to use transaction splitting
    if self.useTransSplitting():
        # largest burst length encodable in ar.len (all-ones)
        LEN_MAX = mask(ar.len._dtype.bit_length())
        ADDR_STEP = self.getBurstAddrOffset()

        # 1 = next request starts fresh; 0 = continuing a split request
        lastReqDispatched = r("lastReqDispatched", defVal=1)
        # words still owed after the bursts issued so far
        lenDebth = r("lenDebth", req.len._dtype)
        remBackup = r("remBackup", req.rem._dtype)
        rAddr = r("r_addr", req.addr._dtype)

        # current-burst view (request or debt, selected below)
        reqLen = s("reqLen", req.len._dtype)
        reqRem = s("reqRem", req.rem._dtype)
        ack = s("ar_ack")

        self.arIdHandler(lastReqDispatched)

        If(reqLen > LEN_MAX,
           # not the final burst: no rem, no last propagation yet
           ar.len(LEN_MAX),
           addRmSize.rem(0),
           addRmSize.propagateLast(0)
        ).Else(
           # connect only lower bits of len
           connect(reqLen, ar.len, fit=True),
           addRmSize.rem(reqRem),
           addRmSize.propagateLast(1)
        )

        If(ack,
           If(reqLen > LEN_MAX,
              lenDebth(reqLen - (LEN_MAX + 1)),
              lastReqDispatched(0)
           ).Else(
              lastReqDispatched(1)
           )
        )

        If(lastReqDispatched,
           # first burst of a (possibly long) request
           ar.addr(req.addr),
           rAddr(req.addr + ADDR_STEP),
           reqLen(req.len),
           reqRem(req.rem),
           remBackup(req.rem),
           ack(req.vld & addRmSize.rd & ar.ready),
           StreamNode(masters=[req],
                      slaves=[addRmSize, ar],
                      extraConds={ar: ~rErrFlag}).sync(),
        ).Else(
           # continuation burst, driven from the backup registers
           req.rd(0),
           ar.addr(rAddr),
           ack(addRmSize.rd & ar.ready),
           If(ack,
              rAddr(rAddr + ADDR_STEP)
           ),
           reqLen(lenDebth),
           reqRem(remBackup),
           StreamNode(slaves=[addRmSize, ar],
                      extraConds={ar: ~rErrFlag}).sync(),
        )
    else:
        # if axi len is wider we can directly translate requests to axi
        ar.id(req.id)
        ar.addr(req.addr)
        connect(req.len, ar.len, fit=True)
        addRmSize.rem(req.rem)
        addRmSize.propagateLast(1)
        StreamNode(masters=[req],
                   slaves=[ar, addRmSize],
                   extraConds={ar: ~rErrFlag}).sync()