def translate_addr_signal(self, mem_map, sig_in, sig_out):
    """
    Build combinational address-translation logic which drives `sig_out`
    with the address `sig_in` remapped according to `mem_map`.

    :param mem_map: iterable of (offset_in, size, offset_out) region records
    :param sig_in: input address signal
    :param sig_out: output address signal, driven by this function
    """
    cases = []
    AW = sig_in._dtype.bit_length()
    for (offset_in, size, offset_out) in mem_map:
        # a region offset is "aligned" if it is a multiple of the
        # (power-of-two) region size, so the low bits can pass through
        in_is_aligned = offset_in % size == 0 and isPow2(size)
        out_is_aligned = offset_out % size == 0 and isPow2(size)
        if in_is_aligned:
            # number of in-region (offset) address bits
            L = (size - 1).bit_length()
            # region hit test compares only the bits above the region size
            en_sig = sig_in[:L]._eq(offset_in >> L)
            _sig_in = sig_in[L:]
            if out_is_aligned:
                # replace the upper bits with the output region prefix;
                # BUGFIX: offset_out must be shifted right by L so only its
                # prefix bits are placed in the AW-L upper bits (the original
                # used the full offset_out, which is correct only when it is 0
                # and overflows the vec otherwise)
                addr_drive = Concat(vec(offset_out >> L, AW - L), _sig_in)
            else:
                # zero-extend the in-region offset, then add the output base
                addr_drive = Concat(vec(0, AW - L), _sig_in) + offset_out
        else:
            # unaligned input region: full range compare + add/sub rebase
            en_sig = inRange(sig_in, offset_in, offset_in + size)
            if offset_in == offset_out:
                addr_drive = sig_in
            elif offset_in < offset_out:
                addr_drive = sig_in + (offset_out - offset_in)
            else:  # offset_in > offset_out:
                addr_drive = sig_in - (offset_in - offset_out)
        cases.append((en_sig, sig_out(addr_drive)))
    # addresses outside of every region pass through unchanged
    SwitchLogic(cases, default=sig_out(sig_in))
def setConfig(self, crcConfigCls):
    """
    Apply configuration from CRC configuration class.

    :param crcConfigCls: class carrying POLY, WIDTH, REFIN, REFOUT,
        XOROUT and INIT attributes of a CRC specification
    """
    cfg = crcConfigCls
    w = cfg.WIDTH
    self.POLY.set(vec(cfg.POLY, w))
    self.POLY_WIDTH.set(w)
    self.REFIN.set(cfg.REFIN)
    self.REFOUT.set(cfg.REFOUT)
    self.XOROUT.set(vec(cfg.XOROUT, w))
    self.INIT.set(vec(cfg.INIT, w))
def test_slice_bits_sig(self):
    """
    Slicing/indexing of an 8b signal: single-bit reads, out-of-range
    indices raising IndexError, full-width slices collapsing to the
    signal itself, and the string form of the produced expressions.
    """
    n = RtlNetlist()
    # 128 == 0b1000_0000, so only bit 7 is set
    sig = n.sig("sig", uint8_t, defVal=128)
    # indexing past the MSB must raise
    with self.assertRaises(IndexError):
        self.assertEqual(sig[8], hBit(1))
    self.assertEqual(sig[7], hBit(1))
    self.assertStrEq(sig[7], "sig(7)")
    self.assertEqual(sig[1], hBit(0))
    self.assertStrEq(sig[1], "sig(1)")
    self.assertEqual(sig[0], hBit(0))
    self.assertStrEq(sig[0], "sig(0)")
    # negative, reversed, and empty slices are all invalid
    with self.assertRaises(IndexError):
        self.assertEqual(sig[-1], hBit(0))
    with self.assertRaises(IndexError):
        self.assertEqual(sig[9:-1], hBit(0))
    with self.assertRaises(IndexError):
        self.assertEqual(sig[9:], hBit(0))
    with self.assertRaises(IndexError):
        self.assertEqual(sig[9:0], hBit(0))
    with self.assertRaises(IndexError):
        self.assertEqual(sig[0:], hBit(0))
    with self.assertRaises(IndexError):
        self.assertEqual(sig[0:0], hBit(0))
    # a slice spanning the whole width is the signal itself
    self.assertEqual(sig[8:], sig)
    self.assertStrEq(sig[8:], "sig")
    self.assertEqual(sig[8:0], sig)
    self.assertStrEq(sig[8:0], "sig")
    self.assertEqual(sig[:0], sig)
    self.assertStrEq(sig[:0], "sig")
    # upper-part slices of 0b1000_0000 shift the set bit down
    self.assertEqual(sig[:1], vec(64, 7))
    self.assertStrEq(sig[:1], "sig(7DOWNTO1)")
    self.assertEqual(sig[:2], vec(32, 6))
    self.assertStrEq(sig[:2], "sig(7DOWNTO2)")
    self.assertEqual(sig[:7], vec(1, 1))
    self.assertStrEq(sig[:7], "sig(7DOWNTO7)")
    self.assertEqual(sig[7:6], vec(0, 1))
    self.assertStrEq(sig[7:6], "sig(6DOWNTO6)")
def test_ADD_eval(self):
    """Addition of int values and of 2b vectors evaluates to the expected sum."""
    WIDTH = 2
    for a, b, expected in [(0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 2)]:
        msg = "a_in %d, b_in %d, out %d" % (a, b, expected)
        # integer-typed addition
        int_sum = hInt(a) + hInt(b)
        self.assertTrue(int_sum.vldMask)
        self.assertEqual(int_sum.val, expected, msg)
        # 2b-vector addition: result mask must be fully valid
        vec_sum = vec(a, WIDTH) + vec(b, WIDTH)
        self.assertEqual(vec_sum.vldMask, 3)
        self.assertEqual(vec_sum.val, expected, msg)
def _impl(self):
    """Drive `c` with constants, all comparison results of a/b and zero padding."""
    a, b = self.a, self.b
    parts = [
        hBit(1),    # constant MSB
        vec(7, 3),  # constant field
        a != b,
        a < b,
        a <= b,
        a._eq(b),
        a >= b,
        a > b,
        vec(0, 22),  # zero padding in the low bits
    ]
    self.c(Concat(*parts))
def _impl(self):
    """Drive `c` with constants, all comparison results of a/b and zero padding."""
    a, b = self.a, self.b
    comparisons = (a != b, a < b, a <= b, a._eq(b), a >= b, a > b)
    # constant header, the six comparison bits, then zero padding
    self.c(Concat(hBit(1), vec(7, 3), *comparisons, vec(0, 22)))
def IndexOps():
    """
    Build a netlist with assignments exercising bit slicing, concatenation
    and partial (indexed) signal drives.

    :return: tuple (netlist, list of the created signals)
    """
    word_t = Bits(8)
    netlist = RtlNetlist()
    s_in = netlist.sig("s_in", word_t)
    s_out = netlist.sig("s_out", word_t)
    s_in2 = netlist.sig("s_in2", word_t)
    s_out2 = netlist.sig("s_out2", word_t)
    s_in3 = netlist.sig("s_in3", Bits(16))
    s_out3 = netlist.sig("s_out3", word_t)
    s_in4a = netlist.sig("s_in4a", word_t)
    s_in4b = netlist.sig("s_in4b", word_t)
    s_out4 = netlist.sig("s_out4", Bits(16))

    # whole-signal drive from a slice concatenated with a constant
    s_out(s_in[4:]._concat(vec(2, 4)))
    # drive low and high halves separately
    s_out2[4:](s_in2[4:])
    s_out2[:4](s_in2[:4])
    # 16b -> 8b by taking the low half
    s_out3(s_in3[8:])
    # assemble a 16b signal from two 8b sources
    s_out4[8:](s_in4a)
    s_out4[(8 + 8):8](s_in4b)

    signals = [s_in, s_out, s_in2, s_out2, s_in3,
               s_out3, s_in4a, s_in4b, s_out4]
    return netlist, signals
def IndexOps():
    """
    Build a netlist with assignments exercising bit slicing, concatenation
    and partial (indexed) signal drives.

    :return: tuple (netlist, list of the created signals)
    """
    byte_t = Bits(8)
    net = RtlNetlist()
    s_in = net.sig("s_in", byte_t)
    s_out = net.sig("s_out", byte_t)
    s_in2 = net.sig("s_in2", byte_t)
    s_out2 = net.sig("s_out2", byte_t)
    s_in3 = net.sig("s_in3", Bits(16))
    s_out3 = net.sig("s_out3", byte_t)
    s_in4a = net.sig("s_in4a", byte_t)
    s_in4b = net.sig("s_in4b", byte_t)
    s_out4 = net.sig("s_out4", Bits(16))

    # slice + constant concat feeding a whole signal
    s_out(s_in[4:]._concat(vec(2, 4)))
    # independent drives of the two halves
    s_out2[4:](s_in2[4:])
    s_out2[:4](s_in2[:4])
    # narrow a 16b signal to its low byte
    s_out3(s_in3[8:])
    # build a 16b signal from two bytes
    s_out4[8:](s_in4a)
    s_out4[(8 + 8):8](s_in4b)

    created = [s_in, s_out, s_in2, s_out2, s_in3,
               s_out3, s_in4a, s_in4b, s_out4]
    return net, created
def _impl(self):
    """
    Wire the write port and read ports of the backing RAM, adding
    read-modify-write "accumulate" behavior: a write with do_accumulate
    first preloads the current word through the first read port, then
    merges it with the new data.  A per-byte mask is stored next to the
    data so partially-written bytes can be tracked.
    """
    w = self.port[0]
    ram_w = self.ram.port[0]
    # True if each byte of the mask is 0xff or 0x00
    we_bytes = list(iterBits(w.we, bitsInOne=8, fillup=True))
    # cut off padding
    we_for_we_bytes = []
    for last, b in iter_with_last(we_bytes):
        if last and self.MASK_PADDING_W:
            mask_rem_w = self.MASK_W % 8
            b = b[mask_rem_w:]
        we_for_we_bytes.append(b != 0)
    # on overwrite/non-accumulate every mask byte is forced to written
    we_for_we_bytes = rename_signal(
        self,
        Concat(*[b | ~w.do_accumulate | w.do_overwrite
                 for b in reversed(we_for_we_bytes)]),
        "we_for_we_bytes")
    # preload is 1 for exactly one cycle before an accumulating write
    preload = self._reg("preload", def_val=0)
    If(w.en.vld,
       preload(~preload & w.do_accumulate & ~w.do_overwrite))
    w.en.rd(~w.do_accumulate | w.do_overwrite | preload)
    ram_w.addr(w.addr)
    ram_w.en(w.en.vld & (w.do_overwrite | ~w.do_accumulate | preload))
    ram_w.we(Concat(w.we, we_for_we_bytes))
    w_mask = w.we
    if self.MASK_PADDING_W:
        w_mask = Concat(vec(0, self.MASK_PADDING_W), w_mask)
    is_first_read_port = True
    for ram_r, r in zip(self.ram.port[1:], self.port[1:]):
        if is_first_read_port:
            # first read port doubles as the preload port: after a preload
            # merge the stored mask into the newly written one
            w_mask = preload._ternary(
                w_mask | ram_r.dout[self.MASK_PADDING_W + self.MASK_W:],
                w_mask)
            w_mask = rename_signal(self, w_mask, "w_mask")
            ram_w.din(Concat(w.din, w_mask))
            will_preload_for_accumulate = rename_signal(
                self, w.en.vld & w.do_accumulate & ~w.do_overwrite,
                "will_preload_for_accumulate")
            # preload steals the read port address for one cycle
            ram_r.addr(will_preload_for_accumulate._ternary(w.addr, r.addr))
            ram_r.en(will_preload_for_accumulate | r.en.vld)
            # [TODO] check if r.en.rd is according to spec
            r.en.rd(~will_preload_for_accumulate | preload)
            is_first_read_port = False
        else:
            # remaining read ports are plain pass-through
            ram_r.addr(r.addr)
            ram_r.en(r.en.vld)
            r.en.rd(1)
        # split the stored word back into data and mask parts
        r.dout(ram_r.dout[:self.MASK_PADDING_W + self.MASK_W])
        r.dout_mask(ram_r.dout[self.MASK_W:])
    propagateClkRstn(self)
def extend_to_width_multiple_of_8(sig):
    """
    Zero-extend `sig` so its bit width becomes the nearest multiple of 8
    that is >= its current width; return it unchanged if already aligned.

    :param sig: signal/value with a Bits-like dtype
    :return: `sig` or `sig` zero-extended from the MSB side
    """
    w = sig._dtype.bit_length()
    # round w up to the nearest multiple of 8
    # BUGFIX: the original computed ceil((w // 8) / 8) * 8, which is wrong
    # for nearly every width (e.g. w=16 -> 8, w=9 -> 8)
    closest_multiple_of_8 = ceil(w / 8) * 8
    if closest_multiple_of_8 == w:
        return sig
    else:
        return Concat(vec(0, closest_multiple_of_8 - w), sig)
def _config(self):
    """
    Declare parameters, including one whose value is too large for the
    native integer type of the target HDL and therefore must be declared
    as a wide bit vector.
    """
    self.PARAM_0 = Param(0)
    self.PARAM_10 = Param(10)
    try:
        self.PARAM_1_sll_512 = Param(1 << 512)
        # BUGFIX: the two adjacent string literals were missing a
        # separating space ("...which istoo big...")
        raise AssertionError("Parameter with int value which is "
                             "too big to fit in integer type of target hdl language")
    except TypeError:
        # portable type for large int, generally int in verilog/vhdl is 32b wide
        self.PARAM_1_sll_512 = Param(vec(1 << 512, width=512 + 1))
def __rshift__(self, other):
    """
    Logical shift right by `other` bits (zero fill from the MSB side).

    :note: arithmetic shift for signed types is not implemented
    """
    if self._dtype.signed:
        raise NotImplementedError()
    # drop the low bits, pad the top with zeros of matching width
    high_zeros = vec(0, int(other))
    return high_zeros._concat(self[:other])
def _impl(self):
    """
    Read items from an array in memory over a read datapump: translate an
    item index to a byte address, issue the read request and route the
    (possibly sub-word) item from the returned data word to `self.item`.
    """
    propagateClkRstn(self)
    ITEM_WIDTH = int(self.ITEM_WIDTH)
    DATA_WIDTH = int(self.DATA_WIDTH)
    ITEMS_IN_DATA_WORD = self.ITEMS_IN_DATA_WORD
    ITEM_SIZE_IN_WORDS = 1
    # only byte-aligned items that exactly pack into one data word are supported
    if ITEM_WIDTH % 8 != 0 or ITEM_SIZE_IN_WORDS * DATA_WIDTH != ITEMS_IN_DATA_WORD * ITEM_WIDTH:
        raise NotImplementedError(ITEM_WIDTH)
    req = self.rDatapump.req
    req.id(self.ID)
    req.len(ITEM_SIZE_IN_WORDS - 1)
    req.rem(0)
    if ITEMS_IN_DATA_WORD == 1:
        # one item per word: index scaled by item byte size is the address
        addr = Concat(self.index.data, vec(0, log2ceil(ITEM_WIDTH // 8)))
        req.addr(self.base + fitTo(addr, req.addr))
        StreamNode(masters=[self.index], slaves=[req]).sync()
        self.item.data(self.rDatapump.r.data)
        StreamNode(masters=[self.rDatapump.r], slaves=[self.item]).sync()
    else:
        # multiple items per word: the low index bits select the item
        # inside the word and are remembered in a side FIFO
        r = self.rDatapump.r.data
        f = self.itemSubIndexFifo
        subIndexBits = f.dataIn.data._dtype.bit_length()
        itemAlignBits = log2ceil(ITEM_WIDTH // 8)
        addr = Concat(self.index.data[:subIndexBits],
                      vec(0, itemAlignBits + subIndexBits))
        req.addr(self.base + fitTo(addr, req.addr))
        f.dataIn.data(self.index.data[subIndexBits:])
        StreamNode(masters=[self.index], slaves=[req, f.dataIn]).sync()
        # select the item slice of the data word by the stored sub-index
        Switch(f.dataOut.data).addCases([
            (ITEMS_IN_DATA_WORD - i - 1,
             self.item.data(r[(ITEM_WIDTH * (i + 1)):(ITEM_WIDTH * i)]))
            for i in range(ITEMS_IN_DATA_WORD)
        ])
        StreamNode(masters=[self.rDatapump.r, f.dataOut],
                   slaves=[self.item]).sync()
def _impl(self):
    """
    Read items from an array in memory over a read datapump: translate an
    item index to a byte address, issue the read request and route the
    (possibly sub-word) item from the returned data word to `self.item`.
    """
    propagateClkRstn(self)
    ITEM_WIDTH = int(self.ITEM_WIDTH)
    DATA_WIDTH = int(self.DATA_WIDTH)
    ITEMS_IN_DATA_WORD = self.ITEMS_IN_DATA_WORD
    ITEM_SIZE_IN_WORDS = 1
    # only byte-aligned items that exactly pack into one data word are supported
    if ITEM_WIDTH % 8 != 0 or ITEM_SIZE_IN_WORDS * DATA_WIDTH != ITEMS_IN_DATA_WORD * ITEM_WIDTH:
        raise NotImplementedError(ITEM_WIDTH)
    req = self.rDatapump.req
    req.id(self.ID)
    req.len(ITEM_SIZE_IN_WORDS - 1)
    req.rem(0)
    if ITEMS_IN_DATA_WORD == 1:
        # one item per word: index scaled by item byte size is the address
        addr = Concat(self.index.data, vec(0, log2ceil(ITEM_WIDTH // 8)))
        req.addr(self.base + fitTo(addr, req.addr))
        StreamNode(masters=[self.index], slaves=[req]).sync()
        self.item.data(self.rDatapump.r.data)
        StreamNode(masters=[self.rDatapump.r], slaves=[self.item]).sync()
    else:
        # multiple items per word: the low index bits select the item
        # inside the word and are remembered in a side FIFO
        r = self.rDatapump.r.data
        f = self.itemSubIndexFifo
        subIndexBits = f.dataIn.data._dtype.bit_length()
        itemAlignBits = log2ceil(ITEM_WIDTH // 8)
        addr = Concat(self.index.data[:subIndexBits],
                      vec(0, itemAlignBits + subIndexBits))
        req.addr(self.base + fitTo(addr, req.addr))
        f.dataIn.data(self.index.data[subIndexBits:])
        StreamNode(masters=[self.index], slaves=[req, f.dataIn]).sync()
        # select the item slice of the data word by the stored sub-index
        Switch(f.dataOut.data).addCases([
            (ITEMS_IN_DATA_WORD - i - 1,
             self.item.data(r[(ITEM_WIDTH * (i + 1)): (ITEM_WIDTH * i)]))
            for i in range(ITEMS_IN_DATA_WORD)
        ])
        StreamNode(masters=[self.rDatapump.r, f.dataOut],
                   slaves=[self.item]).sync()
def setUpCrc(self, poly, dataWidth=None, refin=True, refout=True,
             initval=mask(32), finxor=mask(32)):
    """
    Configure and prepare a CRC unit under test.

    :param poly: CRC specification with POLY and WIDTH attributes
    :param dataWidth: input word width, defaults to the polynomial width
    :param refin/refout: input/output bit-reflection flags
    :param initval: initial CRC register value
    :param finxor: value xor-ed onto the final CRC
    :return: the prepared unit
    """
    pw = poly.WIDTH
    if dataWidth is None:
        dataWidth = pw
    self.DATA_WIDTH = dataWidth
    self.POLY_WIDTH = pw
    self.u = u = Crc()
    u.INIT.set(vec(initval, pw))
    u.DATA_WIDTH.set(dataWidth)
    u.REFIN.set(refin)
    u.REFOUT.set(refout)
    u.POLY_WIDTH.set(pw)
    u.POLY.set(vec(poly.POLY, pw))
    u.XOROUT.set(vec(finxor, pw))
    self.prepareUnit(u)
    return u
def _config(self):
    """
    Declare parameters, including one whose value is too large for the
    native integer type of the target HDL and therefore must be declared
    as a wide bit vector.
    """
    self.PARAM_0 = Param(0)
    self.PARAM_10 = Param(10)
    try:
        self.PARAM_1_sll_512 = Param(1 << 512)
        # BUGFIX: the two adjacent string literals were missing a
        # separating space ("...which istoo big...")
        raise AssertionError(
            "Parameter with int value which is "
            "too big to fit in integer type of target hdl language")
    except TypeError:
        # portable type for large int, generally int in verilog/vhdl is 32b wide
        self.PARAM_1_sll_512 = Param(vec(1 << 512, width=512 + 1))
def __lshift__(self, other):
    """
    Logical shift left by `other` bits (zero fill from the LSB side).

    :note: arithmetic shift for signed types is not implemented
    """
    if self._dtype.signed:
        raise NotImplementedError()
    shamt = int(other)
    width = self._dtype.bit_length()
    # keep only the low (width - shamt) bits, append zeros below them
    kept_bits = self[(width - shamt):]
    return kept_bits._concat(vec(0, shamt))
def _impl(self):
    """
    Measure the length of incoming AxiStream frames: count data words,
    forward data into a data buffer and push (word count, byte remainder)
    records into a sizes buffer at the end of each frame. Frames longer
    than MAX_LEN are force-terminated via the overflow flag.
    """
    propagateClkRstn(self)
    dIn = AxiSBuilder(self, self.dataIn).buff().end
    sb = self.sizesBuff
    db = self.dataBuff
    # counts words of the current frame; one extra bit for MAX_LEN
    wordCntr = self._reg("wordCntr",
                         Bits(log2ceil(self.MAX_LEN) + 1),
                         defVal=0)
    overflow = wordCntr._eq(self.MAX_LEN)
    last = dIn.last | overflow
    If(StreamNode(masters=[dIn],
                  slaves=[sb.dataIn, db.dataIn]).ack(),
       If(last,
          wordCntr(0)
       ).Else(
          wordCntr(wordCntr + 1)
       )
    )
    length = self._sig("length", wordCntr._dtype)
    BYTE_CNT = dIn.data._dtype.bit_length() // 8
    if dIn.USE_STRB:
        # compress strb mask as binary number
        rem = self._sig("rem", Bits(log2ceil(BYTE_CNT)))
        SwitchLogic(cases=[(dIn.strb[i], rem(0 if i == BYTE_CNT - 1 else i + 1))
                           for i in reversed(range(BYTE_CNT))],
                    default=[
                        rem(0),
                    ])
        if self.EXPORT_ALIGNMENT_ERROR:
            # sticky flag: a non-full strb on a non-last word is an error
            errorAlignment = self._reg("errorAlignment_reg", defVal=0)
            self.errorAlignment(errorAlignment)
            If(dIn.valid & (dIn.strb != mask(BYTE_CNT)) & ~dIn.last,
               errorAlignment(1))
        # a partially-valid last word does not count as a full word
        If(last & (dIn.strb != mask(BYTE_CNT)),
           length(wordCntr)).Else(length(wordCntr + 1))
    else:
        length(wordCntr + 1)
        rem = vec(0, log2ceil(BYTE_CNT))
    sb.dataIn.data(Concat(length, rem))
    connect(dIn, db.dataIn, exclude=[dIn.valid, dIn.ready, dIn.last])
    db.dataIn.last(last)
    # sizes buffer accepts a record only at frame end
    StreamNode(masters=[dIn],
               slaves=[sb.dataIn, db.dataIn],
               extraConds={
                   sb.dataIn: last
               }).sync()
    self.sizes(sb.dataOut)
    connect(db.dataOut, self.dataOut)
def test_value(self):
    """Equality of vector/bool values; clone equality is kept after updateTime changes."""
    for value, width in ((1, 1), (0, 1), (0, 2)):
        self.assertTrue(vec(value, width)._eq(vec(value, width)))
    self.assertTrue(hBool(True)._eq(hBool(True)))
    original = vec(2, 2)
    copy = original.clone()
    self.assertTrue(original._eq(copy))
    # equality must not depend on the timestamp of the last update
    copy.updateTime = 2
    self.assertTrue(original._eq(copy))
def connectRegisters(self, st, onoff, baseAddr):
    """
    connection of axilite registers

    :param st: FSM state register (used to expose the idle flag)
    :param onoff: enable register, readable and writable over the bus
    :param baseAddr: base address register, readable and writable over the bus
    """
    idle = st._eq(st._dtype.fullIdle)
    regs = self.regsConventor.decoded
    # control read view: [onoff, idle, zero padding]
    regs.control.din(Concat(onoff, idle, vec(0, self.DATA_WIDTH - 2)))
    # bus write to control updates only the onoff bit (bit 0)
    If(regs.control.dout.vld,
       onoff(regs.control.dout.data[0]))
    c(baseAddr, regs.baseAddr.din)
    # bus write to baseAddr replaces the whole register
    If(regs.baseAddr.dout.vld,
       baseAddr(regs.baseAddr.dout.data))
def test_ADD_eval(self):
    """Addition of int values and of 2b vectors evaluates to the expected sum."""
    cases = [(0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 2)]
    for lhs, rhs, expected in cases:
        detail = "a_in %d, b_in %d, out %d" % (lhs, rhs, expected)
        # integer-typed addition
        as_int = hInt(lhs) + hInt(rhs)
        self.assertTrue(as_int.vldMask)
        self.assertEqual(as_int.val, expected, detail)
        # 2b-vector addition: result mask must be fully valid
        as_vec = vec(lhs, 2) + vec(rhs, 2)
        self.assertEqual(as_vec.vldMask, 3)
        self.assertEqual(as_vec.val, expected, detail)
def test_ADD_IntBits(self):
    """
    vec + hInt: 7 + 1 == 8, 255 + 1 wraps to 0 in 8 bits; the result mask
    stays fully valid, for both default and explicitly-unsigned vectors.
    """
    for signed_args in ((), (False,)):
        for start, expected in ((7, 8), (255, 0)):
            a = vec(start, 8, *signed_args)
            b = hInt(1)
            c = a + b
            self.assertEqual(c.val, expected)
            self.assertEqual(c.vldMask, mask(8))
def hstruct_reinterpret_to_bits(self, sigOrVal, toType: HdlType):
    """
    Reinterpret a HStruct signal/value as a flat bit vector of equal width.

    :param sigOrVal: struct signal or value to reinterpret
    :param toType: target Bits-like type, must have the same bit length
    """
    assert toType.bit_length() == self.bit_length()

    def field_as_bits(f):
        # padding fields (no name) become undefined bits of the field width
        if f.name is None:
            return vec(None, f.bit_length())
        v = getattr(sigOrVal, f.name)
        if isinstance(v, (Value, RtlSignalBase)):
            return v
        # plain python value: convert through the field type
        return f.dtype.from_py(v)

    parts = [field_as_bits(f) for f in self.fields]
    # first field occupies the lowest bits -> reverse for MSB-first Concat
    return Concat(*reversed(parts))
def connectRegisters(self, st, onoff, baseAddr):
    """
    connection of axilite registers

    :param st: FSM state register (used to expose the idle flag)
    :param onoff: enable register, readable and writable over the bus
    :param baseAddr: base address register, readable and writable over the bus
    """
    idle = st._eq(st._dtype.fullIdle)
    regs = self.regsConventor.decoded
    # control read view: [onoff, idle, zero padding]
    regs.control.din(Concat(onoff, idle, vec(0, self.DATA_WIDTH - 2)))
    # bus write to control updates only the onoff bit (bit 0)
    If(regs.control.dout.vld,
       onoff(regs.control.dout.data[0])
       )
    c(baseAddr, regs.baseAddr.din)
    # bus write to baseAddr replaces the whole register
    If(regs.baseAddr.dout.vld,
       baseAddr(regs.baseAddr.dout.data)
       )
def fitTo_t(what: Union[RtlSignal, HValue], where_t: HdlType,
            extend: bool = True, shrink: bool = True):
    """
    Slice signal "what" to fit in "where"
    or arithmetically (for signed by MSB / unsigned, vector with 0)
    extend "what" to same width as "where"

    little-endian impl.

    :param what: signal or value to be resized
    :param where_t: target type whose bit length is the target width
    :param extend: allow increasing of the signal width
    :param shrink: allow shrinking of the signal width
    :raises BitWidthErr: when the required resize direction is disallowed
    """
    whatWidth = what._dtype.bit_length()
    toWidth = where_t.bit_length()
    if toWidth == whatWidth:
        # already the right width
        return what
    elif toWidth < whatWidth:
        # slice
        if not shrink:
            raise BitWidthErr()
        return what[toWidth:]
    else:
        if not extend:
            raise BitWidthErr()
        w = toWidth - whatWidth
        if what._dtype.signed:
            # signed extension: replicate the MSB w times
            msb = what[whatWidth - 1]
            ext = reduce(lambda a, b: a._concat(b), [msb for _ in range(w)])
        else:
            # 0 extend
            ext = vec(0, w)
        return ext._concat(what)
def test_slice_bits(self):
    """
    Slicing/indexing of 8b constant values: single-bit reads,
    out-of-range indices raising IndexError, full-width slices equal to
    the value itself, and upper/lower partial slices.
    """
    # 128 == 0b1000_0000, 1 == 0b0000_0001
    v128 = uint8_t.fromPy(128)
    v1 = uint8_t.fromPy(1)
    # indexing past the MSB must raise
    with self.assertRaises(IndexError):
        self.assertEqual(v128[8], hBit(1))
    self.assertEqual(v128[7], hBit(1))
    self.assertEqual(v128[1], hBit(0))
    self.assertEqual(v128[0], hBit(0))
    # negative, reversed, and empty slices are all invalid
    with self.assertRaises(IndexError):
        self.assertEqual(v128[-1], hBit(0))
    with self.assertRaises(IndexError):
        self.assertEqual(v128[9:-1], hBit(0))
    with self.assertRaises(IndexError):
        self.assertEqual(v128[9:], hBit(0))
    with self.assertRaises(IndexError):
        self.assertEqual(v128[9:0], hBit(0))
    with self.assertRaises(IndexError):
        self.assertEqual(v128[0:], hBit(0))
    with self.assertRaises(IndexError):
        self.assertEqual(v128[0:0], hBit(0))
    # a slice spanning the whole width equals the original value
    self.assertEqual(v128[8:], v128)
    self.assertEqual(v128[8:0], v128)
    self.assertEqual(v128[:0], v128)
    # upper-part slices of 0b1000_0000 shift the set bit down
    self.assertEqual(v128[:1], vec(64, 7))
    self.assertEqual(v128[:2], vec(32, 6))
    self.assertEqual(v128[:7], vec(1, 1))
    # lower-part slices of 0b0000_0001 keep the set bit
    self.assertEqual(v1[1:], vec(1, 1))
    self.assertEqual(v1[2:], vec(1, 2))
    self.assertEqual(v1[8:], vec(1, 8))
def strbToRem(strbBits, remBits):
    """
    Generate (strb, rem) reference pairs: for index i the strb value has
    the low i+1 bits set and rem encodes i.

    :param strbBits: width of the strb vector
    :param remBits: width of the rem vector
    """
    for i in range(strbBits):
        yield (vec(mask(i + 1), strbBits),
               vec(i, remBits))
def indexToAddr(self, indx):
    """Convert an item index to an address by appending ALIGN_BITS zero bits."""
    align_padding = vec(0, self.ALIGN_BITS)
    return Concat(indx, align_padding)
def writePart(self):
    """
    Build the write half of an AXI-lite slave endpoint: a three-state FSM
    (address -> data -> response), capture of the write address, and
    dispatch of the write data/valid to directly mapped register ports
    and BRAM ports.

    :return: tuple (captured aw address register, write-handshake signal)
    """
    DW = int(self.DATA_WIDTH)
    sig = self._sig
    reg = self._reg
    addrWidth = int(self.ADDR_WIDTH)
    ADDR_STEP = self._getAddrStep()
    wSt_t = HEnum('wSt_t', ['wrIdle', 'wrData', 'wrResp'])
    aw = self.bus.aw
    w = self.bus.w
    b = self.bus.b
    # write fsm
    wSt = FsmBuilder(self, wSt_t, "wSt")\
        .Trans(wSt_t.wrIdle,
               (aw.valid, wSt_t.wrData)
        ).Trans(wSt_t.wrData,
               (w.valid, wSt_t.wrResp)
        ).Trans(wSt_t.wrResp,
               (b.ready, wSt_t.wrIdle)
        ).stateReg
    awAddr = reg('awAddr', aw.addr._dtype)
    w_hs = sig('w_hs')
    # address channel handshake only in idle state
    awRd = wSt._eq(wSt_t.wrIdle)
    aw.ready(awRd)
    aw_hs = awRd & aw.valid
    # data channel handshake only in data state
    wRd = wSt._eq(wSt_t.wrData)
    w.ready(wRd)
    w_hs(w.valid & wRd)
    # save aw addr
    If(aw_hs,
       awAddr(aw.addr)
    ).Else(
       awAddr(awAddr)
    )
    # output vld
    for t in self._directlyMapped:
        out = self.getPort(t).dout
        try:
            width = t.getItemWidth()
        except TypeError:
            width = t.bitAddrEnd - t.bitAddr
        if width > DW:
            raise NotImplementedError(
                "Fields wider than DATA_WIDTH not supported yet", t)
        # position of the field inside its data word
        offset = t.bitAddr % DW
        out.data(w.data[(offset + width):offset])
        # valid only when the captured address targets this field's word
        out.vld(w_hs & (
            awAddr._eq(vec(t.bitAddr // DW * (DW // ADDR_STEP), addrWidth))))
    for t in self._bramPortMapped:
        din = self.getPort(t).din
        connect(w.data, din, fit=True)
    self.writeRespPart(awAddr, wSt._eq(wSt_t.wrResp))
    return awAddr, w_hs
def _impl(self):
    """
    Downloader for a ring buffer stored in memory: issue burst read
    requests through a read datapump while space is available between
    the externally supplied write pointer and the local read pointer,
    push the received words into the data FIFO and advance the pointers.
    Bursts are additionally constrained so they never cross a block
    boundary; the last word of a block carries the address of the next
    block (requested under ID_LAST).
    """
    propagateClkRstn(self)
    r, s = self._reg, self._sig
    req = self.rDatapump.req
    f = self.dataFifo
    dIn = self.rDatapump.r
    dBuffIn = f.dataIn
    ALIGN_BITS = self.addrAlignBits()
    ID = self.ID
    BUFFER_CAPACITY = self.BUFFER_CAPACITY
    BURST_LEN = BUFFER_CAPACITY // 2
    ID_LAST = self.ID_LAST
    # a new burst may start only while half the fifo capacity is free
    bufferHasSpace = s("bufferHasSpace")
    bufferHasSpace(f.size < (BURST_LEN + 1))
    # we are counting base next addr as item as well
    inBlock_t = Bits(log2ceil(self.ITEMS_IN_BLOCK + 1))
    ringSpace_t = Bits(self.PTR_WIDTH)
    downloadPending = r("downloadPending", defVal=0)
    baseIndex = r("baseIndex", Bits(self.ADDR_WIDTH - ALIGN_BITS))
    inBlockRemain = r("inBlockRemain_reg", inBlock_t,
                      defVal=self.ITEMS_IN_BLOCK)
    self.inBlockRemain(inBlockRemain)
    # Logic of tail/head
    rdPtr = r("rdPtr", ringSpace_t, defVal=0)
    wrPtr = r("wrPtr", ringSpace_t, defVal=0)
    If(self.wrPtr.dout.vld,
       wrPtr(self.wrPtr.dout.data)
    )
    self.wrPtr.din(wrPtr)
    self.rdPtr.din(rdPtr)
    # this means items are present in memory
    hasSpace = s("hasSpace")
    hasSpace(wrPtr != rdPtr)
    doReq = s("doReq")
    doReq(bufferHasSpace & hasSpace & ~downloadPending & req.rd)
    req.rem(0)
    self.dataOut(f.dataOut)
    # logic of baseAddr and baseIndex
    baseAddr = Concat(baseIndex, vec(0, ALIGN_BITS))
    req.addr(baseAddr)
    self.baseAddr.din(baseAddr)
    dataAck = dIn.valid & In(dIn.id, [ID, ID_LAST]) & dBuffIn.rd
    If(self.baseAddr.dout.vld,
       # external override of the base address
       baseIndex(self.baseAddr.dout.data[:ALIGN_BITS])
    ).Elif(dataAck & downloadPending,
       If(dIn.last & dIn.id._eq(ID_LAST),
          # last word of a block contains the next-block address
          baseIndex(dIn.data[self.ADDR_WIDTH:ALIGN_BITS])
       ).Else(
          baseIndex(baseIndex + 1)
       )
    )
    # number of items available between the pointers
    sizeByPtrs = s("sizeByPtrs", ringSpace_t)
    sizeByPtrs(wrPtr - rdPtr)
    inBlockRemain_asPtrSize = fitTo(inBlockRemain, sizeByPtrs)
    # burst size limited by both pointer distance and block remainder
    constraingSpace = s("constraingSpace", ringSpace_t)
    If(inBlockRemain_asPtrSize < sizeByPtrs,
       constraingSpace(inBlockRemain_asPtrSize)
    ).Else(
       constraingSpace(sizeByPtrs)
    )
    constrainedByInBlockRemain = s("constrainedByInBlockRemain")
    constrainedByInBlockRemain(fitTo(sizeByPtrs, inBlockRemain) >= inBlockRemain)
    If(constraingSpace > BURST_LEN,
       # download full burst
       req.id(ID),
       req.len(BURST_LEN - 1),
       If(doReq,
          inBlockRemain(inBlockRemain - BURST_LEN)
       )
    ).Elif(constrainedByInBlockRemain & (inBlockRemain < BURST_LEN),
       # we know that sizeByPtrs <= inBlockRemain thats why we can resize it
       # we will download next* as well
       req.id(ID_LAST),
       connect(constraingSpace, req.len, fit=True),
       If(doReq,
          inBlockRemain(self.ITEMS_IN_BLOCK)
       )
    ).Else(
       # download data leftover
       req.id(ID),
       connect(constraingSpace - 1, req.len, fit=True),
       If(doReq,
          inBlockRemain(inBlockRemain - fitTo(constraingSpace, inBlockRemain))
       )
    )
    # logic of req dispatching
    If(downloadPending,
       req.vld(0),
       If(dataAck & dIn.last,
          downloadPending(0)
       )
    ).Else(
       req.vld(bufferHasSpace & hasSpace),
       If(req.rd & bufferHasSpace & hasSpace,
          downloadPending(1)
       )
    )
    # into buffer pushing logic
    dBuffIn.data(dIn.data)
    # data word belongs to us unless it is the next-block-address word
    isMyData = s("isMyData")
    isMyData(dIn.id._eq(ID) | (~dIn.last & dIn.id._eq(ID_LAST)))
    If(self.rdPtr.dout.vld,
       rdPtr(self.rdPtr.dout.data)
    ).Else(
       If(dIn.valid & downloadPending & dBuffIn.rd & isMyData,
          rdPtr(rdPtr + 1)
       )
    )
    # push data into buffer and increment rdPtr
    StreamNode(masters=[dIn],
               slaves=[dBuffIn],
               extraConds={dIn: downloadPending,
                           dBuffIn: (dIn.id._eq(ID)
                                     | (dIn.id._eq(ID_LAST) & ~dIn.last))
                                    & downloadPending
               }).sync()
def srl(sig, howMany) -> RtlSignalBase:
    """Logical shift right: drop the low `howMany` bits, zero-fill from the top."""
    zero_fill = vec(0, howMany)
    return zero_fill._concat(sig[:howMany])
def _impl(self):
    """
    Downloader for a ring buffer stored in memory: issue burst read
    requests through a read datapump while space is available between
    the externally supplied write pointer and the local read pointer,
    push the received words into the data FIFO and advance the pointers.
    Bursts are additionally constrained so they never cross a block
    boundary; the last word of a block carries the address of the next
    block (requested under ID_LAST).
    """
    propagateClkRstn(self)
    r, s = self._reg, self._sig
    req = self.rDatapump.req
    f = self.dataFifo
    dIn = self.rDatapump.r
    dBuffIn = f.dataIn
    ALIGN_BITS = self.addrAlignBits()
    ID = self.ID
    BUFFER_CAPACITY = self.BUFFER_CAPACITY
    BURST_LEN = BUFFER_CAPACITY // 2
    ID_LAST = self.ID_LAST
    # a new burst may start only while half the fifo capacity is free
    bufferHasSpace = s("bufferHasSpace")
    bufferHasSpace(f.size < (BURST_LEN + 1))
    # we are counting base next addr as item as well
    inBlock_t = Bits(log2ceil(self.ITEMS_IN_BLOCK + 1))
    ringSpace_t = Bits(self.PTR_WIDTH)
    downloadPending = r("downloadPending", defVal=0)
    baseIndex = r("baseIndex", Bits(self.ADDR_WIDTH - ALIGN_BITS))
    inBlockRemain = r("inBlockRemain_reg", inBlock_t,
                      defVal=self.ITEMS_IN_BLOCK)
    self.inBlockRemain(inBlockRemain)
    # Logic of tail/head
    rdPtr = r("rdPtr", ringSpace_t, defVal=0)
    wrPtr = r("wrPtr", ringSpace_t, defVal=0)
    If(self.wrPtr.dout.vld,
       wrPtr(self.wrPtr.dout.data))
    self.wrPtr.din(wrPtr)
    self.rdPtr.din(rdPtr)
    # this means items are present in memory
    hasSpace = s("hasSpace")
    hasSpace(wrPtr != rdPtr)
    doReq = s("doReq")
    doReq(bufferHasSpace & hasSpace & ~downloadPending & req.rd)
    req.rem(0)
    self.dataOut(f.dataOut)
    # logic of baseAddr and baseIndex
    baseAddr = Concat(baseIndex, vec(0, ALIGN_BITS))
    req.addr(baseAddr)
    self.baseAddr.din(baseAddr)
    dataAck = dIn.valid & In(dIn.id, [ID, ID_LAST]) & dBuffIn.rd
    If(self.baseAddr.dout.vld,
       # external override of the base address
       baseIndex(self.baseAddr.dout.data[:ALIGN_BITS])).Elif(
        dataAck & downloadPending,
        If(dIn.last & dIn.id._eq(ID_LAST),
           # last word of a block contains the next-block address
           baseIndex(dIn.data[self.ADDR_WIDTH:ALIGN_BITS])).Else(
            baseIndex(baseIndex + 1)))
    # number of items available between the pointers
    sizeByPtrs = s("sizeByPtrs", ringSpace_t)
    sizeByPtrs(wrPtr - rdPtr)
    inBlockRemain_asPtrSize = fitTo(inBlockRemain, sizeByPtrs)
    # burst size limited by both pointer distance and block remainder
    constraingSpace = s("constraingSpace", ringSpace_t)
    If(inBlockRemain_asPtrSize < sizeByPtrs,
       constraingSpace(inBlockRemain_asPtrSize)).Else(
        constraingSpace(sizeByPtrs))
    constrainedByInBlockRemain = s("constrainedByInBlockRemain")
    constrainedByInBlockRemain(
        fitTo(sizeByPtrs, inBlockRemain) >= inBlockRemain)
    If(
        constraingSpace > BURST_LEN,
        # download full burst
        req.id(ID),
        req.len(BURST_LEN - 1),
        If(doReq,
           inBlockRemain(inBlockRemain - BURST_LEN))
    ).Elif(
        constrainedByInBlockRemain & (inBlockRemain < BURST_LEN),
        # we know that sizeByPtrs <= inBlockRemain thats why we can resize it
        # we will download next* as well
        req.id(ID_LAST),
        connect(constraingSpace, req.len, fit=True),
        If(doReq,
           inBlockRemain(self.ITEMS_IN_BLOCK))).Else(
        # download data leftover
        req.id(ID),
        connect(constraingSpace - 1, req.len, fit=True),
        If(
            doReq,
            inBlockRemain(inBlockRemain -
                          fitTo(constraingSpace, inBlockRemain))))
    # logic of req dispatching
    If(downloadPending,
       req.vld(0),
       If(dataAck & dIn.last,
          downloadPending(0))).Else(
        req.vld(bufferHasSpace & hasSpace),
        If(req.rd & bufferHasSpace & hasSpace,
           downloadPending(1)))
    # into buffer pushing logic
    dBuffIn.data(dIn.data)
    # data word belongs to us unless it is the next-block-address word
    isMyData = s("isMyData")
    isMyData(dIn.id._eq(ID) | (~dIn.last & dIn.id._eq(ID_LAST)))
    If(self.rdPtr.dout.vld,
       rdPtr(self.rdPtr.dout.data)).Else(
        If(dIn.valid & downloadPending & dBuffIn.rd & isMyData,
           rdPtr(rdPtr + 1)))
    # push data into buffer and increment rdPtr
    StreamNode(masters=[dIn],
               slaves=[dBuffIn],
               extraConds={
                   dIn: downloadPending,
                   dBuffIn: (dIn.id._eq(ID)
                             | (dIn.id._eq(ID_LAST) & ~dIn.last))
                            & downloadPending
               }).sync()
def test_bits_le(self):
    """<= on 8b vectors: 8 <= 16 holds, 16 <= 8 does not."""
    smaller, bigger = vec(8, 8), vec(16, 8)
    self.assertTrue((smaller <= bigger).val)
    self.assertFalse((bigger <= smaller).val)
def _impl(self):
    """
    Measure the length of incoming AxiStream frames: count data words,
    forward data into a data buffer and push (word count, byte remainder)
    records into a sizes buffer at the end of each frame. Frames longer
    than MAX_LEN are force-terminated via the overflow flag.
    """
    propagateClkRstn(self)
    dIn = AxiSBuilder(self, self.dataIn).buff().end
    sb = self.sizesBuff
    db = self.dataBuff
    # counts words of the current frame; one extra bit for MAX_LEN
    wordCntr = self._reg("wordCntr",
                         Bits(log2ceil(self.MAX_LEN) + 1),
                         defVal=0)
    overflow = wordCntr._eq(self.MAX_LEN)
    last = dIn.last | overflow
    If(StreamNode(masters=[dIn],
                  slaves=[sb.dataIn, db.dataIn]).ack(),
       If(last,
          wordCntr(0)
       ).Else(
          wordCntr(wordCntr + 1)
       )
    )
    length = self._sig("length", wordCntr._dtype)
    BYTE_CNT = dIn.data._dtype.bit_length() // 8
    if dIn.USE_STRB:
        # compress strb mask as binary number
        rem = self._sig("rem", Bits(log2ceil(BYTE_CNT)))
        SwitchLogic(
            cases=[
                (dIn.strb[i], rem(0 if i == BYTE_CNT - 1 else i + 1))
                for i in reversed(range(BYTE_CNT))],
            default=[
                rem(0),
            ]
        )
        if self.EXPORT_ALIGNMENT_ERROR:
            # sticky flag: a non-full strb on a non-last word is an error
            errorAlignment = self._reg("errorAlignment_reg", defVal=0)
            self.errorAlignment(errorAlignment)
            If(dIn.valid & (dIn.strb != mask(BYTE_CNT)) & ~dIn.last,
               errorAlignment(1)
               )
        # a partially-valid last word does not count as a full word
        If(last & (dIn.strb != mask(BYTE_CNT)),
           length(wordCntr)
        ).Else(
           length(wordCntr + 1)
        )
    else:
        length(wordCntr + 1)
        rem = vec(0, log2ceil(BYTE_CNT))
    sb.dataIn.data(Concat(length, rem))
    connect(dIn, db.dataIn, exclude=[dIn.valid, dIn.ready, dIn.last])
    db.dataIn.last(last)
    # sizes buffer accepts a record only at frame end
    StreamNode(masters=[dIn],
               slaves=[sb.dataIn, db.dataIn],
               extraConds={sb.dataIn: last
                           }).sync()
    self.sizes(sb.dataOut)
    connect(db.dataOut, self.dataOut)
def writePart(self):
    """
    Build the write half of an AXI-lite slave endpoint: a three-state FSM
    (address -> data -> response), capture of the write address, and
    dispatch of the write data/valid to directly mapped register ports
    and BRAM ports.

    :return: tuple (captured aw address register, write-handshake signal)
    """
    DW = int(self.DATA_WIDTH)
    sig = self._sig
    reg = self._reg
    addrWidth = int(self.ADDR_WIDTH)
    ADDR_STEP = self._getAddrStep()
    wSt_t = HEnum('wSt_t', ['wrIdle', 'wrData', 'wrResp'])
    aw = self.bus.aw
    w = self.bus.w
    b = self.bus.b
    # write fsm
    wSt = FsmBuilder(self, wSt_t, "wSt")\
        .Trans(wSt_t.wrIdle,
               (aw.valid, wSt_t.wrData)
        ).Trans(wSt_t.wrData,
               (w.valid, wSt_t.wrResp)
        ).Trans(wSt_t.wrResp,
               (b.ready, wSt_t.wrIdle)
        ).stateReg
    awAddr = reg('awAddr', aw.addr._dtype)
    w_hs = sig('w_hs')
    # address channel handshake only in idle state
    awRd = wSt._eq(wSt_t.wrIdle)
    aw.ready(awRd)
    aw_hs = awRd & aw.valid
    # data channel handshake only in data state
    wRd = wSt._eq(wSt_t.wrData)
    w.ready(wRd)
    w_hs(w.valid & wRd)
    # save aw addr
    If(aw_hs,
       awAddr(aw.addr)
    ).Else(
       awAddr(awAddr)
    )
    # output vld
    for t in self._directlyMapped:
        out = self.getPort(t).dout
        try:
            width = t.getItemWidth()
        except TypeError:
            width = t.bitAddrEnd - t.bitAddr
        if width > DW:
            raise NotImplementedError("Fields wider than DATA_WIDTH not supported yet", t)
        # position of the field inside its data word
        offset = t.bitAddr % DW
        out.data(w.data[(offset + width): offset])
        # valid only when the captured address targets this field's word
        out.vld(w_hs & (awAddr._eq(vec(t.bitAddr // DW * (DW // ADDR_STEP), addrWidth))))
    for t in self._bramPortMapped:
        din = self.getPort(t).din
        connect(w.data, din, fit=True)
    self.writeRespPart(awAddr, wSt._eq(wSt_t.wrResp))
    return awAddr, w_hs
def sll(sig, howMany) -> RtlSignalBase:
    """Logical shift left: keep the low bits and append `howMany` zeros."""
    width = sig._dtype.bit_length()
    kept = sig[(width - howMany):]
    return kept._concat(vec(0, howMany))
def _config(self):
    """Declare parameters: initial memory content and write-clock polarity."""
    self.IS_WCLK_INVERTED = Param(hBit(0))
    self.INIT = Param(vec(0, DATA_WIDTH))
def _impl(self):
    """
    Uploader: collect items into an internal buffer and flush them to
    memory through a write datapump, either when the buffer is full or
    when a timeout with pending data expires. A three-state FSM drives
    request issue, data transmission and wait-for-ack; offset/remaining
    registers walk the target area block by block.
    """
    ALIGN_BITS = log2ceil(self.DATA_WIDTH // 8 - 1).val
    TIMEOUT_MAX = self.TIMEOUT - 1
    ITEMS = self.ITEMS
    buff = self.buff
    reqAck = self.wDatapump.ack
    req = self.wDatapump.req
    w = self.wDatapump.w
    propagateClkRstn(self)
    # number of items of the burst currently in flight
    sizeOfitems = self._reg("sizeOfItems", Bits(
        buff.size._dtype.bit_length()))
    # aligned base addr
    baseAddr = self._reg("baseAddrReg", Bits(self.ADDR_WIDTH - ALIGN_BITS))
    If(self.baseAddr.dout.vld,
       baseAddr(self.baseAddr.dout.data[:ALIGN_BITS])
    )
    self.baseAddr.din(Concat(baseAddr, vec(0, ALIGN_BITS)))
    # offset in buffer and its complement
    offset_t = Bits(log2ceil(ITEMS + 1), signed=False)
    offset = self._reg("offset", offset_t, defVal=0)
    remaining = self._reg("remaining", Bits(
        log2ceil(ITEMS + 1), signed=False), defVal=ITEMS)
    connect(remaining, self.buff_remain, fit=True)
    addrTmp = self._sig("baseAddrTmp", baseAddr._dtype)
    addrTmp(baseAddr + fitTo(offset, baseAddr))
    # req values logic
    req.id(self.ID)
    req.addr(Concat(addrTmp, vec(0, ALIGN_BITS)))
    req.rem(0)
    sizeTmp = self._sig("sizeTmp", buff.size._dtype)
    # req.len encodes size-1, hence the one-bit-narrower type
    assert req.len._dtype.bit_length() == buff.size._dtype.bit_length() - 1, (
        req.len._dtype.bit_length(), buff.size._dtype.bit_length())
    buffSizeAsLen = self._sig("buffSizeAsLen", buff.size._dtype)
    buffSizeAsLen(buff.size - 1)
    buffSize_tmp = self._sig("buffSize_tmp", remaining._dtype)
    connect(buff.size, buffSize_tmp, fit=True)
    # true when the buffered data finishes the current block
    endOfLenBlock = (remaining - 1) < buffSize_tmp
    remainingAsLen = self._sig("remainingAsLen", remaining._dtype)
    remainingAsLen(remaining - 1)
    If(endOfLenBlock,
       connect(remainingAsLen, req.len, fit=True),
       connect(remaining, sizeTmp, fit=True)
    ).Else(
       connect(buffSizeAsLen, req.len, fit=True),
       sizeTmp(buff.size)
    )
    # counts down the words of the burst being transmitted
    lastWordCntr = self._reg("lastWordCntr", buff.size._dtype, 0)
    w_last = lastWordCntr._eq(1)
    w_ack = w.ready & buff.dataOut.vld
    # timeout logic
    timeoutCntr = self._reg("timeoutCntr",
                            Bits(log2ceil(self.TIMEOUT), False),
                            defVal=TIMEOUT_MAX)
    # buffer is full or timeout
    beginReq = buff.size._eq(self.BUFF_DEPTH) | timeoutCntr._eq(0)
    reqAckHasCome = self._sig("reqAckHasCome")
    reqAckHasCome(reqAck.vld & reqAck.data._eq(self.ID))
    st = FsmBuilder(self, stT)\
        .Trans(stT.waitOnInput,
               (beginReq & req.rd, stT.waitOnDataTx)
        ).Trans(stT.waitOnDataTx,
               (w_last & w_ack, stT.waitOnAck)
        ).Trans(stT.waitOnAck,
               (reqAckHasCome, stT.waitOnInput)
        ).stateReg
    If(st._eq(stT.waitOnInput) & beginReq,
       # timeout is counting only when there is pending data
       # start new request
       req.vld(1),
       If(req.rd,
          If(endOfLenBlock,
             # wrap to the start of the area
             offset(0),
             remaining(ITEMS)
          ).Else(
             offset(offset + fitTo(buff.size, offset)),
             remaining(remaining - fitTo(buff.size, remaining))
          ),
          sizeOfitems(sizeTmp),
          timeoutCntr(TIMEOUT_MAX)
       )
    ).Else(
       req.vld(0),
       If(buff.dataOut.vld & st._eq(stT.waitOnInput) & (timeoutCntr != 0),
          timeoutCntr(timeoutCntr - 1)
       )
    )
    reqAck.rd(st._eq(stT.waitOnAck))
    self.uploadedCntrHandler(st, reqAckHasCome, sizeOfitems)
    # it does not matter when lastWordCntr is changing when there is no
    # request
    startSendingData = st._eq(stT.waitOnInput) & beginReq & req.rd
    If(startSendingData,
       lastWordCntr(sizeTmp)
    ).Elif((lastWordCntr != 0) & w_ack,
       lastWordCntr(lastWordCntr - 1)
    )
    buff.dataIn(self.items)
    connect(buff.dataOut.data, w.data, fit=True)
    StreamNode(masters=[buff.dataOut],
               slaves=[w]
               ).sync(st._eq(stT.waitOnDataTx))
    w.strb(mask(w.strb._dtype.bit_length()))
    w.last(w_last)