def _downscale(self, factor):
    inputRegs_cntr = self._reg("inputRegs_cntr",
                               Bits(log2ceil(factor + 1), False),
                               def_val=0)

    # instantiate HandshakedReg, handshaked builder is not used
    # to avoid dependencies
    inReg = HandshakedReg(self.intfCls)
    inReg._updateParamsFrom(self.dataIn)
    self.inReg = inReg
    inReg.clk(self.clk)
    inReg.rst_n(self.rst_n)
    inReg.dataIn(self.dataIn)

    dataIn = inReg.dataOut
    dataOut = self.dataOut

    # create output mux
    for din, dout in zip(self.get_data(dataIn), self.get_data(dataOut)):
        widthOfPart = din._dtype.bit_length() // factor
        inParts = iterBits(din, bitsInOne=widthOfPart)
        Switch(inputRegs_cntr).add_cases(
            [(i, dout(inPart)) for i, inPart in enumerate(inParts)]
        )

    vld = self.get_valid_signal
    rd = self.get_ready_signal

    vld(dataOut)(vld(dataIn))
    self.get_ready_signal(dataIn)(inputRegs_cntr._eq(factor - 1) & rd(dataOut))

    If(vld(dataIn) & rd(dataOut),
       If(inputRegs_cntr._eq(factor - 1),
          inputRegs_cntr(0)
       ).Else(
          inputRegs_cntr(inputRegs_cntr + 1)
       )
    )
def _downscale(self, factor):
    inputRegs_cntr = self._reg("inputRegs_cntr",
                               Bits(log2ceil(factor + 1), False),
                               defVal=0)

    # instantiate HandshakedReg, handshaked builder is not used
    # to avoid dependencies
    inReg = HandshakedReg(self.intfCls)
    inReg._updateParamsFrom(self.dataIn)
    self.inReg = inReg
    inReg.clk(self.clk)
    inReg.rst_n(self.rst_n)
    inReg.dataIn(self.dataIn)

    dataIn = inReg.dataOut
    dataOut = self.dataOut

    # create output mux
    for din, dout in zip(self.getData(dataIn), self.getData(dataOut)):
        widthOfPart = din._dtype.bit_length() // factor
        inParts = iterBits(din, bitsInOne=widthOfPart)
        Switch(inputRegs_cntr).addCases(
            [(i, dout(inPart)) for i, inPart in enumerate(inParts)]
        )

    self.getVld(dataOut)(self.getVld(dataIn))
    self.getRd(dataIn)(inputRegs_cntr._eq(factor - 1) & self.getRd(dataOut))

    If(self.getVld(dataIn) & self.getRd(dataOut),
       If(inputRegs_cntr._eq(factor - 1),
          inputRegs_cntr(0)
       ).Else(
          inputRegs_cntr(inputRegs_cntr + 1)
       )
    )
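Both `_downscale` variants above (newer and older hwt API spellings) follow the pattern shared by most examples on this page: instantiate `HandshakedReg` for an interface class, copy parameters from an existing interface, register it on the parent unit, then connect `dataIn`/`dataOut` and the clock/reset. The following is a minimal, hedged sketch of that pattern in isolation; the wrapper unit `HsRegPassThrough` is hypothetical and the import paths assume the hwt/hwtLib versions these examples appear to target.

from hwt.interfaces.std import Handshaked
from hwt.interfaces.utils import addClkRstn, propagateClkRstn
from hwt.synthesizer.param import Param
from hwt.synthesizer.unit import Unit
from hwtLib.handshaked.reg import HandshakedReg


class HsRegPassThrough(Unit):
    """Hypothetical wrapper: a single HandshakedReg stage between two streams."""

    def _config(self):
        self.DATA_WIDTH = Param(8)

    def _declr(self):
        addClkRstn(self)
        with self._paramsShared():
            self.dataIn = Handshaked()
            self.dataOut = Handshaked()._m()

    def _impl(self):
        reg = HandshakedReg(Handshaked)
        reg._updateParamsFrom(self.dataIn)  # copy DATA_WIDTH before elaboration
        self.reg = reg  # assigning to self registers the subcomponent
        reg.dataIn(self.dataIn)
        self.dataOut(reg.dataOut)
        propagateClkRstn(self)  # drive the child's clk/rst_n from the parent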
def _impl(self) -> None:
    regs = self.regs = HObjList(
        HandshakedReg(HandshakeSync) for _ in range(2))
    regs[0].dataIn(self.dataIn)
    regs[1].dataIn(regs[0].dataOut)
    self.dataOut(regs[1].dataOut)
    propagateClkRstn(self)
def _declr(self):
    addClkRstn(self)
    with self._paramsShared():
        self.s = Axi4Lite()

        self.din0 = Handshaked()
        self.dout0 = Handshaked()._m()

        self.reg = HandshakedReg(Handshaked)
        self.din1 = Handshaked()
        self.dout1 = Handshaked()._m()

        self.other_clk = Clk()
        self.other_clk.FREQ = self.clk.FREQ * 2
        with self._associated(clk=self.other_clk):
            self.other_rst_n = Rst_n()
            self.din2 = Handshaked()
            self.dout2 = Handshaked()._m()
def setUpClass(cls):
    u = cls.u = HandshakedReg(Handshaked)
    u.DELAY = cls.DELAY
    u.LATENCY = cls.LATENCY
    cls.MAX_LATENCY = cls.LATENCY if isinstance(cls.LATENCY, int) else max(
        cls.LATENCY)
    cls.compileSim(u)
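The class setup above only configures and compiles the DUT; a test method then has to push data through it. The sketch below is hedged: it is not taken from the test suite, and it assumes the interface simulation agents (`._ag.data`), `runSim`, `assertValSequenceEqual` and `hwtSimApi.constants.CLK_PERIOD` that hwtLib `SimTestCase` tests normally provide.

from hwtSimApi.constants import CLK_PERIOD


def test_passData(self):
    u = self.u
    refData = [1, 2, 3, 4]
    # feed a few words into the input agent of the handshaked interface
    u.dataIn._ag.data.extend(refData)
    # leave enough cycles for the configured register latency
    self.runSim((10 + self.MAX_LATENCY) * CLK_PERIOD)
    # a register stage must not drop or reorder the stream
    self.assertValSequenceEqual(u.dataOut._ag.data, refData)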
def _impl(self):
    req = self.wDatapump.req
    w = self.wDatapump.w
    ack = self.wDatapump.ack

    # multi frame
    if self.MAX_OVERLAP > 1:
        ackPropageteInfo = HandshakedFifo(Handshaked)
        ackPropageteInfo.DEPTH = self.MAX_OVERLAP
    else:
        ackPropageteInfo = HandshakedReg(Handshaked)
    ackPropageteInfo.DATA_WIDTH = 1
    self.ackPropageteInfo = ackPropageteInfo

    if self.WRITE_ACK:
        _set = self.set
    else:
        _set = HsBuilder(self, self.set).buff().end

    if self.ID_WIDTH:
        req.id(self.ID)

    def propagateRequest(frame, indx):
        inNode = StreamNode(slaves=[req, ackPropageteInfo.dataIn])
        ack = inNode.ack()
        isLastFrame = indx == len(self._frames) - 1
        statements = [
            req.addr(_set.data + frame.startBitAddr // 8),
            req.len(frame.getWordCnt() - 1),
            self.driveReqRem(
                req, frame.parts[-1].endOfPart - frame.startBitAddr),
            ackPropageteInfo.dataIn.data(SKIP if indx != 0 else PROPAGATE),
            inNode.sync(_set.vld),
            _set.rd(ack if isLastFrame else 0),
        ]

        return statements, ack & _set.vld

    StaticForEach(self, self._frames, propagateRequest)

    # connect write channel
    w(self.frameAssember.dataOut)

    # propagate ack
    StreamNode(masters=[ack, ackPropageteInfo.dataOut],
               slaves=[self.writeAck],
               skipWhen={
                   self.writeAck: ackPropageteInfo.dataOut.data._eq(PROPAGATE)
               }).sync()

    # connect fields to assembler
    for _, transTmpl in self._tmpl.walkFlatten():
        f = transTmpl.getFieldPath()
        intf = self.frameAssember.dataIn._fieldsToInterfaces[f]
        intf(self.dataIn._fieldsToInterfaces[f])

    propagateClkRstn(self)
def _impl(self) -> None:
    r = HandshakedReg(HandshakeSync)
    # r.DELAY = 1
    # r.LATENCY = 2
    # to break ready signal chain
    self.reg = r
    if self.loop_connector_cls == HandshakedReg:
        c = self.loop_connector_cls(HandshakeSync)
    else:
        c = self.loop_connector_cls()
    self.con = c

    # circle r <-> c
    r.dataIn(c.dataOut)
    c.dataIn(r.dataOut)

    self.rd(r.dataOut.rd)
    self.vld(r.dataOut.vld)

    propagateClkRstn(self)
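The commented-out lines above hint at the timing parameters of `HandshakedReg`. A hedged configuration sketch follows; the `(1, 2)` latency tuple is borrowed from the cache example further below ("to break a ready chain"), and the concrete values here are purely illustrative.

r = HandshakedReg(HandshakeSync)
r.LATENCY = (1, 2)  # two-slot latency variant, breaks the combinational ready chain
r.DELAY = 0         # no extra fixed delay stages (illustrative value)
self.reg = r        # configure before registering on the parent, as the other examples do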
def _impl(self):
    In = self.dataIn
    rd = self.get_ready_signal

    sel = self.selectOneHot
    r = HandshakedReg(Handshaked)
    r.DATA_WIDTH = sel.data._dtype.bit_length()
    self.selReg = r
    r.dataIn(sel)
    propagateClkRstn(self)
    sel = r.dataOut

    for index, outIntf in enumerate(self.dataOut):
        for ini, outi in zip(In._interfaces, outIntf._interfaces):
            if ini == self.get_valid_signal(In):
                # out.vld
                outi(sel.vld & ini & sel.data[index])
            elif ini == rd(In):
                pass
            else:
                # data
                outi(ini)

    din = self.dataIn
    SwitchLogic(
        cases=[(~sel.vld,
                [sel.rd(0),
                 rd(In)(0)])
               ] + [(sel.data[index],
                     [rd(In)(rd(out)),
                      sel.rd(rd(out) & self.get_valid_signal(din)
                             & sel.vld & self._select_consume_en())])
                    for index, out in enumerate(self.dataOut)],
        default=[
            sel.rd(None),
            rd(In)(None)
        ]
    )
def _impl(self):
    In = self.dataIn
    rd = self.getRd

    sel = self.selectOneHot
    r = HandshakedReg(Handshaked)
    r.DATA_WIDTH.set(sel.data._dtype.bit_length())
    self.selReg = r
    r.dataIn(sel)
    propagateClkRstn(self)
    sel = r.dataOut

    for index, outIntf in enumerate(self.dataOut):
        for ini, outi in zip(In._interfaces, outIntf._interfaces):
            if ini == self.getVld(In):
                # out.vld
                outi(sel.vld & ini & sel.data[index])
            elif ini == rd(In):
                pass
            else:
                # data
                outi(ini)

    din = self.dataIn
    SwitchLogic(
        cases=[(~sel.vld,
                [sel.rd(0),
                 rd(In)(0)])
               ] + [(sel.data[index],
                     [rd(In)(rd(out)),
                      sel.rd(rd(out) & self.getVld(din)
                             & sel.vld & self._select_consume_en())])
                    for index, out in enumerate(self.dataOut)],
        default=[
            sel.rd(None),
            rd(In)(None)
        ]
    )
def add_addr_cam_out_reg(self, item_vld: RtlSignal):
    addr_cam = self.addr_cam
    addr_cam_out = addr_cam.out[0]  # HsBuilder(self, addr_cam.out).buff(1).end
    addr_cam_out_reg = HandshakedReg(addr_cam_out.__class__)
    addr_cam_out_reg._updateParamsFrom(addr_cam_out)
    self.addr_cam_out_reg = addr_cam_out_reg
    addr_cam_out_reg.dataIn(addr_cam_out, exclude=[addr_cam_out.data])
    addr_cam_out_reg.dataIn.data(addr_cam_out.data & item_vld)
    addr_cam_out = addr_cam_out_reg.dataOut
    return addr_cam_out
def _declr(self):
    addClkRstn(self)
    AxiWriteAggregatorWriteDispatcher.precompute_constants(self)
    with self._paramsShared():
        self.w = w = AxiWriteAggregatorWriteIntf()
        self.w_in_reg = w_in_reg = HandshakedReg(
            AxiWriteAggregatorWriteTmpIntf)
        w.ADDR_WIDTH = w_in_reg.ADDR_WIDTH = self.CACHE_LINE_ADDR_WIDTH
        w.DATA_WIDTH = w_in_reg.DATA_WIDTH = self.CACHE_LINE_SIZE * 8

        self.m = axi = Axi4()._m()
        axi.HAS_R = False

        self.write_dispatch = AxiWriteAggregatorWriteDispatcher()

        self.ooo_fifo = of = FifoOutOfOrderReadFiltered()
        of.ITEMS = w_in_reg.ITEMS = 2 ** self.ID_WIDTH
        of.KEY_WIDTH = self.CACHE_LINE_ADDR_WIDTH

        self.data_ram = self._declr_data_ram()
def ar_dispatch(self):
    """
    Send read request on AXI and store transaction in to state array and
    ooo_fifo for later wake up
    """
    ooo_fifo = self.ooo_fifo
    ar = self.m.ar
    din = self.dataIn
    assert din.addr._dtype.bit_length() == self.ADDR_WIDTH - self.ADDR_OFFSET_W, (
        din.addr._dtype.bit_length(), self.ADDR_WIDTH, self.ADDR_OFFSET_W)

    dataIn_reg = HandshakedReg(din.__class__)
    dataIn_reg._updateParamsFrom(din)
    self.dataIn_reg = dataIn_reg
    StreamNode(
        [din],
        [dataIn_reg.dataIn, ooo_fifo.write_confirm]
    ).sync()
    dataIn_reg.dataIn(din, exclude=[din.rd, din.vld])

    ar_node = StreamNode(
        [dataIn_reg.dataOut, ooo_fifo.read_execute],
        [ar]
    )
    ar_node.sync()

    state_arr = self.state_array
    state_write = state_arr.port[0]
    state_write.en(ar_node.ack())
    state_write.addr(ooo_fifo.read_execute.index)
    din_data = dataIn_reg.dataOut
    state_write.din(packIntf(din_data, exclude=[din_data.rd, din_data.vld]))

    ar.id(ooo_fifo.read_execute.index)
    ar.addr(Concat(din_data.addr, Bits(self.ADDR_OFFSET_W).from_py(0)))
    self._axi_addr_defaults(ar, 1)
def speculative_read_handler(self):
    """
    Connect the speculative_read port to internal storages of the
    :class:`AxiWriteAggregator`

    We need to handle several cases:

    1. the data is currently in tmp register
    2. the data was in tmp register and now is in data memory
    3. the data is in data memory
    4. the data was in data memory and now it is deallocated
    5. the data was not found anywhere

    Handling of a speculative read has the following stages:

    1. search the input register and the main address CAM for data
    2. optionally load the data from ram
    3. send data to speculative_read_data and set resp to error if it was not found;
       it may also happen that the data was flushed in the meantime

    .. figure:: ./_static/AxiStoreQueueWritePropagating_speculativeRead.png

    :note: speculative read never blocks the write channel and thus the data may be
        invalid if the speculative read data is stalled. This should be handled in
        the master of the speculative read port (the other component which is using
        this component).
    """
    sra = self.speculative_read_addr
    # CLOCK_PERIOD 0
    ooo_fifo = self.ooo_fifo
    ooo_fifo.read_lookup.data(sra.addr[:self.CACHE_LINE_OFFSET_BITS])

    w_in_reg = self.w_in_reg.dataOut
    w_in_reg_tmp = HObjList(
        HandshakedReg(AxiWriteAggregatorWriteTmpIntf) for _ in range(2))
    for r in w_in_reg_tmp:
        r._updateParamsFrom(w_in_reg)
        r.ID_WIDTH = self.ID_WIDTH
    self.w_in_reg_tmp = w_in_reg_tmp

    w_i = w_in_reg_tmp[0].dataIn
    w_i.orig_request_addr(sra.addr[:self.CACHE_LINE_OFFSET_BITS])
    w_i.orig_request_addr_eq(w_in_reg.addr._eq(w_i.orig_request_addr))
    w_i.orig_request_id(sra.id)
    w_i.orig_request_valid(sra.vld)
    w_i.addr(w_in_reg.addr)
    w_i.data(w_in_reg.data)
    w_i.valid(w_in_reg.vld)

    StreamNode(
        [sra],
        [ooo_fifo.read_lookup, w_i],
        skipWhen={
            sra: ~sra.vld
        },  # flush the pipeline if no request
    ).sync()

    # CLK_PERIOD 1
    read_lookup_res = HsBuilder(self, ooo_fifo.read_lookup_res).buff(1).end
    StreamNode([read_lookup_res, w_in_reg_tmp[0].dataOut],
               [w_in_reg_tmp[1].dataIn]).sync()
    w_in_reg_tmp[1].dataIn(
        w_in_reg_tmp[0].dataOut,
        exclude=[w_in_reg_tmp[1].dataIn.vld, w_in_reg_tmp[1].dataIn.rd])

    in_ram_flag = rename_signal(self,
                                read_lookup_res.data & ooo_fifo.item_valid,
                                "in_ram_flag")
    found_in_ram_flag = self._reg("found_in_ram_flag", def_val=0)
    If(read_lookup_res.vld & read_lookup_res.rd,
       found_in_ram_flag(in_ram_flag != 0))

    ram_r = self.data_ram.port[2]
    ram_r.en.vld(found_in_ram_flag.next)
    ram_r.addr(oneHotToBin(self, in_ram_flag, "in_ram_index"))

    # CLK_PERIOD 2
    srd = self.speculative_read_data
    w_in_reg_tmp_o = w_in_reg_tmp[1].dataOut
    StreamNode(
        [w_in_reg_tmp_o],
        [srd],
        # filter out pipeline flushes
        extraConds={
            srd: w_in_reg_tmp_o.orig_request_valid
        },
        skipWhen={
            srd: ~w_in_reg_tmp_o.orig_request_valid
        },
    ).sync()

    # read from in_tmp req has to be postponed so we can potentially load
    # the data from ram first
    found_in_actual_w_in_reg = rename_signal(
        self,
        w_in_reg.vld & w_in_reg.addr._eq(w_in_reg_tmp_o.orig_request_addr),
        "spec_read_found_in_actual_w_in_reg")
    w_in_reg_tmp_1_o = w_in_reg_tmp[0].dataOut
    found_in_w_in_reg_1 = rename_signal(
        self,
        w_in_reg_tmp_1_o.vld & w_in_reg_tmp_1_o.valid
        & w_in_reg_tmp_1_o.addr._eq(w_in_reg_tmp_o.orig_request_addr),
        "spec_read_found_in_w_in_reg_1")
    found_in_write_tmp_reg_2 = rename_signal(
        self,
        w_in_reg_tmp_o.vld & w_in_reg_tmp_o.valid
        & w_in_reg_tmp_o.orig_request_addr_eq,
        "spec_read_found_in_write_tmp_reg_2")

    srd.id(w_in_reg_tmp_o.orig_request_id)
    If(
        found_in_actual_w_in_reg,  # found in tmp register just now
        srd.data(w_in_reg.data),
        srd.resp(RESP_OKAY),
        srd.last(1),
    ).Elif(
        found_in_w_in_reg_1,  # found in tmp register in clock cycle -1
        srd.data(w_in_reg_tmp_1_o.data),
        srd.resp(RESP_OKAY),
        srd.last(1),
    ).Elif(
        found_in_write_tmp_reg_2,  # found in tmp register in clock cycle -2
        srd.data(w_in_reg_tmp_o.data),
        srd.resp(RESP_OKAY),
        srd.last(1),
    ).Elif(
        found_in_ram_flag,  # found in write data memory
        srd.data(ram_r.dout),
        srd.resp(RESP_OKAY),
        srd.last(1),
    ).Else(
        # not found anywhere
        srd.data(None),
        srd.resp(RESP_EXOKAY),
        srd.last(1),
    )
def data_array_io(
        self,
        aw_lru_incr: IndexWayHs,  # out
        aw_tagRes: AxiCacheTagArrayLookupResIntf,  # in
        victim_req: AddrHs, victim_way: Handshaked,  # out, in
        data_arr_read_req: IndexWayHs, data_arr_read: Axi4_r,  # in, out
        data_arr_r_port: BramPort_withoutClk,
        data_arr_w_port: BramPort_withoutClk,  # out, out
        tag_update: AxiCacheTagArrayUpdateIntf  # out
        ):
    """
    :ivar aw_lru_incr: an interface to increment LRU for the write channel
    :ivar victim_req: an interface to get a victim from the LRU array for a specified index
    :ivar victim_way: return interface for victim_req
    :ivar aw_tagRes: an interface with the results from tag lookup
    :ivar data_arr_read_req: an input interface with read requests from the read section
    :ivar data_arr_read: an output interface with read data for the read section
    :ivar data_arr_r_port: read port of the main data array
    :ivar data_arr_w_port: write port of the main data array
    """
    # note that the LRU update happens even if the data is stalled,
    # but that is not a problem because it won't change the order of the usage
    # of the cacheline
    self.incr_lru_on_hit(aw_lru_incr, aw_tagRes)

    st0 = self._reg(
        "victim_load_status0",
        HStruct(
            # the original id and address of a write transaction
            (self.s.aw.id._dtype, "write_id"),
            (self.s.aw.addr._dtype, "replacement_addr"),
            (aw_tagRes.TAG_T[aw_tagRes.WAY_CNT], "tags"),
            (BIT, "tag_found"),
            (BIT, "had_empty"),  # had some empty tag
            (aw_tagRes.way._dtype, "found_way"),
            (BIT, "valid"),
        ),
        def_val={
            "valid": 0,
        })
    # resolve if we need to select a victim and optionally ask for it
    st0_ready = self._sig("victim_load_status0_ready")
    has_empty = rename_signal(self, Or(*(~t.valid for t in aw_tagRes.tags)),
                              "has_empty")
    If(
        st0_ready,
        st0.write_id(aw_tagRes.id),
        st0.replacement_addr(aw_tagRes.addr),
        st0.tags(aw_tagRes.tags),
        st0.tag_found(aw_tagRes.found),
        st0.found_way(aw_tagRes.way),
        st0.had_empty(has_empty),
        # this register is being flushed, the values can become invalid;
        # the st0.valid is used to detect this state
        st0.valid(aw_tagRes.vld),
    )
    victim_req.addr(self.parse_addr(aw_tagRes.addr)[1])
    tag_check_node = StreamNode(
        [aw_tagRes], [victim_req],
        skipWhen={
            victim_req: aw_tagRes.vld & (aw_tagRes.found | has_empty)
        },
        extraConds={victim_req: ~aw_tagRes.found & ~has_empty})
    st1_ready = self._sig("victim_load_status1_ready")
    tag_check_node.sync(~st0.valid | st1_ready)
    tag_check_node_ack = rename_signal(self, tag_check_node.ack(),
                                       "tag_check_node_ack")
    st0_ready((st0.valid & tag_check_node_ack & st1_ready)
              | ~st0.valid | st1_ready)

    victim_load_st = HStruct(
        # an address constructed from an original tag in cache which is being replaced
        (self.s.aw.addr._dtype, "victim_addr"),
        # new data to write to data_array
        # (replacement data is still in in_w buffer because it was not consumed
        # if the tag was not found)
        (aw_tagRes.way._dtype, "victim_way"),
        (self.s.ar.id._dtype, "read_id"),
        (self.s.aw.id._dtype, "write_id"),
        # the original address used to resolve a new tag
        (self.s.aw.addr._dtype, "replacement_addr"),
        (Bits(2), "data_array_op"),  # type of operation with data_array
    )

    ########## st1 - pre (read request resolution, victim address resolution) ##########
    d_arr_r, d_arr_w = self.instantiate_data_array_to_hs(
        data_arr_r_port, data_arr_w_port)
    # :note: flush with higher priority than regular read
    need_to_flush = rename_signal(
        self, st0.valid & (~st0.had_empty & ~st0.tag_found), "need_to_flush")
    If(
        need_to_flush,
        d_arr_r.addr.data(
            self.addr_in_data_array(
                victim_way.data,
                self.parse_addr(st0.replacement_addr)[1])),
    ).Else(
        d_arr_r.addr.data(
            self.addr_in_data_array(data_arr_read_req.way,
                                    data_arr_read_req.index))
    )

    _victim_way = self._sig("victim_way_tmp", Bits(log2ceil(self.WAY_CNT)))
    _victim_tag = self._sig("victim_tag_tmp", Bits(self.TAG_W))
    SwitchLogic(
        [
            # select first empty tag
            (~tag.valid, [
                _victim_way(i),
                _victim_tag(tag.tag),
            ]) for i, tag in enumerate(st0.tags)
        ],
        default=[
            # select a victim specified by victim_way
            _victim_way(victim_way.data),
            SwitchLogic([(victim_way.data._eq(i), _victim_tag(tag.tag))
                         for i, tag in enumerate(st0.tags)],
                        default=_victim_tag(None))
        ])

    victim_load_status = HObjList(
        HandshakedReg(HsStructIntf) for _ in range(2))
    for i, st in enumerate(victim_load_status):
        st.T = victim_load_st
        if i == 0:
            st.LATENCY = (1, 2)  # to break a ready chain
    self.victim_load_status = victim_load_status
    st1_in = victim_load_status[0].dataIn.data

    # placed between st0, st1
    pure_write = rename_signal(
        self, st0.valid & ~need_to_flush & ~data_arr_read_req.vld,
        "pure_write")
    pure_read = rename_signal(self, ~st0.valid & data_arr_read_req.vld,
                              "pure_read")
    read_plus_write = rename_signal(
        self, st0.valid & ~need_to_flush & data_arr_read_req.vld,
        "read_plus_write")
    flush_write = rename_signal(
        self, st0.valid & need_to_flush & ~data_arr_read_req.vld,
        "flush_write")
    read_flush_write = rename_signal(
        self, st0.valid & need_to_flush & data_arr_read_req.vld,
        "read_flush_write")  # not dispatched at once

    read_req_node = StreamNode(
        [victim_way, data_arr_read_req],
        [d_arr_r.addr, victim_load_status[0].dataIn],
        extraConds={
            victim_way: flush_write | read_flush_write,  # 0
            # only write without flush not write at all but read request
            data_arr_read_req: pure_read | read_plus_write,
            # pure_read | read_plus_write,
            # d_arr_r.addr: pure_read | read_plus_write | flush_write | read_flush_write,
            # need_to_flush | data_arr_read_req.vld,  # 1
            # victim_load_status[0].dataIn: st0.valid | data_arr_read_req.vld,
        },
        skipWhen={
            victim_way: pure_write | pure_read | read_plus_write,
            data_arr_read_req: pure_write | flush_write | read_flush_write,
            d_arr_r.addr: pure_write,
        })
    read_req_node.sync()
    st1_ready(victim_load_status[0].dataIn.rd & read_req_node.ack())

    st1_in.victim_addr(
        self.deparse_addr(_victim_tag,
                          self.parse_addr(st0.replacement_addr)[1], 0))
    st1_in.victim_way(st0.tag_found._ternary(st0.found_way, _victim_way))
    st1_in.read_id(data_arr_read_req.id)
    st1_in.write_id(st0.write_id)
    st1_in.replacement_addr(st0.replacement_addr)
    If(pure_write,
       st1_in.data_array_op(data_trans_t.write)
    ).Elif(pure_read,
       st1_in.data_array_op(data_trans_t.read)
    ).Elif(read_plus_write,
       st1_in.data_array_op(data_trans_t.read_and_write)
    ).Else(  # .Elif(flush_write | read_flush_write,
       st1_in.data_array_op(data_trans_t.write_and_flush))
    # If(st0.valid,
    #    If(need_to_flush,
    #       st1_in.data_array_op(data_trans_t.write_and_flush)
    #    ).Elif(st0.tag_found & data_arr_read_req.vld,
    #       st1_in.data_array_op(data_trans_t.read_and_write)
    #    ).Else(
    #       st1_in.data_array_op(data_trans_t.write)
    #    )
    # ).Else(
    #    st1_in.data_array_op(data_trans_t.read)
    # )

    victim_load_status[1].dataIn(victim_load_status[0].dataOut)
    self.flush_or_read_node(d_arr_r, d_arr_w, victim_load_status[1].dataOut,
                            data_arr_read, tag_update)
class DebugBusMonitorExampleAxi(Unit):
    """
    An example of how to use :class:`hwtLib.abstract.debug_bus_monitor.DebugBusMonitor`

    .. hwt-autodoc::
    """

    def _config(self):
        Axi4Lite._config(self)

    def _declr(self):
        addClkRstn(self)
        with self._paramsShared():
            self.s = Axi4Lite()

            self.din0 = Handshaked()
            self.dout0 = Handshaked()._m()

            self.reg = HandshakedReg(Handshaked)
            self.din1 = Handshaked()
            self.dout1 = Handshaked()._m()

            self.other_clk = Clk()
            self.other_clk.FREQ = self.clk.FREQ * 2
            with self._associated(clk=self.other_clk):
                self.other_rst_n = Rst_n()
                self.din2 = Handshaked()
                self.dout2 = Handshaked()._m()

    def _impl(self):
        # some connections
        self.dout0(self.din0)

        self.reg.dataIn(self.din1)
        self.dout1(self.reg.dataOut)

        self.dout2(self.din2)

        # spy on previously generated circuit
        db = DebugBusMonitor(Axi4Lite, AxiLiteEndpoint)
        for i in [
                self.din0, self.dout0,
                self.din1, self.reg.dataIn,
                self.reg.dataOut, self.dout1,
                self.din2, self.dout2,
                ]:
            # cdc if the interface is using a different clock signal
            cdc = i._getAssociatedClk() is not self.clk
            db.register(i, cdc=cdc)
            db.register(i, name=i._name + "_snapshot", cdc=cdc,
                        trigger=i.vld & i.rd)

        # we need to add register for ".s" because otherwise there would be
        # a combinational loop
        # db.register(self.s, add_reg=True)
        # for i in self.s._interfaces:
        #     db.register(i, name="s_" + i._name + "_snapshot",
        #                 trigger=i.valid & i.ready)

        with self._paramsShared():
            self.db = db
        db.s(self.s)

        # there we actually connect the monitored interface
        # to the monitor instance
        db.apply_connections()

        propagateClkRstn(self)
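A hedged sketch of how a unit like the example above is typically elaborated to HDL; `to_rtl_str` with the default serializer is assumed to be available as in other hwtLib examples.

if __name__ == "__main__":
    from hwt.synthesizer.utils import to_rtl_str

    u = DebugBusMonitorExampleAxi()
    # convert the unit (including the embedded DebugBusMonitor) to HDL text
    print(to_rtl_str(u))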
def lookupLogic(self, ramR):
    h = self.hash
    lookup = self.lookup
    res = self.lookupRes

    # tmp storage for original key and hash for later check
    origKeyReg = HandshakedReg(LookupKeyIntf)
    origKeyReg.KEY_WIDTH.set(self.KEY_WIDTH)
    self.origKeyReg = origKeyReg
    origKeyReg.dataIn.key(lookup.key)
    if lookup.LOOKUP_ID_WIDTH:
        origKeyReg.dataIn.lookupId(lookup.lookupId)
    origKeyReg.clk(self.clk)
    origKeyReg.rst_n(self.rst_n)
    origKey = origKeyReg.dataOut

    # hash key and address with hash in table
    h.dataIn(lookup.key)
    # hash can be wider
    connect(h.dataOut, ramR.addr.data, fit=True)

    inputSlaves = [ramR.addr, origKeyReg.dataIn]
    outputMasters = [origKey, ramR.data, ]

    if self.LOOKUP_HASH:
        origHashReg = HandshakedReg(Handshaked)
        origHashReg.DATA_WIDTH.set(self.HASH_WITH)
        self.origHashReg = origHashReg
        origHashReg.clk(self.clk)
        origHashReg.rst_n(self.rst_n)
        connect(h.dataOut, origHashReg.dataIn.data, fit=True)

        inputSlaves.append(origHashReg.dataIn)
        outputMasters.append(origHashReg.dataOut)

    StreamNode(masters=[lookup],
               slaves=inputSlaves).sync()

    # propagate loaded data
    StreamNode(masters=outputMasters,
               slaves=[res]).sync()

    key, data, vldFlag = self.parseItem(ramR.data.data)

    if self.LOOKUP_HASH:
        res.hash(origHashReg.dataOut.data)

    if self.LOOKUP_KEY:
        res.key(origKey.key)

    if self.LOOKUP_ID_WIDTH:
        res.lookupId(origKey.lookupId)

    if self.DATA_WIDTH:
        res.data(data)

    res.occupied(vldFlag)
    res.found(origKey.key._eq(key) & vldFlag)
def setUp(self):
    SimTestCase.setUp(self)
    self.u = HandshakedReg(Handshaked)
    self.u.DELAY.set(self.DELAY)
    self.u.LATENCY.set(self.LATENCY)
    self.prepareUnit(self.u)
def connect_w(self, s_w: AddrDataHs, axi: Axi4, w_cntr: RtlSignal,
              CNTR_MAX: int, in_axi_t: HStruct):

    def axi_w_deparser_parametrization(u: AxiS_frameDeparser):
        # [TODO] specify _frames or maxFrameLen if required (AXI3 16beats, AXI4 256)
        u.DATA_WIDTH = axi.DATA_WIDTH
        u.ID_WIDTH = 0

    # component to create an axi-stream like packet from AddrDataHs write data
    w_builder, w_in = AxiSBuilder.deparse(self, in_axi_t, Axi4.W_CLS,
                                          axi_w_deparser_parametrization)
    w_in = w_in.data

    self.addr_defaults(axi.aw)
    if self.data_words_in_axi_word <= 1:
        self.connect_addr(s_w.addr, axi.aw.addr)
        w_in.data(s_w.data, fit=True)
        aw_sn = StreamNode([s_w], [axi.aw, w_in])
    else:
        addr, sub_addr = self.split_subaddr(s_w.addr)
        self.connect_addr(addr, axi.aw.addr)
        w_in._select.data(sub_addr)
        # sel = HsBuilder(self, w_in._select, master_to_slave=False)\
        #     .buff(self.MAX_TRANS_OVERLAP).end
        # sel.data(sub_addr)
        w_reg = HandshakedReg(Handshaked)
        w_reg.DATA_WIDTH = s_w.DATA_WIDTH
        self.w_data_reg = w_reg
        w_reg.dataIn.data(s_w.data)

        aw_sn = StreamNode([s_w], [axi.aw, w_reg.dataIn, w_in._select])
        data_items = [
            getattr(w_in, f"data{i:d}").data
            for i in range(self.data_words_in_axi_word)
        ]
        for w in data_items:
            w.vld(w_reg.dataOut.vld)
            w.data(w_reg.dataOut.data)
        # ready is not important because it is part of ._select.rd
        w_reg.dataOut.rd(Or(*[d.rd for d in data_items]))

    w_start_en = w_cntr != CNTR_MAX
    aw_sn.sync(w_start_en)
    # s_w.rd(win.rd)
    # axi.aw.valid(s_w.vld & w_start_en & ~waiting_for_w_data & win.rd)
    # win.vld(s_w.vld & w_start_en & axi.aw.ready)

    if hasattr(axi.w, "id"):
        # axi3
        axi.w(w_builder.end, exclude={axi.w.id})
        axi.w.id(0)
    else:
        # axi4
        axi.w(w_builder.end)

    If(axi.aw.ready & axi.aw.valid,
       If(~axi.b.valid,
          w_cntr(w_cntr + 1)
       )
    ).Elif(axi.b.valid,
       w_cntr(w_cntr - 1)
    )
    axi.b.ready(1)