Code example #1
    def read_data_section(self, read_ack: RtlSignal,
                          waiting_transaction_id: RtlSignal,
                          waiting_transaction_vld: RtlSignal,
                          data_copy_override: VldSynced):
        s = self.s
        m = self.m

        fb = self.frame_buff

        data_out_node = StreamNode([fb.dataOut], [s.r])
        data_out_node.sync()
        read_ack(data_out_node.ack())

        fb.dataOut_copy_frame((fb.dataOut.valid & fb.dataOut.last
                               & waiting_transaction_vld[fb.dataOut.id])
                              | data_copy_override.vld)
        If(data_copy_override.vld,
           fb.dataOut_replacement_id(data_copy_override.data)).Else(
               fb.dataOut_replacement_id(
                   waiting_transaction_id[fb.dataOut.id]))
        s.r(fb.dataOut, exclude={s.r.valid, s.r.ready})

        StreamNode(
            [m.r],
            [fb.dataIn],
        ).sync()
        fb.dataIn(m.r, exclude={m.r.valid, m.r.ready})
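All of the snippets on this page share the same core idiom: collect the master (data producing) and slave (data consuming) handshaked interfaces into a StreamNode, call sync() to generate the valid/ready wiring, and use ack() as the "a transfer happens this cycle" strobe, while the payload is connected separately. A minimal sketch of that idiom, stripped of the AXI specifics, could look as follows; the import paths and the Unit skeleton are assumptions about the hwt/hwtLib version these examples target and may need adjusting.

from hwt.interfaces.std import Handshaked
from hwt.interfaces.utils import addClkRstn
from hwt.synthesizer.unit import Unit
from hwtLib.handshaked.streamNode import StreamNode


class StreamNodePassThrough(Unit):
    """Forward one handshaked stream to another through a StreamNode (illustrative sketch)."""

    def _declr(self):
        addClkRstn(self)
        self.dataIn = Handshaked()        # master side: produces the data
        self.dataOut = Handshaked()._m()  # slave side: consumes the data

    def _impl(self):
        node = StreamNode([self.dataIn], [self.dataOut])
        # sync() drives dataIn.rd and dataOut.vld so that a word moves only when
        # the producer is valid and the consumer is ready in the same clock cycle
        node.sync()
        # the payload is connected separately, the handshake signals are owned by the node
        self.dataOut.data(self.dataIn.data)
        # node.ack() would be high exactly in the cycle a transfer happens;
        # the examples on this page use it to enable registers and counters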
Code example #2
File: addr_data_hs_to_Axi.py Project: mfkiwl/hwtLib
    def connect_r(self, s_r: RamHsR, axi: Axi4, r_cntr: RtlSignal,
                  CNTR_MAX: int, in_axi_t: Union[HStruct, HUnion]):
        self.addr_defaults(axi.ar)

        # remove id from the r channel as it is not currently supported in the frame parser
        r_tmp = AxiStream()
        r_tmp.USE_STRB = False
        r_tmp.DATA_WIDTH = axi.r.DATA_WIDTH
        self.r_tmp = r_tmp
        r_tmp(axi.r, exclude=(
            axi.r.id,
            axi.r.resp,
        ))
        r_data = AxiSBuilder(self, r_tmp)\
            .parse(in_axi_t).data

        if self.data_words_in_axi_word <= 1:
            self.connect_addr(s_r.addr.data, axi.ar.addr)

            s_r.data.data(r_data.data[s_r.DATA_WIDTH:])

            ar_sn = StreamNode([s_r.addr], [axi.ar])
            r_sn = StreamNode([r_data], [s_r.data])

        else:
            addr, sub_addr = self.split_subaddr(s_r.addr.data)
            self.connect_addr(addr, axi.ar.addr)

            sel = HsBuilder(self, r_data._select, master_to_slave=False)\
                .buff(self.MAX_TRANS_OVERLAP).end
            sel.data(sub_addr)

            data_items = [
                getattr(r_data, f"data{i:d}").data
                for i in range(self.data_words_in_axi_word)
            ]
            r_data_selected = HsBuilder.join_prioritized(self, data_items).end
            s_r.data.data(r_data_selected.data)

            ar_sn = StreamNode([s_r.addr], [axi.ar, sel])
            r_sn = StreamNode([r_data_selected], [s_r.data])

        ar_sn.sync(r_cntr != CNTR_MAX)
        r_sn.sync()
        r_en = r_sn.ack()
        If(axi.ar.ready & axi.ar.valid,
           If(~r_en, r_cntr(r_cntr + 1))).Elif(r_en, r_cntr(r_cntr - 1))
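The r_cntr logic at the end of this example (and the w_cntr logic in example #12 below) is the usual way of bounding the number of outstanding transactions: new requests are throttled through sync(cntr != CNTR_MAX), and the counter goes up on an accepted address beat and down on a completed data beat. A condensed sketch of just that pattern is below; the interface and signal names are illustrative and the imports are assumed, it is not code from addr_data_hs_to_Axi.py.

from hwt.code import If
from hwt.hdl.types.bits import Bits
from hwt.interfaces.std import Handshaked
from hwt.interfaces.utils import addClkRstn
from hwt.synthesizer.unit import Unit
from hwtLib.handshaked.streamNode import StreamNode


class OutstandingLimitSketch(Unit):
    """Allow at most MAX_PENDING requests to be in flight at once (illustrative sketch)."""

    def _declr(self):
        addClkRstn(self)
        self.req = Handshaked()            # incoming requests
        self.req_out = Handshaked()._m()   # requests forwarded downstream
        self.resp = Handshaked()           # responses coming back
        self.resp_out = Handshaked()._m()  # responses forwarded to the original requester

    def _impl(self):
        MAX_PENDING = 4
        cntr = self._reg("pending_cntr", Bits(3), def_val=0)

        req_sn = StreamNode([self.req], [self.req_out])
        # stall new requests while MAX_PENDING transactions are already pending
        req_sn.sync(cntr != MAX_PENDING)
        self.req_out.data(self.req.data)

        resp_sn = StreamNode([self.resp], [self.resp_out])
        resp_sn.sync()
        self.resp_out.data(self.resp.data)

        req_ack = self.req_out.vld & self.req_out.rd  # a request beat was accepted
        resp_ack = resp_sn.ack()                      # a response beat completed
        # +1 when only a request fires, -1 when only a response fires,
        # unchanged when both or neither happen in the same cycle
        If(req_ack & ~resp_ack,
           cntr(cntr + 1)
        ).Elif(~req_ack & resp_ack,
           cntr(cntr - 1)
        )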
Code example #3
    def connect_r_fifo(self, avalon: AvalonMM, axi: Axi4):
        # buffer for read data to allow forward dispatch of the read requests
        # the availability of space in the fifo is checked using the r_data_fifo_capacity counter
        f = HandshakedFifo(Handshaked)
        f.DEPTH = self.R_DATA_FIFO_DEPTH
        f.DATA_WIDTH = self.DATA_WIDTH
        self.r_data_fifo = f

        # f.dataIn.rd is ignored because if there were no space, the request would not have been dispatched in the first place
        f.dataIn.data(avalon.readData)
        f.dataIn.vld(avalon.readDataValid)

        sf = self.r_size_fifo
        wordIndexCntr = self._reg(
            "wordIndexCntr",
            HStruct(
                (sf.dataOut.len._dtype, "len"),
                (axi.r.id._dtype, "id"),
                (BIT, "vld")
            ),
            def_val={"vld": 0}
        )

        r_out_node = StreamNode([f.dataOut], [axi.r])
        r_out_node.sync(wordIndexCntr.vld)

        # load word index counter if it is invalid else decrement on data transaction
        newSizeAck = (~wordIndexCntr.vld | (wordIndexCntr.len._eq(0) & r_out_node.ack()))
        If(newSizeAck,
            wordIndexCntr.id(sf.dataOut.id),
            wordIndexCntr.len(sf.dataOut.len),
            wordIndexCntr.vld(sf.dataOut.vld),
        ).Elif(r_out_node.ack(),
            wordIndexCntr.len(wordIndexCntr.len - 1),
            wordIndexCntr.vld(wordIndexCntr.len != 0),
        )
        sf.dataOut.rd(newSizeAck)

        axi.r.id(wordIndexCntr.id)
        axi.r.data(f.dataOut.data)
        axi.r.resp(RESP_OKAY)
        axi.r.last(wordIndexCntr.len._eq(0))
        return rename_signal(self, r_out_node.ack(), "r_data_ack")
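The wordIndexCntr register above packs several related fields (len, id, vld) into one HStruct register so they share a single load condition and only the fields that need it get a reset value. A reduced, hedged sketch of that idiom follows, simplified to a free-running down-counter reloaded from a handshaked input; the field names are illustrative and the imports are assumed.

from hwt.code import If
from hwt.hdl.types.defs import BIT
from hwt.hdl.types.struct import HStruct
from hwt.interfaces.std import Handshaked
from hwt.interfaces.utils import addClkRstn
from hwt.synthesizer.unit import Unit


class StructRegSketch(Unit):
    """Keep a small burst state (len + vld) in a single struct register (illustrative sketch)."""

    def _declr(self):
        addClkRstn(self)
        self.sizeIn = Handshaked()  # a new count arrives on this channel

    def _impl(self):
        state = self._reg(
            "burstState",
            HStruct(
                (self.sizeIn.data._dtype, "len"),  # remaining count, same width as the input
                (BIT, "vld"),                      # the register currently holds a valid count
            ),
            # only "vld" needs a defined reset value, "len" may start undefined
            def_val={"vld": 0},
        )
        # accept a new count only while the register is empty
        self.sizeIn.rd(~state.vld)
        If(~state.vld,
           state.len(self.sizeIn.data),
           state.vld(self.sizeIn.vld),
        ).Elif(state.len._eq(0),
           # counted down to zero, invalidate and wait for the next count
           state.vld(0),
        ).Else(
           # count down once per clock cycle while the register is valid
           state.len(state.len - 1),
        )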
Code example #4
File: structWriter.py Project: mfkiwl/hwtLib
        def propagateRequest(frame, indx):
            inNode = StreamNode(slaves=[req, ackPropageteInfo.dataIn])
            ack = inNode.ack()
            isLastFrame = indx == len(self._frames) - 1
            statements = [
                req.addr(_set.data + frame.startBitAddr // 8),
                req.len(frame.getWordCnt() - 1),
                self.driveReqRem(
                    req, frame.parts[-1].endOfPart - frame.startBitAddr),
                ackPropageteInfo.dataIn.data(SKIP if indx != 0 else PROPAGATE),
                inNode.sync(_set.vld),
                _set.rd(ack if isLastFrame else 0),
            ]

            return statements, ack & _set.vld
Code example #5
    def ar_dispatch(self):
        """
        Send a read request on AXI and store the transaction into the state array and ooo_fifo for a later wake up
        """
        ooo_fifo = self.ooo_fifo
        ar = self.m.ar
        din = self.dataIn
        assert din.addr._dtype.bit_length() == self.ADDR_WIDTH - self.ADDR_OFFSET_W, (
            din.addr._dtype.bit_length(), self.ADDR_WIDTH, self.ADDR_OFFSET_W)
        dataIn_reg = HandshakedReg(din.__class__)
        dataIn_reg._updateParamsFrom(din)
        self.dataIn_reg = dataIn_reg
        StreamNode(
            [din],
            [dataIn_reg.dataIn, ooo_fifo.write_confirm]
        ).sync()
        dataIn_reg.dataIn(din, exclude=[din.rd, din.vld])

        ar_node = StreamNode(
            [dataIn_reg.dataOut, ooo_fifo.read_execute],
            [ar]
        )
        ar_node.sync()

        state_arr = self.state_array
        state_write = state_arr.port[0]
        state_write.en(ar_node.ack())
        state_write.addr(ooo_fifo.read_execute.index)

        din_data = dataIn_reg.dataOut

        state_write.din(packIntf(din_data, exclude=[din_data.rd, din_data.vld]))

        ar.id(ooo_fifo.read_execute.index)
        ar.addr(Concat(din_data.addr, Bits(self.ADDR_OFFSET_W).from_py(0)))
        self._axi_addr_defaults(ar, 1)
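One detail worth spelling out in this example (also used in examples #1 and #11): the payload of an interface is copied with dst(src, exclude=[...]) while the excluded rd/vld (or ready/valid) signals stay under the control of the StreamNode; without the exclude, the bulk connect and sync() would both try to drive the same handshake wires. A hedged sketch of that split, fanning one input out to two outputs (the names and imports are assumptions, not code from this project):

from hwt.interfaces.std import Handshaked
from hwt.interfaces.utils import addClkRstn
from hwt.synthesizer.unit import Unit
from hwtLib.handshaked.streamNode import StreamNode


class ForkWithExcludeSketch(Unit):
    """Fan one stream out to two outputs: payload by bulk connect, handshake by StreamNode."""

    def _declr(self):
        addClkRstn(self)
        self.dataIn = Handshaked()
        self.out0 = Handshaked()._m()
        self.out1 = Handshaked()._m()

    def _impl(self):
        din = self.dataIn
        # the node fires all three handshakes in the same cycle and only when
        # both outputs are ready, so neither output ever sees a duplicate word
        StreamNode([din], [self.out0, self.out1]).sync()
        # copy everything except the handshake signals, which the node already drives
        self.out0(din, exclude=[din.rd, din.vld])
        self.out1(din, exclude=[din.rd, din.vld])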
Code example #6
File: w.py Project: mfkiwl/hwtLib
    def axiWHandler(self, wErrFlag: RtlSignal):
        w = self.axi.w
        wIn = self.driver.w
        wInfo = self.writeInfoFifo.dataOut
        bInfo = self.bInfoFifo.dataIn

        dataAck = self._sig("dataAck")
        inLast = wIn.last
        if hasattr(w, "id"):
            # AXI3 has id signal, AXI4 does not
            w.id(self.ID_VAL)

        if self.isAlwaysAligned():
            w.data(wIn.data)
            w.strb(wIn.strb)
            if self.axi.LEN_WIDTH:
                doSplit = wIn.last
            else:
                doSplit = BIT.from_py(1)

            waitForShift = BIT.from_py(0)
        else:
            isFirst = self._reg("isFirstData", def_val=1)
            prevData = self._reg("prevData",
                                 HStruct(
                                     (wIn.data._dtype, "data"),
                                     (wIn.strb._dtype, "strb"),
                                     (BIT, "waitingForShift"),
                                 ),
                                 def_val={"waitingForShift": 0})

            waitForShift = prevData.waitingForShift
            isShifted = (wInfo.shift != 0) | (wInfo.SHIFT_OPTIONS[0] != 0)
            wInWillWaitForShift = wIn.valid & wIn.last & isShifted & ~prevData.waitingForShift & ~wInfo.drop_last_word

            If(
                StreamNode([wIn, wInfo], [w, bInfo],
                           skipWhen={
                               wIn: waitForShift
                           }).ack() & ~wErrFlag,
                # the data feed into prevData is stalled if we need to dispatch
                # the remainder of the previous word which was not yet dispatched due to the data shift
                # the last data from wIn is consumed on wIn.last, however there is a 1 beat stall
                # for wIn if the transaction was not aligned. wInfo and bInfo channels are activated
                # after the last beat of wOut is sent
                If(
                    ~prevData.waitingForShift,
                    prevData.data(wIn.data),
                    prevData.strb(wIn.strb),
                ),
                waitForShift(wInWillWaitForShift),
                isFirst((isShifted & waitForShift)
                        | ((~isShifted | wInfo.drop_last_word) & wIn.last)))

            def applyShift(sh):
                if sh == 0 and wInfo.SHIFT_OPTIONS[0] == 0:
                    return [
                        w.data(wIn.data),
                        w.strb(wIn.strb),
                    ]
                else:
                    rem_w = self.DATA_WIDTH - sh
                    return [
                        # wIn.data starts at bit 0, we need to shift it by sh bits
                        # in the first word the prefix is invalid, in the rest of the frames
                        # it is taken from the previous data
                        If(
                            waitForShift,
                            w.data(
                                Concat(
                                    Bits(rem_w).from_py(None),
                                    prevData.data[:rem_w])),
                        ).Else(
                            w.data(
                                Concat(wIn.data[rem_w:],
                                       prevData.data[:rem_w])), ),
                        If(
                            waitForShift,
                            # wait until the remainder of the previous data is sent
                            w.strb(
                                Concat(
                                    Bits(rem_w // 8).from_py(0),
                                    prevData.strb[:rem_w // 8])),
                        ).Elif(
                            isFirst,
                            # ignore previous data
                            w.strb(
                                Concat(wIn.strb[rem_w // 8:],
                                       Bits(sh // 8).from_py(0))),
                        ).Else(
                            # take what is left from prev data and append from wIn
                            w.strb(
                                Concat(wIn.strb[rem_w // 8:],
                                       prevData.strb[:rem_w // 8])), )
                    ]

            Switch(wInfo.shift).add_cases([
                (i, applyShift(sh)) for i, sh in enumerate(wInfo.SHIFT_OPTIONS)
            ]).Default(
                w.data(None),
                w.strb(None),
            )
            inLast = rename_signal(
                self,
                isShifted._ternary(
                    waitForShift | (wIn.last & wInfo.drop_last_word),
                    wIn.last), "inLast")
            doSplit = inLast

        if self.useTransSplitting():
            wordCntr = self._reg("wWordCntr", self.getLen_t(), 0)
            doSplit = rename_signal(
                self,
                wordCntr._eq(self.getAxiLenMax()) | doSplit, "doSplit1")

            If(
                StreamNode([wInfo, wIn], [bInfo, w]).ack() & ~wErrFlag,
                If(doSplit, wordCntr(0)).Else(wordCntr(wordCntr + 1)))
        if self.AXI_CLS.LEN_WIDTH != 0:
            w.last(doSplit)

        # if this frame was split into multiple frames, wIn.last will equal 0
        bInfo.isLast(inLast)
        dataNode = StreamNode(masters=[wIn, wInfo],
                              slaves=[bInfo, w],
                              skipWhen={
                                  wIn: waitForShift,
                              },
                              extraConds={
                                  wIn: ~waitForShift,
                                  wInfo: doSplit,
                                  bInfo: doSplit,
                                  w: ~wErrFlag
                              })
        dataAck(dataNode.ack())
        dataNode.sync()
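Examples #6 to #10 build conditional handshakes with extraConds and skipWhen: extraConds[intf] adds a condition that must also hold for intf to take part in the transaction, while skipWhen[intf] drops intf from the node entirely when its condition is true, so it neither blocks the other interfaces nor gets a valid/ready pulse. A small demultiplexer sketch using that mechanism is below; the routing on the data MSB, the names and the imports are illustrative assumptions.

from hwt.interfaces.std import Handshaked
from hwt.interfaces.utils import addClkRstn
from hwt.synthesizer.unit import Unit
from hwtLib.handshaked.streamNode import StreamNode


class HsDemuxSketch(Unit):
    """Route each input word to out0 or out1 based on the MSB of the data (illustrative sketch)."""

    def _declr(self):
        addClkRstn(self)
        self.dataIn = Handshaked()
        self.out0 = Handshaked()._m()
        self.out1 = Handshaked()._m()

    def _impl(self):
        din = self.dataIn
        sel = din.data[din.data._dtype.bit_length() - 1]  # routing bit (MSB of the payload)

        StreamNode(
            [din],
            [self.out0, self.out1],
            extraConds={
                self.out0: ~sel,  # out0 takes part only when sel == 0
                self.out1: sel,   # out1 takes part only when sel == 1
            },
            skipWhen={
                self.out0: sel,   # when sel == 1, out0 is skipped and cannot stall the node
                self.out1: ~sel,
            },
        ).sync()

        self.out0.data(din.data)
        self.out1.data(din.data)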
Code example #7
File: base.py Project: mfkiwl/hwtLib
    def addrHandler(self, req: AddrSizeHs,
                    axiA: Union[Axi3_addr, Axi3Lite_addr, Axi4_addr,
                                Axi4Lite_addr], transInfo: HandshakeSync,
                    errFlag: RtlSignal):
        """
        Propagate read/write requests from req to axi address channel
        and store extra info using transInfo interface.
        """
        r, s = self._reg, self._sig

        self.axiAddrDefaults(axiA)
        if self.ID_WIDTH:
            axiA.id(self.ID_VAL)

        alignmentError = self.hasAlignmentError(req.addr)

        HAS_LEN = self._axiCls.LEN_WIDTH > 0
        if self.useTransSplitting():
            # if the axi len is smaller we have to use transaction splitting,
            # that means we need to split requests from driver.req into multiple axi requests
            transPartPending = r("transPartPending", def_val=0)
            addrTmp = r("addrTmp", req.addr._dtype)

            dispatchNode = StreamNode([
                req,
            ], [axiA, transInfo],
                                      skipWhen={req: transPartPending.next},
                                      extraConds={
                                          req: ~transPartPending.next,
                                          axiA: req.vld & ~alignmentError,
                                          transInfo: req.vld & ~alignmentError
                                      })
            dispatchNode.sync(~errFlag)
            ack = s("ar_ack")
            ack(dispatchNode.ack() & ~errFlag)

            LEN_MAX = max(mask(self._axiCls.LEN_WIDTH), 0)
            reqLen = s("reqLen", self.getLen_t())
            reqLenRemaining = r("reqLenRemaining", reqLen._dtype)

            If(reqLen > LEN_MAX, *([axiA.len(LEN_MAX)] if HAS_LEN else []),
               self.storeTransInfo(transInfo, 0)).Else(
                   # connect only lower bits of len
                   *([axiA.len(reqLen, fit=True)] if HAS_LEN else []),
                   self.storeTransInfo(transInfo, 1))

            # dispatchNode not used because of combinational loop
            If(
                StreamNode([
                    req,
                ], [axiA, transInfo]).ack() & ~errFlag,
                If(reqLen > LEN_MAX, reqLenRemaining(reqLen - (LEN_MAX + 1)),
                   transPartPending(1)).Else(transPartPending(0)))
            reqLenSwitch = If(
                ~req.vld,
                reqLen(None),
            )
            if not self.isAlwaysAligned():
                crossesWordBoundary = self.isCrossingWordBoundary(
                    req.addr, req.rem)
                reqLenSwitch.Elif(
                    ~self.addrIsAligned(req.addr) & crossesWordBoundary,
                    reqLen(fitTo(req.len, reqLen, shrink=False) + 1),
                )
            reqLenSwitch.Else(reqLen(fitTo(req.len, reqLen, shrink=False)), )

            ADDR_STEP = self.getBurstAddrOffset()
            If(
                transPartPending,
                axiA.addr(self.addrAlign(addrTmp)),
                If(ack, addrTmp(addrTmp + ADDR_STEP)),
                reqLen(reqLenRemaining),
            ).Else(
                axiA.addr(self.addrAlign(req.addr)),
                addrTmp(req.addr + ADDR_STEP),
                reqLenSwitch,
            )

        else:
            # if axi len is wider we can directly translate requests to axi
            axiA.addr(self.addrAlign(req.addr))
            if req.MAX_LEN > 0:
                lenDrive = axiA.len(fitTo(req.len, axiA.len, shrink=False))

                if not self.isAlwaysAligned():
                    crossesWordBoundary = self.isCrossingWordBoundary(
                        req.addr, req.rem)
                    If(
                        ~self.addrIsAligned(req.addr) & crossesWordBoundary,
                        axiA.len(fitTo(req.len, axiA.len) + 1),
                    ).Else(lenDrive, )
            else:
                if HAS_LEN:
                    axiA.len(0)

            self.storeTransInfo(transInfo, 1)
            StreamNode(masters=[req],
                       slaves=[axiA, transInfo],
                       extraConds={
                           axiA: ~alignmentError,
                           transInfo: ~alignmentError,
                       }).sync(~errFlag)
Code example #8
    def flush_or_read_node(
            self,
            d_arr_r: RamHsR,
            d_arr_w: AddrDataHs,
            st2_out: HsStructIntf,
            data_arr_read: Axi4_r,
            tag_update: AxiCacheTagArrayUpdateIntf,  # out
    ):
        ########################## st1 - post (victim flushing, read forwarding) ######################
        in_w = AxiSBuilder(self, self.s.w)\
            .buff(self.tag_array.LOOKUP_LATENCY + 4)\
            .end

        st2 = st2_out.data
        d_arr_w.addr(
            self.addr_in_data_array(st2.victim_way,
                                    self.parse_addr(st2.replacement_addr)[1]))
        data_arr_read_data = d_arr_r.data  # HsBuilder(self, d_arr_r.data).buff(1, latency=(1, 2)).end
        d_arr_w.data(in_w.data)
        d_arr_w.mask(in_w.strb)

        self.s.b.id(st2.write_id)
        self.s.b.resp(RESP_OKAY)

        data_arr_read.id(st2.read_id)
        data_arr_read.data(data_arr_read_data.data)
        data_arr_read.resp(RESP_OKAY)
        data_arr_read.last(1)

        m = self.m
        m.aw.addr(st2.victim_addr)
        m.aw.id(st2.write_id)
        m.aw.len(0)
        self.axiAddrDefaults(m.aw)

        m.w.data(data_arr_read_data.data)
        m.w.strb(mask(m.w.data._dtype.bit_length() // 8))
        m.w.last(1)

        # flushing needs to have higher priority than read in order
        # to prevent deadlock
        # write replacement after victim load with higher priority
        # else if found just write the data to data array
        is_flush = st2.data_array_op._eq(data_trans_t.write_and_flush)
        contains_write = rename_signal(
            self,
            In(st2.data_array_op, [
                data_trans_t.write, data_trans_t.write_and_flush,
                data_trans_t.read_and_write
            ]), "contains_write")
        contains_read = rename_signal(
            self,
            In(st2.data_array_op, [
                data_trans_t.read, data_trans_t.write_and_flush,
                data_trans_t.read_and_write
            ]), "contains_read")
        contains_read_data = rename_signal(
            self,
            In(st2.data_array_op,
               [data_trans_t.read, data_trans_t.read_and_write]),
            "contains_read_data")

        flush_or_read_node = StreamNode(
            [st2_out, data_arr_read_data, in_w
             ],  # collect read data from data array, collect write data
            [data_arr_read, m.aw, m.w, d_arr_w, self.s.b
             ],  # to read block or to slave connected on "m" interface
            # write data to data array and send write acknowledge
            extraConds={
                data_arr_read_data: contains_read,
                in_w: contains_write,
                data_arr_read: contains_read_data,
                m.aw: is_flush,
                m.w: is_flush,
                d_arr_w: contains_write,
                self.s.b: contains_write,
            },
            skipWhen={
                data_arr_read_data: ~contains_read,
                in_w: ~contains_write,
                data_arr_read: ~contains_read_data,
                m.aw: ~is_flush,
                m.w: ~is_flush,
                d_arr_w: ~contains_write,
                self.s.b: ~contains_write,
            })
        flush_or_read_node.sync()
        m.b.ready(1)

        tag_update.vld(st2_out.vld & contains_write)
        tag_update.delete(0)
        tag_update.way_en(binToOneHot(st2.victim_way))
        tag_update.addr(st2.replacement_addr)
        # [TODO] initial clean
        lru_array_set = self.lru_array.set
        lru_array_set.addr(None)
        lru_array_set.data(None)
        lru_array_set.vld(0)
Code example #9
    def data_array_io(
            self,
            aw_lru_incr: IndexWayHs,  # out
            aw_tagRes: AxiCacheTagArrayLookupResIntf,  # in
            victim_req: AddrHs,
            victim_way: Handshaked,  # out, in
            data_arr_read_req: IndexWayHs,
            data_arr_read: Axi4_r,  # in, out
            data_arr_r_port: BramPort_withoutClk,
            data_arr_w_port: BramPort_withoutClk,  # out, out
            tag_update: AxiCacheTagArrayUpdateIntf  # out
    ):
        """
        :ivar aw_lru_incr: an interface to increment LRU for write channel
        :ivar victim_req: an interface to get a victim from LRU array for a specified index
        :ivar victim_way: return interface for victim_req
        :ivar aw_tagRes: an interface with the results from the tag lookup
        :ivar data_arr_read_req: an input interface with read requests from read section
        :ivar data_arr_read: an output interface with a read data to read section
        :ivar data_arr_r_port: read port of main data array
        :ivar data_arr_w_port: write port of main data array
        """
        # note that the lru update happens even if the data is stalled
        # but that is not a problem because it won't change the order of the usage
        # of the cacheline
        self.incr_lru_on_hit(aw_lru_incr, aw_tagRes)

        st0 = self._reg(
            "victim_load_status0",
            HStruct(
                (self.s.aw.id._dtype, "write_id"
                 ),  # the original id and address of a write transaction
                (self.s.aw.addr._dtype, "replacement_addr"),
                (aw_tagRes.TAG_T[aw_tagRes.WAY_CNT], "tags"),
                (BIT, "tag_found"),
                (BIT, "had_empty"),  # had some empty tag
                (aw_tagRes.way._dtype, "found_way"),
                (BIT, "valid"),
            ),
            def_val={
                "valid": 0,
            })
        # resolve if we need to select a victim and optionally ask for it
        st0_ready = self._sig("victim_load_status0_ready")
        has_empty = rename_signal(self,
                                  Or(*(~t.valid for t in aw_tagRes.tags)),
                                  "has_empty")
        If(
            st0_ready,
            st0.write_id(aw_tagRes.id),
            st0.replacement_addr(aw_tagRes.addr),
            st0.tags(aw_tagRes.tags),
            st0.tag_found(aw_tagRes.found),
            st0.found_way(aw_tagRes.way),
            st0.had_empty(has_empty),
            # this register is being flushed, the values can become invalid
            # the st0.valid is used to detect this state
            st0.valid(aw_tagRes.vld),
        )
        victim_req.addr(self.parse_addr(aw_tagRes.addr)[1])
        tag_check_node = StreamNode(
            [aw_tagRes], [victim_req],
            skipWhen={
                victim_req: aw_tagRes.vld & (aw_tagRes.found | has_empty)
            },
            extraConds={victim_req: ~aw_tagRes.found & ~has_empty})

        st1_ready = self._sig("victim_load_status1_ready")
        tag_check_node.sync(~st0.valid | st1_ready)
        tag_check_node_ack = rename_signal(self, tag_check_node.ack(),
                                           "tag_check_node_ack")
        st0_ready((st0.valid & tag_check_node_ack & st1_ready) | ~st0.valid
                  | st1_ready)

        victim_load_st = HStruct(
            # an address constructed from the original tag in the cache which is being replaced
            (self.s.aw.addr._dtype, "victim_addr"),
            # new data to write to data_array
            # (replacement data is still in in_w buffer because it was not consumed
            #  if the tag was not found)
            (aw_tagRes.way._dtype, "victim_way"),
            (self.s.ar.id._dtype, "read_id"),
            (self.s.aw.id._dtype, "write_id"),
            (self.s.aw.addr._dtype, "replacement_addr"
             ),  # the original address used to resolve new tag
            (Bits(2), "data_array_op"),  # type of operation with data_array
        )
        ########################## st1 - pre (read request resolution, victim address resolution) ##############
        d_arr_r, d_arr_w = self.instantiate_data_array_to_hs(
            data_arr_r_port, data_arr_w_port)

        # :note: flush with higher priority than regular read
        need_to_flush = rename_signal(
            self, st0.valid & (~st0.had_empty & ~st0.tag_found),
            "need_to_flush")

        If(
            need_to_flush,
            d_arr_r.addr.data(
                self.addr_in_data_array(
                    victim_way.data,
                    self.parse_addr(st0.replacement_addr)[1])),
        ).Else(
            d_arr_r.addr.data(
                self.addr_in_data_array(data_arr_read_req.way,
                                        data_arr_read_req.index)))
        _victim_way = self._sig("victim_way_tmp", Bits(log2ceil(self.WAY_CNT)))
        _victim_tag = self._sig("victim_tag_tmp", Bits(self.TAG_W))
        SwitchLogic(
            [
                # select first empty tag
                (~tag.valid, [
                    _victim_way(i),
                    _victim_tag(tag.tag),
                ]) for i, tag in enumerate(st0.tags)
            ],
            default=[
                # select a victim specified by victim_way
                _victim_way(victim_way.data),
                SwitchLogic([(victim_way.data._eq(i), _victim_tag(tag.tag))
                             for i, tag in enumerate(st0.tags)],
                            default=_victim_tag(None))
            ])
        victim_load_status = HObjList(
            HandshakedReg(HsStructIntf) for _ in range(2))
        for i, st in enumerate(victim_load_status):
            st.T = victim_load_st
            if i == 0:
                st.LATENCY = (1, 2)  # to break a ready chain
        self.victim_load_status = victim_load_status

        st1_in = victim_load_status[0].dataIn.data
        # placed between st0, st1
        pure_write = rename_signal(
            self, st0.valid & ~need_to_flush & ~data_arr_read_req.vld,
            "pure_write")
        pure_read = rename_signal(self, ~st0.valid & data_arr_read_req.vld,
                                  "pure_read")
        read_plus_write = rename_signal(
            self, st0.valid & ~need_to_flush & data_arr_read_req.vld,
            "read_plus_write")
        flush_write = rename_signal(
            self, st0.valid & need_to_flush & ~data_arr_read_req.vld,
            "flush_write")
        read_flush_write = rename_signal(
            self, st0.valid & need_to_flush & data_arr_read_req.vld,
            "read_flush_write")  # not dispatched at once

        read_req_node = StreamNode(
            [victim_way, data_arr_read_req],
            [d_arr_r.addr, victim_load_status[0].dataIn],
            extraConds={
                victim_way:
                flush_write | read_flush_write,  # 0
                # only write without flush       not write at all but read request
                data_arr_read_req:
                pure_read | read_plus_write,  # pure_read | read_plus_write, #
                d_arr_r.addr:
                pure_read | read_plus_write | flush_write |
                read_flush_write,  # need_to_flush | data_arr_read_req.vld, # 1
                # victim_load_status[0].dataIn: st0.valid | data_arr_read_req.vld,
            },
            skipWhen={
                victim_way: pure_write | pure_read | read_plus_write,
                data_arr_read_req: pure_write | flush_write | read_flush_write,
                d_arr_r.addr: pure_write,
            })
        read_req_node.sync()
        st1_ready(victim_load_status[0].dataIn.rd & read_req_node.ack())

        st1_in.victim_addr(
            self.deparse_addr(_victim_tag,
                              self.parse_addr(st0.replacement_addr)[1], 0))
        st1_in.victim_way(st0.tag_found._ternary(st0.found_way, _victim_way)),
        st1_in.read_id(data_arr_read_req.id)
        st1_in.write_id(st0.write_id)
        st1_in.replacement_addr(st0.replacement_addr)
        If(pure_write, st1_in.data_array_op(data_trans_t.write)).Elif(
            pure_read, st1_in.data_array_op(data_trans_t.read)).Elif(
                read_plus_write,
                st1_in.data_array_op(data_trans_t.read_and_write)
            ).Else(  # .Elif(flush_write | read_flush_write,
                st1_in.data_array_op(data_trans_t.write_and_flush))
        # If(st0.valid,
        #    If(need_to_flush,
        #        st1_in.data_array_op(data_trans_t.write_and_flush)
        #    ).Elif(st0.tag_found & data_arr_read_req.vld,
        #        st1_in.data_array_op(data_trans_t.read_and_write)
        #    ).Else(
        #        st1_in.data_array_op(data_trans_t.write)
        #    )
        # ).Else(
        #    st1_in.data_array_op(data_trans_t.read)
        # )

        victim_load_status[1].dataIn(victim_load_status[0].dataOut)

        self.flush_or_read_node(d_arr_r, d_arr_w,
                                victim_load_status[1].dataOut, data_arr_read,
                                tag_update)
Code example #10
    def main_pipeline(self):
        PIPELINE_CONFIG = self.PIPELINE_CONFIG
        self.pipeline = pipeline = [
            OOOOpPipelineStage(i, f"st{i:d}", self)
            for i in range(PIPELINE_CONFIG.WAIT_FOR_WRITE_ACK + 1)
        ]

        state_read = self.state_array.port[1]
        self.collision_detector(pipeline)
        HAS_TRANS_ST = self.TRANSACTION_STATE_T is not None

        for i, st in enumerate(pipeline):
            if i > 0:
                st_prev = pipeline[i - 1]

            if i < len(pipeline) - 1:
                st_next = pipeline[i + 1]

            # :note: pipeline stages described in PIPELINE_CONFIG enum
            if i == PIPELINE_CONFIG.READ_DATA_RECEIVE:
                # :note: we can not apply write data forwarding there because we do not know the original address yet
                r = self.m.r
                state_read.addr(r.id)
                st.addr = state_read.dout[self.MAIN_STATE_INDEX_WIDTH:]
                if HAS_TRANS_ST:
                    low = self.MAIN_STATE_INDEX_WIDTH
                    st.transaction_state = state_read.dout[:low]._reinterpret_cast(self.TRANSACTION_STATE_T)

                r.ready(st.in_ready)
                st.in_valid(r.valid)
                st.out_ready(st_next.in_ready)
                state_read.en(st.load_en)
                If(st.load_en,
                    st.id(r.id),
                    self.data_load(r, st),
                )

            elif i <= PIPELINE_CONFIG.STATE_LOAD:
                If(st.load_en,
                    st.id(st_prev.id),
                    st.addr(st_prev.addr),
                    self.propagate_trans_st(st_prev, st),
                )
                self.apply_data_write_forwarding(st, st.load_en)
                st.in_valid(st_prev.valid)
                st.out_ready(st_next.in_ready)

            elif i == PIPELINE_CONFIG.WRITE_BACK:
                If(st.load_en,
                    st.id(st_prev.id),
                    st.addr(st_prev.addr),
                    self.propagate_trans_st(st_prev, st),
                )
                self.apply_data_write_forwarding(st, st.load_en, self.main_op)
                aw = self.m.aw
                w = self.m.w

                cancel = rename_signal(self, self.write_cancel(st), "write_back_cancel")
                st.in_valid(st_prev.valid)
                st.out_ready(st_next.in_ready & ((aw.ready & w.ready) | cancel))

                StreamNode(
                    [], [aw, w],
                    extraConds={
                        aw: st.valid & st_next.in_ready & ~cancel,
                        w: st.valid & st_next.in_ready & ~cancel
                    },
                    skipWhen={
                        aw:cancel,
                        w:cancel,
                    }
                ).sync()

                self._axi_addr_defaults(aw, 1)
                aw.id(st.id)
                aw.addr(Concat(st.addr, Bits(self.ADDR_OFFSET_W).from_py(0)))

                st_data = st.data
                if not isinstance(st_data, RtlSignal):
                    st_data = packIntf(st_data)

                w.data(st_data._reinterpret_cast(w.data._dtype))
                w.strb(mask(self.DATA_WIDTH // 8))
                w.last(1)

            elif i > PIPELINE_CONFIG.WRITE_BACK and i != PIPELINE_CONFIG.WAIT_FOR_WRITE_ACK:
                if i == PIPELINE_CONFIG.WRITE_BACK + 1:
                    st.in_valid(st_prev.valid & ((aw.ready & w.ready) | cancel))
                else:
                    st.in_valid(st_prev.valid)
                st.out_ready(st_next.in_ready)

                If(st.load_en,
                   st.id(st_prev.id),
                   st.addr(st_prev.addr),
                   st.data(st_prev.data),
                   self.propagate_trans_st(st_prev, st),
                )
            elif i == PIPELINE_CONFIG.WAIT_FOR_WRITE_ACK:
                If(st.load_en,
                    st.id(st_prev.id),
                    st.addr(st_prev.addr),
                    self.propagate_trans_st(st_prev, st),
                    st.data(st_prev.data),
                )
                dout = self.dataOut
                b = self.m.b
                confirm = self.ooo_fifo.read_confirm
                cancel = self.write_cancel(st)

                # omitting st_next.ready as there is no next stage
                w_ack_node = StreamNode(
                    [b],
                    [dout, confirm],
                    extraConds={
                        dout: st.valid,
                        b: st.valid & ~cancel,
                        confirm: st.valid,
                    },
                    skipWhen={
                        b: st.valid & cancel,
                    }
                )
                w_ack_node.sync()
                st.in_valid(st_prev.valid)
                st.out_ready((b.valid | cancel) & dout.rd & confirm.rd)

                dout.addr(st.addr)
                dout.data(st.data)
                if HAS_TRANS_ST:
                    dout.transaction_state(st.transaction_state)

                confirm.data(st.id)
Code example #11
    def read_request_section(self, read_ack: RtlSignal, item_vld: RtlSignal,
                             waiting_transaction_id: RtlSignal,
                             waiting_transaction_vld: RtlSignal,
                             data_copy_override: VldSynced):
        s = self.s
        m = self.m
        addr_cam = self.addr_cam
        ITEMS = addr_cam.ITEMS
        addr_cam_out = self.add_addr_cam_out_reg(item_vld)

        with self._paramsShared():
            s_ar_tmp = self.s_ar_tmp = AxiSReg(s.AR_CLS)

        last_cam_insert_match = self._reg("last_cam_insert_match",
                                          Bits(ITEMS),
                                          def_val=0)
        match_res = rename_signal(
            self, item_vld & (addr_cam_out.data | last_cam_insert_match)
            & ~waiting_transaction_vld, "match_res")
        blocking_access = rename_signal(
            self, s.ar.valid & (item_vld[s.ar.id] |
                                (s_ar_tmp.dataOut.valid &
                                 (s.ar.id._eq(s_ar_tmp.dataOut.id)))),
            "blocking_access")
        s_ar_node = StreamNode(
            [s.ar],
            [addr_cam.match[0], s_ar_tmp.dataIn],
        )
        s_ar_node.sync(~blocking_access)
        # s_ar_node_ack = s_ar_node.ack() & ~blocking_access
        s_ar_tmp.dataIn(s.ar, exclude={s.ar.valid, s.ar.ready})

        parent_transaction_id = oneHotToBin(self, match_res,
                                            "parent_transaction_id")

        m_ar_node = StreamNode(
            [s_ar_tmp.dataOut, addr_cam_out],
            [m.ar],
            extraConds={m.ar: match_res._eq(0)},
            skipWhen={m.ar: match_res != 0},
        )
        m_ar_node.sync()
        m.ar(s_ar_tmp.dataOut, exclude={m.ar.valid, m.ar.ready})
        addr_cam.match[0].data(s.ar.addr[:self.CACHE_LINE_OFFSET_BITS])
        ar_ack = rename_signal(self, m_ar_node.ack(), "ar_ack")

        # insert into cam on empty position specified by id of this transaction
        acw = addr_cam.write
        acw.addr(s_ar_tmp.dataOut.id)
        acw.data(s_ar_tmp.dataOut.addr[:self.CACHE_LINE_OFFSET_BITS])
        acw.vld(addr_cam_out.vld)
        #If(s_ar_node_ack,
        last_cam_insert_match(
            binToOneHot(
                s_ar_tmp.dataOut.id,
                en=~blocking_access & s.ar.valid & s_ar_tmp.dataOut.valid
                & s_ar_tmp.dataOut.addr[:self.CACHE_LINE_OFFSET_BITS]._eq(
                    s.ar.addr[:self.CACHE_LINE_OFFSET_BITS])))
        #)

        for trans_id in range(ITEMS):
            # it becomes ready if it is requested on the "s" interface
            this_trans_start = s_ar_tmp.dataOut.id._eq(trans_id) & \
                (data_copy_override.vld | ar_ack)
            # item becomes invalid if we read last data word
            this_trans_end = read_ack & s.r.id._eq(trans_id) & s.r.last
            this_trans_end = rename_signal(self, this_trans_end,
                                           f"this_trans_end{trans_id:d}")
            item_vld[trans_id](apply_set_and_clear(item_vld[trans_id],
                                                   this_trans_start,
                                                   this_trans_end))

            waiting_transaction_start = (ar_ack & (match_res != 0)
                                         & parent_transaction_id._eq(trans_id)
                                         & ~this_trans_end)
            # note: this_trans_end in this context is for the parent transaction
            # which was not started just now, so it may be ending just now
            waiting_transaction_start = rename_signal(
                self, waiting_transaction_start,
                f"waiting_transaction_start{trans_id:d}")
            _waiting_transaction_vld = apply_set_and_clear(
                waiting_transaction_vld[trans_id], waiting_transaction_start,
                this_trans_end)
            waiting_transaction_vld[trans_id](rename_signal(
                self, _waiting_transaction_vld,
                f"waiting_transaction_vld{trans_id:d}"))

        If(
            self.clk._onRisingEdge(),
            If((match_res != 0) & ar_ack,
               waiting_transaction_id[parent_transaction_id](
                   s_ar_tmp.dataOut.id)))

        # parent transaction is finishing just now
        # we need to quickly grab the data in data buffer and copy it also
        # for this transaction
        data_copy_override.vld(s_ar_tmp.dataOut.valid & read_ack
                               & (match_res != 0)
                               & s.r.id._eq(parent_transaction_id) & s.r.last)
        data_copy_override.data(s_ar_tmp.dataOut.id)
Code example #12
File: addr_data_hs_to_Axi.py Project: mfkiwl/hwtLib
    def connect_w(self, s_w: AddrDataHs, axi: Axi4, w_cntr: RtlSignal,
                  CNTR_MAX: int, in_axi_t: HStruct):
        def axi_w_deparser_parametrization(u: AxiS_frameDeparser):
            # [TODO] specify _frames or maxFrameLen if required (AXI3 16beats, AXI4 256)
            u.DATA_WIDTH = axi.DATA_WIDTH
            u.ID_WIDTH = 0

        # component to create an axi-stream like packet from the AddrDataHs write data
        w_builder, w_in = AxiSBuilder.deparse(self, in_axi_t, Axi4.W_CLS,
                                              axi_w_deparser_parametrization)
        w_in = w_in.data

        self.addr_defaults(axi.aw)

        if self.data_words_in_axi_word <= 1:
            self.connect_addr(s_w.addr, axi.aw.addr)
            w_in.data(s_w.data, fit=True)
            aw_sn = StreamNode([s_w], [axi.aw, w_in])
        else:
            addr, sub_addr = self.split_subaddr(s_w.addr)
            self.connect_addr(addr, axi.aw.addr)
            w_in._select.data(sub_addr)

            # sel = HsBuilder(self, w_in._select, master_to_slave=False)\
            #     .buff(self.MAX_TRANS_OVERLAP).end
            # sel.data(sub_addr)

            w_reg = HandshakedReg(Handshaked)
            w_reg.DATA_WIDTH = s_w.DATA_WIDTH
            self.w_data_reg = w_reg
            w_reg.dataIn.data(s_w.data)

            aw_sn = StreamNode([s_w], [axi.aw, w_reg.dataIn, w_in._select])

            data_items = [
                getattr(w_in, f"data{i:d}").data
                for i in range(self.data_words_in_axi_word)
            ]
            for w in data_items:
                w.vld(w_reg.dataOut.vld)
                w.data(w_reg.dataOut.data)
                # ready is not important because it is part of  ._select.rd
            w_reg.dataOut.rd(Or(*[d.rd for d in data_items]))

        w_start_en = w_cntr != CNTR_MAX
        aw_sn.sync(w_start_en)
        # s_w.rd(win.rd)
        # axi.aw.valid(s_w.vld & w_start_en & ~waiting_for_w_data & win.rd)
        # win.vld(s_w.vld & w_start_en & axi.aw.ready)

        if hasattr(axi.w, "id"):
            # axi3
            axi.w(w_builder.end, exclude={axi.w.id})
            axi.w.id(0)
        else:
            # axi4
            axi.w(w_builder.end)

        If(axi.aw.ready & axi.aw.valid,
           If(~axi.b.valid, w_cntr(w_cntr + 1))).Elif(axi.b.valid,
                                                      w_cntr(w_cntr - 1))

        axi.b.ready(1)