def chainFrameWords(self):
    """
    Walk the words of all frames in ``self._frames`` in a single sequence.

    :return: generator of tuples (global word index, word, is-last-in-frame flag)
    """
    base = 0
    for frame in self._frames:
        word_i = 0
        for is_last, (word_i, word) in iter_with_last(
                frame.walkWords(showPadding=True)):
            yield (base + word_i, word, is_last)
        # shift the base index past the words of this frame
        base += word_i + 1
def packAxiSFrame(dataWidth, structVal, withStrb=False):
    """
    pack data of structure into words on axis interface

    :param dataWidth: width of a single bus word in bits
    :param structVal: HDL value instance to serialize into bus words
    :param withStrb: if True, yield a per-word byte strobe mask derived from
        the validity mask of each word
    :return: generator of (word, strb, last) if withStrb else (word, last)
    """
    if withStrb:
        byte_cnt = dataWidth // 8
    words = iterBits(structVal, bitsInOne=dataWidth,
                     skipPadding=False, fillup=True)
    for last, d in iter_with_last(words):
        assert d._dtype.bit_length() == dataWidth, d._dtype.bit_length()
        if withStrb:
            word_mask = 0
            # build strobe: one bit per byte, set only for fully valid bytes
            for B_i in range(byte_cnt):
                m = get_bit_range(d.vld_mask, B_i * 8, 8)
                if m == 0xff:
                    word_mask = set_bit(word_mask, B_i)
                else:
                    # partially valid bytes cannot be expressed in strb
                    assert m == 0, ("Each byte has to be entirely valid"
                                    " or entirely invalid,"
                                    " because of mask granularity", m)
            yield (d, word_mask, last)
        else:
            yield (d, last)
def spotNewTransaction(sim, onDone):
    """
    Simulation callback which starts the next read transaction, if any.

    NOTE(review): relies on names from the enclosing scope (``tIt``, ``m``,
    ``r``, ``self``, ``MAGIC``, ``WORD_SIZE``, ``expected_data`` and the
    AXI constants) — presumably a closure inside a test method; confirm
    against the enclosing definition.

    :param sim: simulator instance (unused here, required by callback API)
    :param onDone: callback scheduled after the command register write
    """
    try:
        # (transaction id, word count) for the next transaction
        id_, words = next(tIt)
    except StopIteration:
        # no more transactions to schedule
        return
    magic = MAGIC * id_
    initValues = [magic + i for i in range(words)]
    # allocate simulated memory pre-filled with the expected pattern
    memPtr = m.calloc(words, WORD_SIZE, initValues=initValues)
    expected_data.append(initValues)
    self.expected_id = id_
    # program the DMA/bridge registers for this burst
    r.ar_aw_w_id.write(id_)
    r.addr.write(memPtr)
    r.burst.write(BURST_INCR)
    r.cache.write(CACHE_DEFAULT)
    r.len.write(words - 1)
    r.lock.write(LOCK_DEFAULT)
    r.prot.write(PROT_DEFAULT)
    r.size.write(BYTES_IN_TRANS(WORD_SIZE))
    r.qos.write(0)
    self.wordIt = iter_with_last(initValues)
    # issue the read-address command; onDone fires when the write completes
    r.cmd_and_status.write(SEND_AR, onDone=onDone)
def __repr__(self, offset: int = 0):
    """
    Build an indented, tree-shaped repr of this TransTmpl node.

    :param offset: indentation level (one space per level)
    """
    offsetStr = "".join([" " for _ in range(offset)])
    try:
        name = self.origin.name
    except AttributeError:
        # origin may not carry a name
        name = None
    if name:
        name = " name:%s," % name
    else:
        name = ""
    s = "%s<TransTmpl%s start:%d, end:%d" % (
        offsetStr, name, self.bitAddr, self.bitAddrEnd)
    if isinstance(self.dtype, HArray):
        # array: single child template describing the item
        s += ", itemCnt:%d" % (self.itemCnt) + "\n"
        s += self.children.__repr__(offset=offset + 1) + "\n"
        s += offsetStr + ">"
        return s
    elif not self.children:
        # leaf node
        return s + ">"
    buff = [s, ]
    for isLast, child in iter_with_last(self.children):
        buff.append(child.__repr__(offset=offset + 1))
        if self.childrenAreChoice and not isLast:
            # children of a union are alternatives, separate them visually
            buff.append(offsetStr + " <OR>")
    buff.append(offsetStr + ">")
    return "\n".join(buff)
def __repr__(self, offset: int = 0):
    """
    Build an indented, tree-shaped repr of this TransTmpl node.

    :param offset: indentation level (one space per level)
    """
    offsetStr = "".join([" " for _ in range(offset)])
    try:
        # origin is a path; the last element names this node
        name = self.origin[-1].name
    except (AttributeError, IndexError):
        name = None
    if name:
        name = f" name:{name:s},"
    else:
        name = ""
    s = f"{offsetStr:s}<TransTmpl{name} start:{self.bitAddr:d}, end:{self.bitAddrEnd:d}"
    if isinstance(self.dtype, (HArray, HStream)):
        # array/stream: single child template describing the item
        s_buff = [
            s, f", itemCnt:{self.itemCnt:d}\n",
            self.children.__repr__(offset=offset + 1), "\n",
            offsetStr, ">"
        ]
        return "".join(s_buff)
    elif not self.children:
        # leaf node
        return s + ">"
    buff = [s, ]
    for isLast, child in iter_with_last(self.children):
        buff.append(child.__repr__(offset=offset + 1))
        if self.childrenAreChoice and not isLast:
            # children of a union are alternatives, separate them visually
            buff.append(offsetStr + " <OR>")
    buff.append(offsetStr + ">")
    return "\n".join(buff)
def data_transaction(self, id_, data):
    """
    Build reference w-channel transactions for *data*.

    :param id_: unused (kept for interface compatibility with sibling agents)
    :param data: iterable of data words
    :return: list of (data, strb, last) tuples with full byte strobe
    """
    DW = self.u.s[0].w.DATA_WIDTH
    strb = mask(DW // 8)
    return [(word, strb, int(end)) for end, word in iter_with_last(data)]
def test_non_mergable_no_ack(self, N=10, randomized=False):
    """
    Feed N non-mergeable writes and check the produced AXI address/data
    transactions; only ``2**ID_WIDTH`` writes can be dispatched because
    no acknowledge is provided.

    :param N: number of write requests pushed to the input agent
    :param randomized: if True, randomize agent timing
    """
    u = self.u
    u.w._ag.data.extend(
        (i, 10 + i, mask(u.CACHE_LINE_SIZE)) for i in range(N))
    if randomized:
        self.randomize_all()
    self.runSim((N + 10) * 2 * CLK_PERIOD * u.BUS_WORDS_IN_CACHE_LINE)
    # number of in-flight transactions is bounded by the id space
    SIZE = 2**u.ID_WIDTH
    aw = u.m.aw._ag
    self.assertValSequenceEqual(aw.data, [
        aw.create_addr_req(addr=u.CACHE_LINE_SIZE * i,
                           _len=u.BUS_WORDS_IN_CACHE_LINE - 1,
                           _id=i)
        for i in range(min(SIZE, N))
    ])
    w_ref = []
    for i in range(min(SIZE, N)):
        # data only in the first bus word of each cache line, rest zeroed
        for last, w_i in iter_with_last(range(u.BUS_WORDS_IN_CACHE_LINE)):
            d = (10 + i if w_i == 0 else 0,
                 mask(u.DATA_WIDTH // 8),
                 int(last))
            w_ref.append(d)
    # for i, (x0, x1) in enumerate(zip(u.m.w._ag.data, w_ref)):
    #     print(i, allValuesToInts(x0), x1)
    self.assertValSequenceEqual(u.m.w._ag.data, w_ref)
    # 1 item is currently handled by agent, 1 item in tmp reg
    self.assertEqual(len(u.w._ag.data), N - SIZE - 1 - 1)
def _impl(self):
    """
    Build the netlist: a RAM write port with optional read-modify-write
    accumulation (``do_accumulate``) and overwrite (``do_overwrite``) of a
    value together with its byte-validity mask.

    NOTE(review): assumes ``self.ram.port[0]`` is the write port and the
    remaining ports are read ports, with the mask stored in the low bits of
    each RAM word — confirm against the component declaration.
    """
    w = self.port[0]
    ram_w = self.ram.port[0]
    # True if each byte of the mask is 0xff or 0x00
    we_bytes = list(iterBits(w.we, bitsInOne=8, fillup=True))
    # cut off padding
    we_for_we_bytes = []
    for last, b in iter_with_last(we_bytes):
        if last and self.MASK_PADDING_W:
            mask_rem_w = self.MASK_W % 8
            b = b[mask_rem_w:]
        we_for_we_bytes.append(b != 0)
    we_for_we_bytes = rename_signal(
        self,
        Concat(*[b | ~w.do_accumulate | w.do_overwrite
                 for b in reversed(we_for_we_bytes)]),
        "we_for_we_bytes")
    # preload: one extra cycle used to read current value before accumulation
    preload = self._reg("preload", def_val=0)
    If(w.en.vld,
       preload(~preload & w.do_accumulate & ~w.do_overwrite)
    )
    w.en.rd(~w.do_accumulate | w.do_overwrite | preload)
    ram_w.addr(w.addr)
    ram_w.en(w.en.vld & (w.do_overwrite | ~w.do_accumulate | preload))
    ram_w.we(Concat(w.we, we_for_we_bytes))
    w_mask = w.we
    if self.MASK_PADDING_W:
        # extend the mask with zero padding bits
        w_mask = Concat(Bits(self.MASK_PADDING_W).from_py(0), w_mask)
    is_first_read_port = True
    for ram_r, r in zip(self.ram.port[1:], self.port[1:]):
        if is_first_read_port:
            # when accumulating, OR the stored mask into the new mask
            w_mask = preload._ternary(
                w_mask | ram_r.dout[self.MASK_PADDING_W + self.MASK_W:],
                w_mask)
            w_mask = rename_signal(self, w_mask, "w_mask")
            ram_w.din(Concat(w.din, w_mask))
            will_preload_for_accumulate = rename_signal(
                self, w.en.vld & w.do_accumulate & ~w.do_overwrite,
                "will_preload_for_accumulate")
            # first read port is shared with the accumulate preload read
            ram_r.addr(will_preload_for_accumulate._ternary(w.addr, r.addr))
            ram_r.en(will_preload_for_accumulate | r.en.vld)
            # [TODO] check if r.en.rd is according to spec
            r.en.rd(~will_preload_for_accumulate | preload)
            is_first_read_port = False
        else:
            ram_r.addr(r.addr)
            ram_r.en(r.en.vld)
            r.en.rd(1)
        # split stored word back into data and mask parts
        r.dout(ram_r.dout[:self.MASK_PADDING_W + self.MASK_W])
        r.dout_mask(ram_r.dout[self.MASK_W:])
    propagateClkRstn(self)
def test_rx(self):
    """Receive one frame and check (data, error, last) tuples on rx output."""
    N = 10
    payload = list(range(N))
    u = self.u
    u.eth.rx._ag._append_frame(payload)
    self.runSim(self.CLK * 100)
    expected = [(word, 0, int(end))
                for end, word in iter_with_last(payload)]
    self.assertValSequenceEqual(u.rx._ag.data, expected)
def is_footer_mask_set_values(self, LOOK_AHEAD, regs):
    """
    Build the logic which, when an end of frame is seen on dataIn, computes
    for every pipeline register the byte mask marking which bytes belong to
    the frame footer.

    :param LOOK_AHEAD: number of look-ahead registers in the pipeline
    :param regs: register tuples produced by :meth:`generate_regs`
    :return: signal which is 1 when the footer masks are being set
        (valid last word on dataIn)
    """
    D_W = self.DATA_WIDTH
    BYTE_CNT = D_W // 8
    FOOTER_WIDTH = self.FOOTER_WIDTH
    din = self.dataIn
    if self.USE_KEEP:
        in_mask = din.keep
    elif self.USE_STRB:
        in_mask = din.strb
    elif self.DATA_WIDTH == 8:
        # a single-byte word is always fully valid
        in_mask = BIT.from_py(1, 1)
    else:
        raise NotImplementedError(
            "keep/strb can be ignored only for DATA_WIDTH=8")
    set_is_footer = self._sig("set_is_footer")
    set_is_footer(din.valid & din.last)
    mask_cases = []
    # one case for each possible count of valid bytes in the last input word
    for last_B_valid, bytes_in_last_input_word in iter_with_last(
            range(1, BYTE_CNT + 1)):
        footer_end = (LOOK_AHEAD * BYTE_CNT + bytes_in_last_input_word) * 8
        footer_start = footer_end - FOOTER_WIDTH
        assert footer_start > 0, (
            "otherwise we would not be able to send last for previous frame",
            footer_start)
        assert footer_start < D_W * 3, (
            "footer start can appear only in last-1 or last-2 regster,"
            " last register is output register", footer_start, D_W)
        # byte mask over the whole pipeline with 1 for footer bytes
        _is_footer = set_bit_range(0, footer_start // 8,
                                   FOOTER_WIDTH // 8,
                                   mask(FOOTER_WIDTH // 8))
        set_flags = []
        for i, (_, _, is_footer_set_val, _, _) in enumerate(regs):
            if i == 0:
                # reg 0 is dataIn; its mask slice is applied combinationally
                is_footer_val = 0
                is_footer_val_last_word = get_bit_range(
                    _is_footer, (LOOK_AHEAD - i) * BYTE_CNT, BYTE_CNT)
                set_flags.append(
                    If(set_is_footer,
                       is_footer_set_val(is_footer_val_last_word)
                    ).Else(
                       is_footer_set_val(is_footer_val)
                    ))
            else:
                # slice of the pipeline-wide footer mask for this register
                is_footer_val = get_bit_range(
                    _is_footer, (LOOK_AHEAD - i + 1) * BYTE_CNT, BYTE_CNT)
                set_flags.append(is_footer_set_val(is_footer_val))
        if last_B_valid:
            # last byte also valid
            mask_default = set_flags
        else:
            # last 0 from the end of the validity mask
            mask_cases.append(
                (~in_mask[bytes_in_last_input_word], set_flags))
    SwitchLogic(mask_cases, mask_default)
    return set_is_footer
def create_w_frame(self, words: List[int]):
    """
    :param words: list of frame data words
    :return: list of (data, strb, last) tuples with full byte strobe
    """
    assert words, words
    full_strb = mask(self.u.DATA_WIDTH // 8)
    return [(word, full_strb, int(end))
            for end, word in iter_with_last(words)]
def test_commited_on_end(self):
    """Fill nearly the whole buffer and check data passes through unchanged."""
    u = self.u
    u.dataIn_discard._ag.data.append(0)
    N = self.ITEMS - 1
    ref_data = []
    for end, i in iter_with_last(range(N)):
        ref_data.append((i + 1, int(end)))
    u.dataIn._ag.data.extend(ref_data)
    self.runSim((2 * N + 10) * CLK_PERIOD)
    self.assertValSequenceEqual(u.dataOut._ag.data, ref_data)
def is_mask_byte_unaligned(mask_signal: RtlSignal) -> RtlSignal:
    """
    :param mask_signal: bit-level write mask signal
    :return: signal with value 1 if the mask is NOT byte aligned, i.e. some
        byte of the mask is partially set (neither all 0 nor all 1)
    """
    we_bytes = list(iterBits(mask_signal, bitsInOne=8, fillup=True))
    write_mask_not_aligned = []
    for last, b in iter_with_last(we_bytes):
        if last:
            # cut off padding if required
            mask_rem_w = mask_signal._dtype.bit_length() % 8
            if mask_rem_w:
                b = b[mask_rem_w:]
        # byte is misaligned when it is neither empty nor full
        write_mask_not_aligned.append((b != 0) &
                                      (b != mask(b._dtype.bit_length())))
    return Or(*write_mask_not_aligned)
def _get(self, o: Union[Tuple[Unit, RtlSignal, Interface], iHdlConstrain],
         only_first=False):
    """
    Write a Vivado ``get_cells``/``get_pins`` query for the object path *o*.

    :param o: path of objects (Unit/RtlSignal/Interface) leading to the
        target, or an iHdlConstrain which is delegated to its own visitor
    :param only_first: if true select only first bit from vector,
        else select whole vector
    """
    if isinstance(o, iHdlConstrain):
        return self.visit_iHdlConstrain(o)
    is_reg = False
    _o = o[-1]
    if isinstance(_o, RtlSignal):
        q = "get_cells"
        # a signal driven from a clocked branch is implemented as a register
        for d in _o.drivers:
            if d._event_dependent_from_branch is not None:
                is_reg = True
    elif isinstance(_o, Interface):
        q = "get_pins"
    else:
        raise NotImplementedError(o)
    w = self.out.write
    w(q)
    w(" -hier -filter {NAME =~ */")
    path = o
    # [TODO] find out how to make select with ip top module/entity name
    # skip the first path element (the top) and join the rest with "/"
    for last, p in iter_with_last(islice(path, 1, None)):
        if isinstance(p, Unit):
            w(p._name)
            w("_inst")
        elif isinstance(p, RtlSignal):
            w(p.name)
        elif isinstance(p, Interface):
            w(p._name)
        else:
            raise NotImplementedError(p)
        if not last:
            w("/")
    t = _o._dtype
    if is_reg:
        w("_reg")
    if isinstance(t, Bits) and (t.bit_length() > 1 or t.force_vector):
        # * on end because of Vivado _replica
        if only_first:
            w("[0]*")
        else:
            w("[*]*")
    w("}")
def _impl(self):
    """
    Compare the constant ``VAL`` against the value streamed on dataIn.

    If ``VAL`` fits into one bus word the comparison is purely
    combinational; otherwise an FSM walks the words of the frame,
    accumulating per-word match results in ``state`` and producing the
    final result together with the last word.
    """
    V = self.VAL
    VAL_W = self.VAL._dtype.bit_length()
    D_W = self.DATA_WIDTH
    if not V._is_full_valid():
        raise NotImplementedError()
    din = self.dataIn
    dout = self.dataOut
    if VAL_W <= D_W:
        # do comparison in single word
        dout.data(din.data[VAL_W:]._eq(V))
        StreamNode([din], [dout]).sync()
    else:
        # build fsm for comparing
        word_cnt = ceil(VAL_W / D_W)
        word_index = self._reg("word_index", Bits(log2ceil(word_cnt - 1)),
                               def_val=0)
        # true if all previous words were matching
        state = self._reg("state", def_val=1)
        offset = 0
        word_cases = []
        for is_last_word, i in iter_with_last(range(word_cnt)):
            val_low = offset
            val_high = min(offset + D_W, VAL_W)
            in_high = val_high - val_low
            state_update = din.data[in_high:]._eq(V[val_high:val_low])
            if is_last_word:
                # result is produced combinationally with the last word
                dout.data(state & state_update)
            else:
                word_cases.append((i, state(state & state_update)))
            # BUGFIX: advance the compare window to the next slice of VAL;
            # previously offset was never incremented, so every input word
            # was compared against the lowest word of VAL
            offset = val_high
        If(StreamNode([din], [dout]).ack(),
           If(din.last,
              # end of frame: restart comparison
              word_index(0),
              state(1),
           ).Else(
              word_index(word_index + 1),
              Switch(word_index)\
              .add_cases(word_cases)
           )
        )
        # output is produced only together with the last word of the frame
        StreamNode(
            [din], [dout],
            extraConds={dout: din.valid & din.last},
            skipWhen={dout: ~(din.valid & din.last)}).sync()
def generate_regs(
        self, LOOK_AHEAD
) -> List[Tuple[RtlSignal, RtlSignal, RtlSignal, RtlSignal, RtlSignal]]:
    """
    Build the pipeline registers (plus control signals) used to look ahead
    for the frame footer.

    :param LOOK_AHEAD: number of extra registers needed to see the whole
        footer before the output register
    :return: list of (reg, is_footer, is_footer_set_val, can_flush, ready)
        tuples; index 0 is dataIn itself, the last item is the output register
    """
    din = self.dataIn
    mask_t = Bits(self.DATA_WIDTH // 8, force_vector=True)
    data_fieds = [
        (din.data._dtype, "data"),
        # flag for end of input frame
        (BIT, "last"),
        (BIT, "valid"),
    ]
    if self.USE_KEEP:
        data_fieds.append((mask_t, "keep"))
    if self.USE_STRB:
        data_fieds.append((mask_t, "strb"))
    reg_t = HStruct(*data_fieds)
    regs = []
    # 0 is dataIn, 1 is connected to dataIn, ..., n connected to dataOut
    for last, i in iter_with_last(range(LOOK_AHEAD + 1 + 1)):
        if i == 0:
            r = din
            ready = din.ready
            can_flush = BIT.from_py(0)
            is_footer = self._sig("dataIn_is_footer", mask_t)
            is_footer_set_val = is_footer  # because it is always set
        elif last:
            r = self._reg("out_reg", reg_t, def_val={"valid": 0})
            ready = self._sig("out_reg_ready")
            can_flush = self._sig("out_reg_can_flush")
            is_footer = self._reg("out_reg_is_footer", mask_t, def_val=0)
            is_footer_set_val = self._sig("out_reg_is_footer_set_val",
                                          mask_t)
        else:
            r = self._reg(f"r{i:d}", reg_t, def_val={"valid": 0})
            ready = self._sig(f"r{i:d}_ready")
            can_flush = self._sig(f"r{i:d}_can_flush")
            is_footer = self._reg(f"r{i:d}_is_footer", mask_t, def_val=0)
            is_footer_set_val = self._sig(f"r{i:d}_is_footer_set_val",
                                          mask_t)
        # :var ready: signal which is 1 if this register can accept data
        # :var can_flush: tells if this register can pass it's value to next
        #     even if prev does not contain valid data
        #     and the hole in data may be created
        # :var is_footer: mask with 1 for footer bytes
        # :var is_footer_set_val: value of is_footer which will be set if end
        #     of frame is detected
        regs.append((r, is_footer, is_footer_set_val, can_flush, ready))
    return regs
def test_tx(self):
    """Transmit one frame and check preamble + SFD + payload on the wire."""
    N = 10
    payload = list(range(N))
    u = self.u
    u.tx._ag.data.extend(
        (word, end) for end, word in iter_with_last(payload))
    self.runSim(self.CLK * 200)
    # 7 preamble bytes, start-of-frame delimiter, then the payload itself
    expected = [int(ETH.PREAMBLE_1B)] * 7 + [int(ETH.SFD)] + payload
    self.assertEmpty(u.eth.tx._ag.data)
    self.assertEqual(len(u.eth.tx._ag.frames), 1)
    self.assertValSequenceEqual(u.eth.tx._ag.frames[0], expected)
def _impl(self):
    """
    Build a handshaked register pipeline implementing the configured
    LATENCY/DELAY combination.

    Supported configurations: LATENCY == (1, 2) with DELAY == 0
    (ready chain break), any LATENCY with DELAY == 0 (chained latency
    stages), and LATENCY == 2 with DELAY == 1.
    """
    LATENCY = self.LATENCY
    DELAY = self.DELAY
    # accessors for handshake sub-signals of an interface
    vld = self.get_valid_signal
    rd = self.get_ready_signal
    data = self.get_data
    Out = self.dataOut
    In = self.dataIn
    if LATENCY == (1, 2):
        # ready chain break
        if DELAY != 0:
            raise NotImplementedError()
        in_vld, in_rd, in_data = vld(In), rd(In), data(In)
        out_vld, out_rd = vld(Out), rd(Out)
        outData = self._implReadyChainBreak(in_vld, in_rd, in_data,
                                            out_vld, out_rd,
                                            "ready_chain_break_")
    elif DELAY == 0:
        in_vld, in_rd, in_data = vld(In), rd(In), data(In)
        # chain LATENCY single-stage buffers; last stage drives dataOut
        for last, i in iter_with_last(range(LATENCY)):
            if last:
                out_vld, out_rd = vld(Out), rd(Out)
            else:
                out_vld = self._sig("latency%d_vld" % (i + 1))
                out_rd = self._sig("latency%d_rd" % (i + 1))
            outData = self._impl_latency(in_vld, in_rd, in_data,
                                         out_vld, out_rd,
                                         f"latency{i:d}_")
            # output of this stage is input of the next
            in_vld, in_rd, in_data = out_vld, out_rd, outData
    elif LATENCY == 2 and DELAY == 1:
        latency1_vld = self._sig("latency1_vld")
        latency1_rd = self._sig("latency1_rd")
        outData = self._impl_latency(vld(In), rd(In), data(In),
                                     latency1_vld, latency1_rd,
                                     "latency1_")
        outData = self._implLatencyAndDelay(latency1_vld, latency1_rd,
                                            outData, vld(Out), rd(Out),
                                            "latency2_delay1_")
    else:
        raise NotImplementedError(LATENCY, DELAY)
    # connect the data signals of the last stage to dataOut
    for ds, dm in zip(data(Out), outData):
        ds(dm)
def packAxiSFrame(dataWidth, structVal, withStrb=False):
    """
    Pack data of structure into words on axis interface.

    :param dataWidth: width of a single bus word in bits
    :param structVal: HDL value instance to serialize into bus words
    :param withStrb: if True, yield also a (full) strb mask for each word
    :return: generator of (word, strb, last) if withStrb else (word, last)
    """
    strb_all = mask(dataWidth // 8) if withStrb else None
    words = iterBits(structVal, bitsInOne=dataWidth,
                     skipPadding=False, fillup=True)
    for is_last, word in iter_with_last(words):
        assert word._dtype.bit_length() == dataWidth, \
            word._dtype.bit_length()
        if withStrb:
            # [TODO] mask in last resolved from size of datatype, mask for padding
            yield (word, strb_all, is_last)
        else:
            yield (word, is_last)
def test_single_write(self):
    """Write one cache line and check the generated AXI aw/w transactions."""
    u = self.u
    line = int_list_to_int(range(u.CACHE_LINE_SIZE), 8)
    u.w._ag.data.append((1, line, mask(u.CACHE_LINE_SIZE)))
    self.runSim(10 * CLK_PERIOD)
    aw = u.m.aw._ag
    req = aw.create_addr_req(addr=1 * u.CACHE_LINE_SIZE,
                             _len=u.BUS_WORDS_IN_CACHE_LINE - 1,
                             _id=0)
    self.assertValSequenceEqual(aw.data, [req])
    # the cache line is split into bus words, all bytes strobed
    full_strb = mask(u.DATA_WIDTH // 8)
    w_expected = []
    for end, i in iter_with_last(range(u.BUS_WORDS_IN_CACHE_LINE)):
        word = get_bit_range(line, u.DATA_WIDTH * i, u.DATA_WIDTH)
        w_expected.append((word, full_strb, int(end)))
    self.assertValSequenceEqual(u.m.w._ag.data, w_expected)
def generate_in_axi_type(self):
    """
    Build the HStruct/HUnion type describing how a data word may be placed
    inside an axi-s bus word.

    :return: tuple (data_type, data_words_in_axi_word)
    """
    s_w = self.s_w
    axi = self.m
    # type used to describe how to build and parse axi-s frames
    DW = ceil(s_w.DATA_WIDTH / 8) * 8
    data_words_in_axi_word = axi.DATA_WIDTH // DW
    if data_words_in_axi_word <= 1:
        data_type = HStruct((Bits(DW), "data"))
    else:
        assert data_words_in_axi_word > 1, data_words_in_axi_word
        data_fields = []
        # one member per possible position of the data in the axi word,
        # padded before and after with anonymous Bits fields
        for last, i in iter_with_last(range(data_words_in_axi_word)):
            prefix, suffix = (), ()
            if i != 0:
                prefix = ((Bits(i * DW), None), )
            if not last:
                suffix = ((Bits(axi.DATA_WIDTH - ((i + 1) * DW)), None), )
            data_fields.append(
                (HStruct(*prefix, (Bits(DW), "data"), *suffix),
                 f"data{i:d}"))
        data_type = HStruct(
            # union with member for each data position in axi word
            (HUnion(*data_fields), 'data'))
    return data_type, data_words_in_axi_word
def getFrames(self):
    """
    Regroup the flat list of output words into frames according to the
    sizes pushed to ``u.sizes``.

    :return: list of frames, each frame a list of integer words with
        invalid (non-strobed) bytes zeroed out
    """
    u = self.u
    sizes = u.sizes._ag.data
    data = iter(u.dataOut._ag.data)
    size_of_word = self.DATA_WIDTH // 8
    frames = []
    for s in sizes:
        _s = int(s)
        # BUGFIX: integer ceiling division instead of the former float-based
        # test (`_s / size_of_word - words > 0.0`), which is subject to
        # floating point rounding error for large frame sizes
        words = -(-_s // size_of_word)
        frame = []
        for last, (_d, _mask, _last) in iter_with_last(take(data, words)):
            # last flag must be set exactly on the final word of the frame
            self.assertValEqual(_last, last)
            _mask = int(_mask)
            # zero out bytes which are not strobed
            _d.val = mask_bytes(_d.val, _mask, size_of_word)
            _d.vldMask = mask_bytes(_d.vldMask, _mask, size_of_word)
            frame.append(int(_d))
        frames.append(frame)
    return frames
def input_B_dst_to_fsm(word_bytes: int, input_cnt: int,
                       input_B_dst: List[List[Set[Tuple[Tuple[int, int], int, int, int]]]]):
    """
    :param word_bytes: number of bytes in output word
    :param input_cnt: number of input streams
    :param input_B_dst: list with mapping of input bytes to a output bytes
        in each state

    .. code-block::

        Format of input_B_dst is:
        List for each input
            in this list there are lists for each input byte
                in this list there are sets of byte destinations for each input byte
                    byte destination is a tuple:
                    state label, input index, time index, output byte index, input last flag

    :note: input_B_dst is produced by :func:`hwtLib.amba.axis_comp.frame_utils.join.FrameJoinUtils.resolve_input_bytes_destinations`
    :return: filled-in StateTransTable describing the byte-multiplexing FSM
    """
    # label: StateTransInfo
    sub_states = {}
    # create substates from input byte mux info
    for in_i, in_word_dst in enumerate(input_B_dst):
        for in_B_i, in_B_dsts in enumerate(in_word_dst):
            for (st_label, in_B_time, out_B_i,
                 B_from_last_input_word) in in_B_dsts:
                st = sub_states.get(st_label, None)
                if st is None:
                    st = StateTransInfo(st_label, word_bytes, input_cnt)
                    sub_states[st_label] = st
                st.set_output(out_B_i, in_i, in_B_time, in_B_i,
                              B_from_last_input_word)
    # resolve max lookahead for each input
    max_lookahead_for_input = [0 for _ in range(input_cnt)]
    for in_i, in_word_dst in enumerate(input_B_dst):
        for in_B_i, in_B_dsts in enumerate(in_word_dst):
            for st_label, in_B_time, out_B_i, _ in in_B_dsts:
                max_lookahead_for_input[in_i] = max(
                    max_lookahead_for_input[in_i], in_B_time)
    # build fsm
    state_cnt = input_cnt
    tt = StateTransTable(word_bytes, max_lookahead_for_input, state_cnt)
    states_for_relict_processing = []
    # for all possible in/out configurations
    for ss in sorted(sub_states.values(), key=lambda x: x.label):
        st_i = get_state_i(ss)
        next_ss = get_next_substate(sub_states, ss)
        if next_ss is None:
            next_st_i = 0
        else:
            next_st_i = get_state_i(next_ss)
        tr = StateTransItem(tt)
        tt.state_trans[st_i].append(tr)
        tr.state = st_i
        tr.state_next = next_st_i
        tr.last = int(next_ss is None)
        o_prev = None
        for last, (out_B_i, o) in iter_with_last(enumerate(ss.outputs)):
            if o is None:
                o_prev = o
                # output byte is disconnected, which is default state
                continue
            # in_i - input stream index
            # in_t - input time (register index)
            (in_i, in_t, in_B_i, is_from_last_input_word) = o
            in_rec = tr.input[in_i][in_t]
            # vld, keep required as we are planing to use this byte in output
            in_rec.keep[in_B_i] = 1
            in_rec.last = is_from_last_input_word
            tr.out_byte_mux_sel[out_B_i] = (in_i, in_t, in_B_i)
            tr.input_rd[in_i] = 1
            # next keep = 0 because this byte will be consumed
            tr.input_keep_mask[in_i][in_t][in_B_i] = 0
            tr.output_keep[out_B_i] = 1
            is_first_input_byte = is_from_different_input(o_prev, o)
            # is last byte from input byte in this output word
            if last:
                o_next = next_ss.outputs[0] if next_ss is not None else None
            else:
                o_next = ss.outputs[out_B_i + 1]
            if o_next is not None:
                # bytes must be consumed in input-stream order
                assert o[0] <= o_next[0]
            is_input_word_continuing_in_next_out_word = last \
                and next_ss is not None \
                and is_next_byte_from_same_input(o, o_next)\
                and in_B_i != word_bytes - 1
            if is_input_word_continuing_in_next_out_word:
                assert next_ss is not None
                states_for_relict_processing.append(next_ss)
            is_last_input_byte = is_from_different_input(o, o_next)
            if is_last_input_byte:
                assert not is_input_word_continuing_in_next_out_word
            if is_first_input_byte and in_B_i != 0:
                # mark leading zero
                for i in range(0, in_B_i):
                    in_rec.keep[i] = 0
            if (is_last_input_byte \
                    or is_input_word_continuing_in_next_out_word\
                    or last) \
                    and (
                    not (is_from_last_input_word \
                         and is_last_input_byte \
                         and in_B_i == word_bytes - 1)):
                # mark keep for next input byte
                if not is_from_last_input_word or is_input_word_continuing_in_next_out_word:
                    # the next input byte is present because we are not in last input word
                    # or this may be a last word but it is not fully consumed
                    next_B_keep = 1
                else:
                    # no more bytes from this input stream
                    next_B_keep = 0
                if in_B_i == word_bytes - 1:
                    # because pipeline will shift next time
                    in_t += 1
                input_val = tr.input[in_i]
                if in_t < len(input_val):
                    next_keep = input_val[in_t].keep
                    next_keep[(in_B_i + 1) % word_bytes] = next_B_keep
            o_prev = o
        # if we are checking the input keep==0 set keep_mask=0 as well
        # (not required, to make clear that the byte will not be used in code)
        for in_meta, in_keep_mask in zip(tr.input, tr.input_keep_mask):
            for in_i, in_inputs in enumerate(in_meta):
                for B_i, k in enumerate(in_inputs.keep):
                    if k is not None and k == 0:
                        in_keep_mask[in_i][B_i] = 0
        # mark relict flag
        first_input_is_relict = ss in states_for_relict_processing
        for o in ss.outputs:
            if o is None:
                # skip start padding
                continue
            (in_i, in_t, in_B_i, _) = o
            v = tr.input[in_i][in_t]
            if v.last:
                # relict flag matters only for word with last flag set
                # because it is used to distinguis starts of single word frames
                # where only part of the word can be consumed to a output word
                v.relict = int(first_input_is_relict)
            break
    tt.filter_unique_state_trans()
    tt.assert_transitions_deterministic()
    return tt
def data_transaction(self, id_, data):
    """
    Build reference r-channel transactions for *data*.

    :param id_: transaction id used for every word
    :param data: iterable of data words
    :return: list of (id, data, resp, last) tuples with RESP_OKAY
    """
    return [(id_, word, RESP_OKAY, int(end))
            for end, word in iter_with_last(data)]
def _impl(self):
    """
    Build the netlist of the footer splitter: a register pipeline with
    LOOK_AHEAD stages which delays the stream long enough to recognize the
    footer bytes of a frame, then routes prefix data to dataOut[0] and
    footer bytes to dataOut[1].
    """
    USE_KEEP = self.USE_KEEP
    USE_STRB = self.USE_STRB
    FOOTER_WIDTH = self.FOOTER_WIDTH
    D_W = self.DATA_WIDTH
    # number of extra registers required to see the whole footer
    LOOK_AHEAD = ceil(FOOTER_WIDTH / D_W)
    if FOOTER_WIDTH % 8 != 0:
        raise NotImplementedError()
    if not (self.USE_KEEP or self.USE_STRB):
        assert D_W == 8, ("AxiStream is configured not to use KEEP/STRB"
                          " but is required to resolve frame end", D_W)
    dout = self.dataOut
    regs = self.generate_regs(LOOK_AHEAD)
    self.flush_en_logic(regs)
    # resolve footer flags
    set_is_footer = self.is_footer_mask_set_values(LOOK_AHEAD, regs)
    din = self.dataIn
    # connect inputs/outputs of registers, dataIn, dataOut
    for is_last, (i, (r, is_footer, is_footer_set_val, can_flush, ready)) in\
            iter_with_last(enumerate(regs)):
        # :note: reg[0] is dataIn, reg[-1] is out_reg
        if is_last:
            # connect last register to outputs
            prev_r, prev_is_footer, _, _, _ = regs[i - 1]
            en = rename_signal(self, din.valid | can_flush, "out_en")
            # at least starts with non footer data
            d0_en = rename_signal(self, ~is_footer[0], "d0_en")
            # contains at least some footer data
            d1_en = rename_signal(self, is_footer != 0, "d1_en")
            # connect prefix data
            dout[0].data(r.data)
            # last if this word contains footer, or next word does not contains data
            d0_last_word_in_last_r = prev_r.valid & prev_is_footer[0]
            dout[0].last((is_footer != 0) | d0_last_word_in_last_r)
            dout[0].valid(r.valid & d0_en & en & (~d1_en | dout[1].ready))
            # connect footer
            dout[1].data(r.data)
            dout[1].last(r.last)
            dout[1].valid(r.valid & d1_en & en & (~d0_en | dout[0].ready))
            # split the byte mask between the two outputs
            mask0 = ~is_footer
            mask1 = is_footer
            if USE_KEEP:
                dout[0].keep(r.keep & mask0)
                dout[1].keep(r.keep & mask1)
            if USE_STRB:
                dout[0].strb(r.strb & mask0)
                dout[1].strb(r.strb & mask1)
            ready(~r.valid | ((dout[0].ready | ~d0_en)
                              & (dout[1].ready | ~d1_en)))
        else:
            # connect only ready, because inputs of next register
            # are connected by next register
            next_ready = regs[i + 1][-1]
            if i == 0:
                # dataIn is actually not a register
                # and we do not need any extra check
                ready(next_ready)
            else:
                ready(~r.valid | next_ready)
        if i > 0:
            # connect register inputs, skip 0 because it is dataIn itself
            prev_r, prev_is_footer, _, prev_can_flush, _ = regs[i - 1]
            data_feed = []
            if USE_KEEP:
                data_feed.append(r.keep(prev_r.keep))
            if USE_STRB:
                data_feed.append(r.strb(prev_r.strb))
            prev_r_vld = prev_r.valid & (din.valid | prev_can_flush)
            If(
                ready,
                If(
                    prev_r_vld | can_flush,
                    r.data(prev_r.data),
                    r.last(prev_r.last),
                    *data_feed,
                ),
                # footer mask: set on end of frame, otherwise shift from
                # the previous register, or clear while flushing
                If(
                    set_is_footer & ~(prev_r.valid & prev_r.last),
                    is_footer(is_footer_set_val),
                    r.valid(prev_r.valid),
                ).Elif(
                    prev_r_vld,
                    is_footer(prev_is_footer),
                    r.valid(prev_r.valid),
                ).Elif(
                    can_flush,
                    is_footer(0),
                    r.valid(0),
                ))
def chainFrameWords(self):
    """
    Walk the words of all frames in ``self._frames`` in a single sequence.

    :return: generator of tuples (global word index, word, is-last-in-frame flag)
    """
    offset = 0
    for f in self._frames:
        # BUGFIX: reset the word index for every frame; previously `wi`
        # was unbound on the first frame if walkWords() yielded nothing
        # (NameError) and stale from the previous frame otherwise,
        # corrupting the offset of all following frames
        wi = 0
        for last, (wi, w) in iter_with_last(f.walkWords(showPadding=True)):
            yield (offset + wi, w, last)
        offset += wi + 1
def formatStream(self, data):
    """
    Convert a plain sequence of words into (data, strb, last) tuples.

    :param data: iterable of data words
    """
    strb = self.m
    out = []
    for is_last, word in iter_with_last(data):
        out.append((word, strb, is_last))
    return out
def _impl(self):
    """
    Build a register pipeline where every stage holds a data word together
    with its keep mask; a stage shifts forward only when its word is fully
    consumed (keep & keep_mask == 0) or empty, collapsing holes in the data.

    NOTE(review): ``self.ready``/``self.keep_masks``/``self.regs`` are
    presumably the external control/output interfaces of this component —
    confirm against the component declaration.
    """
    mask_t = Bits(self.DATA_WIDTH // 8, force_vector=True)
    data_fieds = [
        (Bits(self.DATA_WIDTH), "data"),
        (mask_t, "keep"),  # valid= keep != 0
        (BIT, "relict"),  # flag for partially consumed word
        (BIT, "last"),  # flag for end of frame
    ]
    if self.USE_STRB:
        data_fieds.append((mask_t, "strb"), )
    data_t = HStruct(*data_fieds)
    # regs[0] connected to output as first, regs[-1] connected to input
    regs = [
        self._reg(f"r{r_i:d}", data_t, def_val={
            "keep": 0,
            "last": 0,
            "relict": 0
        }) for r_i in range(self.REG_CNT)
    ]
    ready = self.ready
    keep_masks = self.keep_masks
    fully_consumed_flags = []
    for i, r in enumerate(regs):
        # a register is fully consumed when masking removes all keep bits
        _fully_consumed = rename_signal(
            self, (r.keep & keep_masks[i])._eq(0),
            f"r{i:d}_fully_consumed")
        fully_consumed_flags.append(_fully_consumed)
    for i, (is_first_on_input_r, r) in enumerate(iter_with_last(regs)):
        keep_mask_all = mask(r.keep._dtype.bit_length())
        prev_keep_mask = self._sig(f"prev_keep_mask_{i:d}_tmp",
                                   r.keep._dtype)
        prev_last_mask = self._sig(f"prev_last_mask_{i:d}_tmp")
        if is_first_on_input_r:
            # is register connected directly to dataIn
            r_prev = self.dataIn
            If(
                r_prev.valid,
                prev_keep_mask(keep_mask_all),
                prev_last_mask(1)
            ).Else(
                # flush (invalid input but the data can be dispersed
                # in registers so we need to collapse it)
                prev_keep_mask(0),
                prev_last_mask(0),
            )
            if self.REG_CNT > 1:
                next_empty = regs[i - 1].keep._eq(0)
            else:
                next_empty = 0
            whole_pipeline_shift = (
                ready & (regs[0].keep & self.keep_masks[0])._eq(0))
            r_prev.ready(
                r.keep._eq(0)  # last input reg empty
                | whole_pipeline_shift
                | next_empty)
        else:
            r_prev = regs[i + 1]
            prev_last_mask(1)
            If(
                r.keep._eq(0),  # flush
                prev_keep_mask(keep_mask_all),
            ).Else(prev_keep_mask(keep_masks[i + 1]), )
        data_drive = [
            r.data(r_prev.data),
        ]
        if self.USE_STRB:
            data_drive.append(r.strb(r_prev.strb))
        is_empty = r.keep._eq(0)
        fully_consumed = fully_consumed_flags[i]
        if i == 0:
            # last register in path
            If(
                (ready & fully_consumed) | is_empty,
                *data_drive,
                r.keep(r_prev.keep & prev_keep_mask),
                r.last(r_prev.last & prev_last_mask),
                r.relict(
                    0 if is_first_on_input_r else
                    # [TODO] potentially it should not be keep[0] but fist keep with 1
                    r_prev.relict
                    | (r_prev.last
                       & (r_prev.keep[0]
                          & ~keep_masks[i + 1][0]
                          & ~fully_consumed_flags[i + 1])))
            ).Elif(
                ready,
                r.keep(r.keep & keep_masks[i]),
                r.relict(1),
                # became relict if there is some 1 in keep (== not fully consumed)
            )
        else:
            next_fully_consumed = fully_consumed_flags[i - 1]
            next_is_empty = regs[i - 1].keep._eq(0)
            if is_first_on_input_r:
                is_relict = 0
            else:
                prev_fully_consumed = fully_consumed_flags[i + 1]
                is_relict = r_prev.relict | ~prev_fully_consumed
            If((ready & next_fully_consumed) | is_empty | next_is_empty,
               *data_drive,
               r.keep(r_prev.keep & prev_keep_mask),
               r.last(r_prev.last & prev_last_mask),
               r.relict(is_relict))
    # connect internal registers to the external register interfaces
    for rout, rin in zip(self.regs, regs):
        rout.data(rin.data)
        if self.USE_STRB:
            rout.strb(rin.strb)
        rout.keep(rin.keep)
        rout.relict(rin.relict)
        rout.last(rin.last)