def _make_fifo(self, arbiter_side, logic_side, cd_logic, reset, depth, wrapper=lambda x: x):
    """Create an 8-bit FIFO between the arbiter ("sys") side and an
    optional logic clock domain.

    If cd_logic is None a SyncFIFOBuffered is used, optionally made
    resettable through ResetInserter.  Otherwise an AsyncFIFO bridges
    the two domains; its write/read domains are renamed to "sys" and
    "logic", and a FIFO-local "logic" clock domain is driven from the
    caller-supplied cd_logic.  The FIFO is registered as a submodule
    and returned.
    """
    if cd_logic is None:
        fifo = wrapper(SyncFIFOBuffered(8, depth))
        if reset is not None:
            # ResetInserter adds a synchronous "reset" control input.
            fifo = ResetInserter()(fifo)
            fifo.comb += fifo.reset.eq(reset)
    else:
        assert isinstance(cd_logic, ClockDomain)
        fifo = wrapper(
            ClockDomainsRenamer({
                arbiter_side: "sys",
                logic_side: "logic",
            })(AsyncFIFO(8, depth)))
        if reset is not None:
            raise NotImplementedError(
                "reset not yet implemented for async FIFOs")
        # Forward the caller's clock (and reset, if any) into the
        # FIFO-local "logic" domain.
        fifo.clock_domains.cd_logic = ClockDomain()
        self.comb += fifo.cd_logic.clk.eq(cd_logic.clk)
        if cd_logic.rst is not None:
            self.comb += fifo.cd_logic.rst.eq(cd_logic.rst)
    self.submodules += fifo
    return fifo
def __init__(self, pads, asmiport):
    """Video input pipeline: capture serialized pixel data, cross it
    from the pixel clock into the sys domain, pack 10-bit words and
    DMA them to memory."""
    self.submodules.edid = EDID(pads)
    self.submodules.clocking = Clocking(pads)

    # Some pad definitions only expose the inverted data pin; fall back
    # to data0_n and invert in the capture block.
    invert = False
    try:
        s = getattr(pads, "data0")
    except AttributeError:
        s = getattr(pads, "data0_n")
        invert = True
    self.submodules.data0_cap = DataCapture(8, invert)
    self.comb += [
        self.data0_cap.pad.eq(s),
        self.data0_cap.serdesstrobe.eq(self.clocking.serdesstrobe)
    ]

    # Cross captured 10-bit symbols from the "pix" to the "sys" domain.
    fifo = RenameClockDomains(AsyncFIFO(10, 256), {"write": "pix", "read": "sys"})
    self.submodules += fifo
    self.comb += [
        fifo.din.eq(self.data0_cap.d),
        fifo.we.eq(1)
    ]

    # Pack 10-bit words (padded to 16 bits) to the memory bus width.
    pack_factor = asmiport.hub.dw//16
    self.submodules.packer = structuring.Pack([("word", 10), ("pad", 6)], pack_factor)
    self.submodules.cast = structuring.Cast(self.packer.source.payload.layout, asmiport.hub.dw)
    # NOTE(review): `lasmim` is not defined in this scope -- the
    # parameter is `asmiport`, and dma_lasmi.Writer suggests a LASMI
    # port.  This looks like an incomplete port of this module; confirm
    # which memory interface is intended.
    self.submodules.dma = spi.DMAWriteController(dma_lasmi.Writer(lasmim), spi.MODE_SINGLE_SHOT)
    self.comb += [
        self.packer.sink.stb.eq(fifo.readable),
        fifo.re.eq(self.packer.sink.ack),
        self.packer.sink.word.eq(fifo.dout),
        self.packer.source.connect_flat(self.cast.sink),
        self.cast.source.connect_flat(self.dma.data)
    ]
def __init__(self, interface, counter, fifo_depth):
    """RTIO input collector: buffers timestamped events arriving in the
    rio domain into a FIFO read out in the rsys domain, and signals
    overflows with a single pulse per overflow burst."""
    data_width = rtlink.get_data_width(interface)
    fine_ts_width = rtlink.get_fine_ts_width(interface)

    # Event layout depends on the rtlink interface capabilities.
    ev_layout = []
    if data_width:
        ev_layout.append(("data", data_width))
    if interface.timestamped:
        ev_layout.append(("timestamp", counter.width + fine_ts_width))
    self.ev = Record(ev_layout)
    self.readable = Signal()
    self.re = Signal()
    self.overflow = Signal()  # pulsed

    # # #

    fifo = RenameClockDomains(AsyncFIFO(ev_layout, fifo_depth), {
        "read": "rsys",
        "write": "rio"
    })
    self.submodules += fifo

    # FIFO write (rio domain)
    if data_width:
        self.comb += fifo.din.data.eq(interface.data)
    if interface.timestamped:
        if fine_ts_width:
            # Fine timestamp occupies the low bits of the full timestamp.
            full_ts = Cat(interface.fine_ts, counter.value_rio)
        else:
            full_ts = counter.value_rio
        self.comb += fifo.din.timestamp.eq(full_ts)
    self.comb += fifo.we.eq(interface.stb)

    # FIFO read (rsys domain)
    self.comb += [
        self.ev.eq(fifo.dout),
        self.readable.eq(fifo.readable),
        fifo.re.eq(self.re)
    ]

    # Overflow reporting: a write to a full FIFO fires overflow_sync
    # once; "blind" suppresses further pulses until the acknowledgement
    # makes it back to the rio domain.
    overflow_sync = PulseSynchronizer("rio", "rsys")
    overflow_ack_sync = PulseSynchronizer("rsys", "rio")
    self.submodules += overflow_sync, overflow_ack_sync
    overflow_blind = Signal()
    self.comb += overflow_sync.i.eq(fifo.we & ~fifo.writable & ~overflow_blind)
    self.sync.rio += [
        If(fifo.we & ~fifo.writable, overflow_blind.eq(1)),
        If(overflow_ack_sync.o, overflow_blind.eq(0))
    ]
    self.comb += [
        overflow_ack_sync.i.eq(overflow_sync.o),
        self.overflow.eq(overflow_sync.o)
    ]
def __init__(self, interface, counter, fifo_depth):
    """RTIO input collector (flat-FIFO variant): buffers timestamped
    events from the rio domain into an rsys-side FIFO; overflow is
    reported through a _BlindTransfer pulse."""
    data_width = rtlink.get_data_width(interface)
    fine_ts_width = rtlink.get_fine_ts_width(interface)

    # Event layout depends on the rtlink interface capabilities.
    ev_layout = []
    if data_width:
        ev_layout.append(("data", data_width))
    if interface.timestamped:
        ev_layout.append(("timestamp", counter.width + fine_ts_width))
    self.ev = Record(ev_layout)
    self.readable = Signal()
    self.re = Signal()
    self.overflow = Signal()  # pulsed

    # # #

    # The FIFO stores the flattened record; (de)serialize at both ends.
    fifo = ClockDomainsRenamer({
        "read": "rsys",
        "write": "rio"
    })(AsyncFIFO(layout_len(ev_layout), fifo_depth))
    self.submodules += fifo
    fifo_in = Record(ev_layout)
    fifo_out = Record(ev_layout)
    self.comb += [
        fifo.din.eq(fifo_in.raw_bits()),
        fifo_out.raw_bits().eq(fifo.dout)
    ]

    # FIFO write (rio domain)
    if data_width:
        self.comb += fifo_in.data.eq(interface.data)
    if interface.timestamped:
        if fine_ts_width:
            # Fine timestamp occupies the low bits of the full timestamp.
            full_ts = Cat(interface.fine_ts, counter.value_rtio)
        else:
            full_ts = counter.value_rtio
        self.comb += fifo_in.timestamp.eq(full_ts)
    self.comb += fifo.we.eq(interface.stb)

    # FIFO read (rsys domain)
    self.comb += [
        self.ev.eq(fifo_out),
        self.readable.eq(fifo.readable),
        fifo.re.eq(self.re)
    ]

    # A write to a full FIFO is turned into a single overflow pulse.
    overflow_transfer = _BlindTransfer()
    self.submodules += overflow_transfer
    self.comb += [
        overflow_transfer.i.eq(fifo.we & ~fifo.writable),
        self.overflow.eq(overflow_transfer.o),
    ]
def _make_fifo(self, arbiter_side, logic_side, cd_logic, depth):
    """Build an 8-bit FIFO of the requested depth.

    Without a logic clock domain a SyncFIFOBuffered is returned.
    Otherwise an AsyncFIFO bridges the arbiter ("sys") domain and the
    caller's logic domain: its sides are renamed accordingly, and a
    FIFO-local "logic" clock domain is created and driven from
    cd_logic (clock and, when present, reset).
    """
    # Single-domain case: a plain buffered synchronous FIFO suffices.
    if cd_logic is None:
        return SyncFIFOBuffered(8, depth)

    assert isinstance(cd_logic, ClockDomain)
    domain_map = {arbiter_side: "sys", logic_side: "logic"}
    fifo = ClockDomainsRenamer(domain_map)(AsyncFIFO(8, depth))
    # Create the FIFO-local "logic" domain and drive it from the
    # caller-supplied clock domain.
    fifo.clock_domains.cd_logic = ClockDomain()
    self.comb += fifo.cd_logic.clk.eq(cd_logic.clk)
    if cd_logic.rst is not None:
        self.comb += fifo.cd_logic.rst.eq(cd_logic.rst)
    return fifo
def _make_fifo(self, crossbar_side, logic_side, cd_logic, depth,
               wrapper=lambda x: x):
    """Build an 8-bit FIFO of the requested depth, passed through
    ``wrapper`` before any clock-domain plumbing.

    Without a logic clock domain a wrapped SyncFIFOBuffered is
    returned.  Otherwise a wrapped AsyncFIFO bridges the crossbar
    ("sys") domain and the caller's logic domain, with a FIFO-local
    "logic" clock domain driven from cd_logic.
    """
    # Single-domain case: a plain buffered synchronous FIFO suffices.
    if cd_logic is None:
        return wrapper(SyncFIFOBuffered(8, depth))

    assert isinstance(cd_logic, ClockDomain)
    domain_map = {crossbar_side: "sys", logic_side: "logic"}
    fifo = wrapper(ClockDomainsRenamer(domain_map)(AsyncFIFO(8, depth)))
    # Create the FIFO-local "logic" domain and drive it from the
    # caller-supplied clock domain.
    fifo.clock_domains.cd_logic = ClockDomain()
    self.comb += fifo.cd_logic.clk.eq(cd_logic.clk)
    if cd_logic.rst is not None:
        self.comb += fifo.cd_logic.rst.eq(cd_logic.rst)
    return fifo
def __init__(self, FIFO_DEPTH=1024, ADC_BITS=12):
    """ADC capture front-end: pulse on each frame edge, shift sample
    bits in, and push assembled samples into an async FIFO.

    Bugfix: in the original, every ``.eq()`` expression was created and
    immediately discarded -- in Migen a statement only takes effect when
    added to ``self.comb``/``self.sync`` -- so no connections were made.
    ``fifo.din.eq()`` was also called with no argument (a TypeError at
    elaboration) and duplicated a later ``fifo.din`` assignment, and the
    shift register was never registered as a submodule.
    """
    self.din = Signal(2)
    self.frame = Signal()

    pulser = Pulser()
    self.submodules += pulser

    shiftreg = ADC_ShiftReg()
    self.submodules += shiftreg

    # https://m-labs.hk/migen/manual/reference.html#module-migen.genlib.fifo
    fifo = AsyncFIFO(ADC_BITS, FIFO_DEPTH)
    self.submodules += fifo

    self.comb += [
        pulser.input.eq(self.frame),
        shiftreg.sync.eq(pulser.output),
        shiftreg.input.eq(self.din),
        # Write one assembled sample per frame pulse.
        fifo.we.eq(pulser.output),
        fifo.din.eq(shiftreg.output),
    ]
def _make_fifo(self, arbiter_side, logic_side, cd_logic, reset, depth, wrapper):
    """Create an 8-bit FIFO between the arbiter and logic sides with
    optional reset support.

    The synchronous variant is reset through ResetInserter; the
    asynchronous variant gets dedicated "arbiter"/"logic" clock domains
    with the reset forwarded to the logic side via an
    AsyncResetSynchronizer (see the caveat in the body).
    """
    if cd_logic is None:
        fifo = wrapper(SyncFIFOBuffered(8, depth))
        if reset is not None:
            # ResetInserter adds a synchronous "reset" control input.
            fifo = ResetInserter()(fifo)
            fifo.comb += fifo.reset.eq(reset)
    else:
        assert isinstance(cd_logic, ClockDomain)
        fifo = wrapper(
            ClockDomainsRenamer({
                arbiter_side: "arbiter",
                logic_side: "logic",
            })(AsyncFIFO(8, depth)))
        # Note that for the reset to get asserted AND deasserted, the logic clock domain must
        # have a running clock. This is because, while AsyncResetSynchronizer is indeed
        # asynchronous, the registers in the FIFO logic clock domain reset synchronous
        # to the logic clock, as this is how Migen handles clock domain reset signals.
        #
        # If the logic clock domain does not have a single clock transition between assertion
        # and deassertion of FIFO reset, and the FIFO has not been empty at the time when
        # reset has been asserted, stale data will be read from the FIFO after deassertion.
        #
        # This can lead to all sorts of framing issues, and is rather unfortunate, but at
        # the moment I do not know of a way to fix this, since Migen does not support
        # asynchronous resets.
        fifo.clock_domains.cd_arbiter = ClockDomain(reset_less=reset is None)
        fifo.clock_domains.cd_logic = ClockDomain(reset_less=reset is None)
        fifo.comb += [
            fifo.cd_arbiter.clk.eq(ClockSignal()),
            fifo.cd_logic.clk.eq(cd_logic.clk),
        ]
        if reset is not None:
            # Arbiter side resets synchronously; logic side through the
            # async reset synchronizer (see caveat above).
            fifo.comb += fifo.cd_arbiter.rst.eq(reset)
            fifo.specials += AsyncResetSynchronizer(fifo.cd_logic, reset)
    self.submodules += fifo
    return fifo
def __init__(self, pack_factor):
    """Video output driver: unpacks pixel words arriving in the sys
    domain and emits one pixel per pix clock."""
    self.phy = stream.Endpoint(phy_layout(pack_factor))
    self.busy = Signal()
    # outputs, in the pix clock domain
    self.pix_hsync = Signal()
    self.pix_vsync = Signal()
    self.pix_de = Signal()
    self.pix_r = Signal(bpc_phy)
    self.pix_g = Signal(bpc_phy)
    self.pix_b = Signal(bpc_phy)

    ###

    # Cross packed pixel words from "sys" into the "pix" domain.
    fifo = RenameClockDomains(AsyncFIFO(phy_layout(pack_factor), 512), {
        "write": "sys",
        "read": "pix"
    })
    self.submodules += fifo
    self.comb += [
        self.phy.ack.eq(fifo.writable),
        fifo.we.eq(self.phy.stb),
        fifo.din.eq(self.phy.payload),
        self.busy.eq(0)
    ]

    # Emit one pixel per pix clock; the counter wraps naturally because
    # pack_factor is a power of two.
    unpack_counter = Signal(max=pack_factor)
    assert (pack_factor & (pack_factor - 1) == 0
            )  # only support powers of 2
    self.sync.pix += [
        unpack_counter.eq(unpack_counter + 1),
        self.pix_hsync.eq(fifo.dout.hsync),
        self.pix_vsync.eq(fifo.dout.vsync),
        self.pix_de.eq(fifo.dout.de)
    ]
    for i in range(pack_factor):
        pixel = getattr(fifo.dout, "p" + str(i))
        self.sync.pix += If(unpack_counter == i,
                            self.pix_r.eq(pixel.r),
                            self.pix_g.eq(pixel.g),
                            self.pix_b.eq(pixel.b))
    # Pop the FIFO word once its last pixel has been emitted.
    self.comb += fifo.re.eq(unpack_counter == (pack_factor - 1))
def __init__(self, link_layer, write_fifo_depth=4):
    """DRTIO master RT packet engine: serializes write/echo/set_time/
    reset requests into TX packets and decodes RX replies."""
    # all interface signals in sys domain unless otherwise specified

    # write interface, optimized for throughput
    self.write_stb = Signal()
    self.write_ack = Signal()
    self.write_timestamp = Signal(64)
    self.write_channel = Signal(16)
    self.write_address = Signal(16)
    self.write_data = Signal(512)

    # fifo space interface
    # write with timestamp[48:] == 0xffff to make a fifo space request
    # (space requests have to be ordered wrt writes)
    self.fifo_space_not = Signal()
    self.fifo_space_not_ack = Signal()
    self.fifo_space = Signal(16)

    # echo interface
    self.echo_stb = Signal()
    self.echo_ack = Signal()
    self.echo_sent_now = Signal()  # in rtio domain
    self.echo_received_now = Signal()  # in rtio_rx domain

    # set_time interface
    self.set_time_stb = Signal()
    self.set_time_ack = Signal()
    # in rtio domain, must be valid all time while there is
    # a set_time request pending
    self.tsc_value = Signal(64)

    # reset interface
    self.reset_stb = Signal()
    self.reset_ack = Signal()
    self.reset_phy = Signal()

    # errors
    self.error_not = Signal()
    self.error_not_ack = Signal()
    self.error_code = Signal(8)

    # packet counters
    self.packet_cnt_tx = Signal(32)
    self.packet_cnt_rx = Signal(32)

    # # #

    # RX/TX datapath
    assert len(link_layer.tx_rt_data) == len(link_layer.rx_rt_data)
    assert len(link_layer.tx_rt_data) % 8 == 0
    ws = len(link_layer.tx_rt_data)
    tx_plm = get_m2s_layouts(ws)
    tx_dp = ClockDomainsRenamer("rtio")(TransmitDatapath(
        link_layer.tx_rt_frame, link_layer.tx_rt_data, tx_plm))
    self.submodules += tx_dp
    rx_plm = get_s2m_layouts(ws)
    rx_dp = ClockDomainsRenamer("rtio_rx")(ReceiveDatapath(
        link_layer.rx_rt_frame, link_layer.rx_rt_data, rx_plm))
    self.submodules += rx_dp

    # Write FIFO and extra data count
    wfifo = ClockDomainsRenamer({
        "write": "sys_with_rst",
        "read": "rtio_with_rst"
    })(AsyncFIFO(64 + 16 + 16 + 512, write_fifo_depth))
    self.submodules += wfifo
    write_timestamp_d = Signal(64)
    write_channel_d = Signal(16)
    write_address_d = Signal(16)
    write_data_d = Signal(512)
    self.comb += [
        wfifo.we.eq(self.write_stb),
        self.write_ack.eq(wfifo.writable),
        # Writes are stored flattened; unpacked on the read side.
        wfifo.din.eq(
            Cat(self.write_timestamp, self.write_channel,
                self.write_address, self.write_data)),
        Cat(write_timestamp_d, write_channel_d, write_address_d,
            write_data_d).eq(wfifo.dout)
    ]

    # One-deep output buffer ("wfb") on the FIFO read port, in the rtio
    # domain, so the FSM reads registered data.
    wfb_readable = Signal()
    wfb_re = Signal()
    self.comb += wfifo.re.eq(wfifo.readable & (~wfb_readable | wfb_re))
    self.sync.rtio += \
        If(wfifo.re,
           wfb_readable.eq(1),
        ).Elif(wfb_re,
           wfb_readable.eq(0),
        )

    write_timestamp = Signal(64)
    write_channel = Signal(16)
    write_address = Signal(16)
    write_extra_data_cnt = Signal(8)
    write_data = Signal(512)

    self.sync.rtio += If(wfifo.re,
                         write_timestamp.eq(write_timestamp_d),
                         write_channel.eq(write_channel_d),
                         write_address.eq(write_address_d),
                         write_data.eq(write_data_d))

    # Count how many ws-wide words beyond short_data are non-zero; only
    # those are transmitted after the main packet.
    short_data_len = tx_plm.field_length("write", "short_data")
    write_extra_data_d = Signal(512)
    self.comb += write_extra_data_d.eq(write_data_d[short_data_len:])
    for i in range(512 // ws):
        self.sync.rtio += If(
            wfifo.re,
            If(write_extra_data_d[ws * i:ws * (i + 1)] != 0,
               write_extra_data_cnt.eq(i + 1)))

    write_extra_data = Signal(512)
    self.sync.rtio += If(wfifo.re, write_extra_data.eq(write_extra_data_d))

    # Mux for streaming the extra-data words out one per cycle.
    extra_data_ce = Signal()
    extra_data_last = Signal()
    extra_data_counter = Signal(max=512 // ws + 1)
    self.comb += [
        Case(
            extra_data_counter, {
                i + 1: tx_dp.raw_data.eq(
                    write_extra_data[i * ws:(i + 1) * ws])
                for i in range(512 // ws)
            }),
        extra_data_last.eq(extra_data_counter == write_extra_data_cnt)
    ]
    self.sync.rtio += \
        If(extra_data_ce,
            extra_data_counter.eq(extra_data_counter + 1),
        ).Else(
            extra_data_counter.eq(1)
        )

    # CDC: bridge the sys-domain request/notification interfaces to the
    # rtio / rtio_rx domains.
    fifo_space_not = Signal()
    fifo_space = Signal(16)
    self.submodules += _CrossDomainNotification(
        "rtio_rx", fifo_space_not, fifo_space,
        self.fifo_space_not, self.fifo_space_not_ack, self.fifo_space)

    set_time_stb = Signal()
    set_time_ack = Signal()
    self.submodules += _CrossDomainRequest(
        "rtio", self.set_time_stb, self.set_time_ack, None,
        set_time_stb, set_time_ack, None)

    reset_stb = Signal()
    reset_ack = Signal()
    reset_phy = Signal()
    self.submodules += _CrossDomainRequest(
        "rtio", self.reset_stb, self.reset_ack, self.reset_phy,
        reset_stb, reset_ack, reset_phy)

    echo_stb = Signal()
    echo_ack = Signal()
    self.submodules += _CrossDomainRequest(
        "rtio", self.echo_stb, self.echo_ack, None,
        echo_stb, echo_ack, None)

    error_not = Signal()
    error_code = Signal(8)
    self.submodules += _CrossDomainNotification(
        "rtio_rx", error_not, error_code,
        self.error_not, self.error_not_ack, self.error_code)

    # TX FSM
    tx_fsm = ClockDomainsRenamer("rtio")(FSM(reset_state="IDLE"))
    self.submodules += tx_fsm

    echo_sent_now = Signal()
    self.sync.rtio += self.echo_sent_now.eq(echo_sent_now)
    tsc_value = Signal(64)
    tsc_value_load = Signal()
    self.sync.rtio += If(tsc_value_load, tsc_value.eq(self.tsc_value))

    # Buffered writes take priority; timestamp[48:] == 0xffff marks a
    # fifo space request instead of a regular write.
    tx_fsm.act(
        "IDLE",
        If(
            wfb_readable,
            If(write_timestamp[48:] == 0xffff,
               NextState("FIFO_SPACE")).Else(NextState("WRITE"))).Else(
                If(echo_stb,
                   echo_sent_now.eq(1),
                   NextState("ECHO")).Elif(set_time_stb,
                                           tsc_value_load.eq(1),
                                           NextState("SET_TIME")).Elif(
                                               reset_stb,
                                               NextState("RESET"))))
    tx_fsm.act(
        "WRITE",
        tx_dp.send("write",
                   timestamp=write_timestamp,
                   channel=write_channel,
                   address=write_address,
                   extra_data_cnt=write_extra_data_cnt,
                   short_data=write_data[:short_data_len]),
        If(
            tx_dp.packet_last,
            If(write_extra_data_cnt == 0,
               wfb_re.eq(1),
               NextState("IDLE")).Else(NextState("WRITE_EXTRA"))))
    tx_fsm.act("WRITE_EXTRA",
               tx_dp.raw_stb.eq(1),
               extra_data_ce.eq(1),
               If(extra_data_last,
                  wfb_re.eq(1),
                  NextState("IDLE")))
    tx_fsm.act("FIFO_SPACE",
               tx_dp.send("fifo_space_request", channel=write_channel),
               If(tx_dp.packet_last,
                  wfb_re.eq(1),
                  NextState("IDLE")))
    tx_fsm.act("ECHO",
               tx_dp.send("echo_request"),
               If(tx_dp.packet_last,
                  echo_ack.eq(1),
                  NextState("IDLE")))
    tx_fsm.act(
        "SET_TIME", tx_dp.send("set_time", timestamp=tsc_value),
        If(tx_dp.packet_last,
           set_time_ack.eq(1),
           NextState("IDLE")))
    tx_fsm.act("RESET",
               tx_dp.send("reset", phy=reset_phy),
               If(tx_dp.packet_last,
                  reset_ack.eq(1),
                  NextState("IDLE")))

    # RX FSM
    rx_fsm = ClockDomainsRenamer("rtio_rx")(FSM(reset_state="INPUT"))
    self.submodules += rx_fsm

    # Track whether a frame ends mid-packet (truncation error).
    ongoing_packet_next = Signal()
    ongoing_packet = Signal()
    self.sync.rtio_rx += ongoing_packet.eq(ongoing_packet_next)

    echo_received_now = Signal()
    self.sync.rtio_rx += self.echo_received_now.eq(echo_received_now)

    rx_fsm.act(
        "INPUT",
        If(
            rx_dp.frame_r,
            rx_dp.packet_buffer_load.eq(1),
            If(
                rx_dp.packet_last,
                Case(
                    rx_dp.packet_type, {
                        rx_plm.types["error"]:
                        NextState("ERROR"),
                        rx_plm.types["echo_reply"]:
                        echo_received_now.eq(1),
                        rx_plm.types["fifo_space_reply"]:
                        NextState("FIFO_SPACE"),
                        "default": [
                            error_not.eq(1),
                            error_code.eq(
                                error_codes["unknown_type_local"])
                        ]
                    })).Else(ongoing_packet_next.eq(1))),
        If(~rx_dp.frame_r & ongoing_packet,
           error_not.eq(1),
           error_code.eq(error_codes["truncated_local"])))
    rx_fsm.act("ERROR",
               error_not.eq(1),
               error_code.eq(rx_dp.packet_as["error"].code),
               NextState("INPUT"))
    rx_fsm.act("FIFO_SPACE",
               fifo_space_not.eq(1),
               fifo_space.eq(rx_dp.packet_as["fifo_space_reply"].space),
               NextState("INPUT"))

    # packet counters: count frame rising edges, then Gray-code the
    # counts across to the sys domain.
    tx_frame_r = Signal()
    packet_cnt_tx = Signal(32)
    self.sync.rtio += [
        tx_frame_r.eq(link_layer.tx_rt_frame),
        If(link_layer.tx_rt_frame & ~tx_frame_r,
           packet_cnt_tx.eq(packet_cnt_tx + 1))
    ]
    cdc_packet_cnt_tx = GrayCodeTransfer(32)
    self.submodules += cdc_packet_cnt_tx
    self.comb += [
        cdc_packet_cnt_tx.i.eq(packet_cnt_tx),
        self.packet_cnt_tx.eq(cdc_packet_cnt_tx.o)
    ]
    rx_frame_r = Signal()
    packet_cnt_rx = Signal(32)
    self.sync.rtio_rx += [
        rx_frame_r.eq(link_layer.rx_rt_frame),
        If(link_layer.rx_rt_frame & ~rx_frame_r,
           packet_cnt_rx.eq(packet_cnt_rx + 1))
    ]
    cdc_packet_cnt_rx = ClockDomainsRenamer({"rtio": "rtio_rx"
                                             })(GrayCodeTransfer(32))
    self.submodules += cdc_packet_cnt_rx
    self.comb += [
        cdc_packet_cnt_rx.i.eq(packet_cnt_rx),
        self.packet_cnt_rx.eq(cdc_packet_cnt_rx.o)
    ]
def __init__(self, word_width):
    """Frame capture: packs incoming RGB pixels (pix domain) into
    word_width-bit words, tags the first word of each frame with "sof",
    and streams them out in the sys domain with overflow reporting."""
    # in pix clock domain
    self.valid_i = Signal()
    self.vsync = Signal()
    self.de = Signal()
    self.r = Signal(8)
    self.g = Signal(8)
    self.b = Signal(8)

    # in sys clock domain
    word_layout = [("sof", 1), ("pixels", word_width)]
    self.frame = Source(word_layout)
    self.busy = Signal()

    self._overflow = CSR()

    ###

    # start of frame detection
    vsync_r = Signal()
    new_frame = Signal()
    self.comb += new_frame.eq(self.vsync & ~vsync_r)
    self.sync.pix += vsync_r.eq(self.vsync)

    # pack pixels into words
    cur_word = Signal(word_width)
    cur_word_valid = Signal()
    encoded_pixel = Signal(24)
    self.comb += encoded_pixel.eq(Cat(self.b, self.g, self.r))
    pack_factor = word_width // 24
    assert (pack_factor & (pack_factor - 1) == 0
            )  # only support powers of 2
    pack_counter = Signal(max=pack_factor)
    self.sync.pix += [
        cur_word_valid.eq(0),
        If(
            new_frame,
            cur_word_valid.eq(pack_counter == (pack_factor - 1)),
            pack_counter.eq(0),
        ).Elif(self.valid_i & self.de, [
            # Fill word slots from the top down.
            If(pack_counter == (pack_factor - i - 1),
               cur_word[24 * i:24 * (i + 1)].eq(encoded_pixel))
            for i in range(pack_factor)
        ],
               cur_word_valid.eq(pack_counter == (pack_factor - 1)),
               pack_counter.eq(pack_counter + 1))
    ]

    # FIFO: cross packed words from "pix" into "sys".
    fifo = RenameClockDomains(AsyncFIFO(word_layout, 512), {
        "write": "pix",
        "read": "sys"
    })
    self.submodules += fifo
    self.comb += [fifo.din.pixels.eq(cur_word), fifo.we.eq(cur_word_valid)]
    # "sof" is set on the first word written after a new frame.
    self.sync.pix += \
        If(new_frame,
            fifo.din.sof.eq(1)
        ).Elif(cur_word_valid,
            fifo.din.sof.eq(0)
        )
    self.comb += [
        self.frame.stb.eq(fifo.readable),
        self.frame.payload.eq(fifo.dout),
        fifo.re.eq(self.frame.ack),
        self.busy.eq(0)
    ]

    # overflow detection: latch write-on-full in pix, report via CSR in
    # sys, and clear through a pulse-synchronized handshake.
    pix_overflow = Signal()
    pix_overflow_reset = Signal()
    self.sync.pix += [
        If(fifo.we & ~fifo.writable,
           pix_overflow.eq(1)).Elif(pix_overflow_reset,
                                    pix_overflow.eq(0))
    ]
    sys_overflow = Signal()
    self.specials += MultiReg(pix_overflow, sys_overflow)
    self.submodules.overflow_reset = PulseSynchronizer("sys", "pix")
    self.submodules.overflow_reset_ack = PulseSynchronizer("pix", "sys")
    self.comb += [
        pix_overflow_reset.eq(self.overflow_reset.o),
        self.overflow_reset_ack.i.eq(pix_overflow_reset)
    ]
    # Mask the CSR-visible overflow while a clear is in flight.
    overflow_mask = Signal()
    self.comb += [
        self._overflow.w.eq(sys_overflow & ~overflow_mask),
        self.overflow_reset.i.eq(self._overflow.re)
    ]
    self.sync += \
        If(self._overflow.re,
            overflow_mask.eq(1)
        ).Elif(self.overflow_reset_ack.o,
            overflow_mask.eq(0)
        )
def __init__(self, capture_depth, **kwargs):
    """ECP5 PCIe SERDES test design: captures received 18-bit symbols
    into an async FIFO on a UART-byte trigger and streams them back out
    over UART, with link status shown on the LEDs."""
    self.platform = Platform(**kwargs)
    self.platform.add_extension([
        ("tp0", 0, Pins("X3:5"), IOStandard("LVCMOS33")),
    ])

    self.clock_domains.cd_ref = ClockDomain()
    self.clock_domains.cd_rx = ClockDomain()
    self.clock_domains.cd_tx = ClockDomain()

    self.submodules.serdes = serdes = \
        LatticeECP5PCIeSERDES(self.platform.request("pcie_x1"))
    self.submodules.aligner = aligner = \
        ClockDomainsRenamer("rx")(PCIeSERDESAligner(serdes.lane))
    self.comb += [
        self.cd_ref.clk.eq(serdes.ref_clk),
        # Loop the recovered clocks back as the fabric rx/tx clocks.
        serdes.rx_clk_i.eq(serdes.rx_clk_o),
        self.cd_rx.clk.eq(serdes.rx_clk_i),
        serdes.tx_clk_i.eq(serdes.tx_clk_o),
        self.cd_tx.clk.eq(serdes.tx_clk_i),
    ]
    self.submodules.tx_phy = ClockDomainsRenamer("tx")(PCIePHYTX(aligner))
    self.comb += [
        self.aligner.rx_align.eq(1),
        self.tx_phy.ts.n_fts.eq(0xff),
        self.tx_phy.ts.rate.gen1.eq(1),
    ]

    # NOTE(review): top.sdc constrains tx/rx clocks at 150 MHz while the
    # platform FREQUENCY commands below say 125 MHz -- confirm which is
    # intended.
    with open("top.sdc", "w") as f:
        f.write("define_clock -name {n:serdes_ref_clk} -freq 100.000\n")
        f.write("define_clock -name {n:serdes_tx_clk_o} -freq 150.000\n")
        f.write("define_clock -name {n:serdes_rx_clk_o} -freq 150.000\n")
    self.platform.add_source("top.sdc")
    self.platform.add_platform_command(
        """FREQUENCY NET "serdes_ref_clk" 100 MHz;""")
    self.platform.add_platform_command(
        """FREQUENCY NET "serdes_rx_clk_o" 125 MHz;""")
    self.platform.add_platform_command(
        """FREQUENCY NET "serdes_tx_clk_o" 125 MHz;""")

    # Free-running counters; bit 25 blinks an LED per clock domain.
    refclkcounter = Signal(32)
    self.sync.ref += refclkcounter.eq(refclkcounter + 1)
    rxclkcounter = Signal(32)
    self.sync.rx += rxclkcounter.eq(rxclkcounter + 1)
    txclkcounter = Signal(32)
    self.sync.tx += txclkcounter.eq(txclkcounter + 1)

    led_att1 = self.platform.request("user_led")
    led_att2 = self.platform.request("user_led")
    led_sta1 = self.platform.request("user_led")
    led_sta2 = self.platform.request("user_led")
    led_err1 = self.platform.request("user_led")
    led_err2 = self.platform.request("user_led")
    led_err3 = self.platform.request("user_led")
    led_err4 = self.platform.request("user_led")
    # LEDs are active-low, hence the outer inversion.
    self.comb += [
        led_att1.eq(~(refclkcounter[25])),
        led_att2.eq(~(0)),
        led_sta1.eq(~(rxclkcounter[25])),
        led_sta2.eq(~(txclkcounter[25])),
        led_err1.eq(~(~serdes.lane.rx_present)),
        led_err2.eq(~(~serdes.lane.rx_locked)),
        led_err3.eq(~(~serdes.lane.rx_aligned)),
        led_err4.eq(~(0)),
    ]

    # Trigger originates in "ref" (UART byte) and is synchronized to "rx".
    trigger_rx = Signal()
    trigger_ref = Signal()
    self.specials += MultiReg(trigger_ref, trigger_rx, odomain="rx")

    # Capture rx symbols until the FIFO fills.
    capture = Signal()
    self.submodules.symbols = symbols = ClockDomainsRenamer({
        "write": "rx",
        "read": "ref"
    })(AsyncFIFO(width=18, depth=capture_depth))
    self.comb += [
        symbols.din.eq(Cat(aligner.rx_symbol)),
        symbols.we.eq(capture)
    ]
    self.sync.rx += [
        If(trigger_rx, capture.eq(1)).Elif(~symbols.writable,
                                           capture.eq(0))
    ]

    uart_pads = Pads(self.platform.request("serial"))
    self.submodules += uart_pads
    self.submodules.uart = uart = ClockDomainsRenamer("ref")(UART(
        uart_pads, bit_cyc=uart_bit_cyc(100e6, 115200)[0]))
    self.comb += [uart.rx_ack.eq(uart.rx_rdy), trigger_ref.eq(uart.rx_rdy)]

    # Dump: 0xff 0xff sync header, then 3 bytes (high to low) per
    # captured 18-bit symbol, until the FIFO drains.
    self.submodules.fsm = ClockDomainsRenamer("ref")(
        FSM(reset_state="WAIT"))
    self.fsm.act("WAIT", If(uart.rx_rdy, NextState("SYNC-1")))
    self.fsm.act(
        "SYNC-1",
        If(uart.tx_rdy, uart.tx_ack.eq(1), uart.tx_data.eq(0xff),
           NextState("SYNC-2")))
    self.fsm.act(
        "SYNC-2",
        If(uart.tx_rdy, uart.tx_ack.eq(1), uart.tx_data.eq(0xff),
           NextState("BYTE-0")))
    self.fsm.act(
        "BYTE-0",
        If(symbols.readable & uart.tx_rdy, uart.tx_ack.eq(1),
           uart.tx_data.eq(symbols.dout[16:]),
           NextState("BYTE-1")).Elif(~symbols.readable, NextState("WAIT")))
    self.fsm.act(
        "BYTE-1",
        If(symbols.readable & uart.tx_rdy, uart.tx_ack.eq(1),
           uart.tx_data.eq(symbols.dout[8:]), NextState("BYTE-2")))
    self.fsm.act(
        "BYTE-2",
        If(symbols.readable & uart.tx_rdy, uart.tx_ack.eq(1),
           uart.tx_data.eq(symbols.dout[0:]), symbols.re.eq(1),
           NextState("BYTE-0")))

    tp0 = self.platform.request("tp0")
def __init__(self, word_width, fifo_depth):
    """Frame capture with color conversion: converts incoming RGB
    pixels to YCbCr 4:2:2 (pix domain), packs them into word_width-bit
    words tagged with "sof", and streams them out in the sys domain
    with overflow reporting."""
    # in pix clock domain
    self.valid_i = Signal()
    self.vsync = Signal()
    self.de = Signal()
    self.r = Signal(8)
    self.g = Signal(8)
    self.b = Signal(8)

    # in sys clock domain
    word_layout = [("sof", 1), ("pixels", word_width)]
    self.frame = Source(word_layout)
    self.busy = Signal()

    self._overflow = CSR()

    ###

    de_r = Signal()
    self.sync.pix += de_r.eq(self.de)

    # RGB -> YCbCr 4:4:4 -> 4:2:2 conversion pipeline, all in "pix".
    rgb2ycbcr = RGB2YCbCr()
    self.submodules += RenameClockDomains(rgb2ycbcr, "pix")
    chroma_downsampler = YCbCr444to422()
    self.submodules += RenameClockDomains(chroma_downsampler, "pix")
    self.comb += [
        rgb2ycbcr.sink.stb.eq(self.valid_i),
        rgb2ycbcr.sink.sop.eq(self.de & ~de_r),
        rgb2ycbcr.sink.r.eq(self.r),
        rgb2ycbcr.sink.g.eq(self.g),
        rgb2ycbcr.sink.b.eq(self.b),
        Record.connect(rgb2ycbcr.source, chroma_downsampler.sink),
        chroma_downsampler.source.ack.eq(1)
    ]

    # XXX need clean up
    # Delay de/vsync to compensate for the conversion pipeline latency.
    de = self.de
    vsync = self.vsync
    for i in range(rgb2ycbcr.latency + chroma_downsampler.latency):
        next_de = Signal()
        next_vsync = Signal()
        self.sync.pix += [next_de.eq(de), next_vsync.eq(vsync)]
        de = next_de
        vsync = next_vsync

    # start of frame detection
    vsync_r = Signal()
    new_frame = Signal()
    self.comb += new_frame.eq(vsync & ~vsync_r)
    self.sync.pix += vsync_r.eq(vsync)

    # pack pixels into words
    cur_word = Signal(word_width)
    cur_word_valid = Signal()
    encoded_pixel = Signal(16)
    # NOTE(review): the trailing comma makes this a one-element tuple
    # added to self.comb; harmless but likely unintended.
    self.comb += encoded_pixel.eq(
        Cat(chroma_downsampler.source.y, chroma_downsampler.source.cb_cr)),
    pack_factor = word_width // 16
    assert (pack_factor & (pack_factor - 1) == 0
            )  # only support powers of 2
    pack_counter = Signal(max=pack_factor)
    self.sync.pix += [
        cur_word_valid.eq(0),
        If(
            new_frame,
            cur_word_valid.eq(pack_counter == (pack_factor - 1)),
            pack_counter.eq(0),
        ).Elif(chroma_downsampler.source.stb & de, [
            # Fill word slots from the top down.
            If(pack_counter == (pack_factor - i - 1),
               cur_word[16 * i:16 * (i + 1)].eq(encoded_pixel))
            for i in range(pack_factor)
        ],
               cur_word_valid.eq(pack_counter == (pack_factor - 1)),
               pack_counter.eq(pack_counter + 1))
    ]

    # FIFO: cross packed words from "pix" into "sys".
    fifo = RenameClockDomains(AsyncFIFO(word_layout, fifo_depth), {
        "write": "pix",
        "read": "sys"
    })
    self.submodules += fifo
    self.comb += [fifo.din.pixels.eq(cur_word), fifo.we.eq(cur_word_valid)]
    # "sof" is set on the first word written after a new frame.
    self.sync.pix += \
        If(new_frame,
            fifo.din.sof.eq(1)
        ).Elif(cur_word_valid,
            fifo.din.sof.eq(0)
        )
    self.comb += [
        self.frame.stb.eq(fifo.readable),
        self.frame.payload.eq(fifo.dout),
        fifo.re.eq(self.frame.ack),
        self.busy.eq(0)
    ]

    # overflow detection: latch write-on-full in pix, report via CSR in
    # sys, and clear through a pulse-synchronized handshake.
    pix_overflow = Signal()
    pix_overflow_reset = Signal()
    self.sync.pix += [
        If(fifo.we & ~fifo.writable,
           pix_overflow.eq(1)).Elif(pix_overflow_reset,
                                    pix_overflow.eq(0))
    ]
    sys_overflow = Signal()
    self.specials += MultiReg(pix_overflow, sys_overflow)
    self.submodules.overflow_reset = PulseSynchronizer("sys", "pix")
    self.submodules.overflow_reset_ack = PulseSynchronizer("pix", "sys")
    self.comb += [
        pix_overflow_reset.eq(self.overflow_reset.o),
        self.overflow_reset_ack.i.eq(pix_overflow_reset)
    ]
    # Mask the CSR-visible overflow while a clear is in flight.
    overflow_mask = Signal()
    self.comb += [
        self._overflow.w.eq(sys_overflow & ~overflow_mask),
        self.overflow_reset.i.eq(self._overflow.re)
    ]
    self.sync += \
        If(self._overflow.re,
            overflow_mask.eq(1)
        ).Elif(self.overflow_reset_ack.o,
            overflow_mask.eq(0)
        )
def __init__(self, rbus, counter, fifo_depth, guard_io_cycles):
    """RTIO output manager (per-channel FIFO variant): schedules
    timestamped output events, one FIFO + one-entry buffer per channel,
    and reports underflow and sequence errors."""
    self.sel = Signal(max=len(rbus))
    # timestamp and value must be valid 1 cycle before we
    self.timestamp = Signal(counter.width + fine_ts_width)
    self.value = Signal(2)
    self.writable = Signal()
    self.we = Signal()  # maximum throughput 1/2
    self.underflow = Signal()  # valid 2 cycles after we
    self.underflow_reset = Signal()
    self.sequence_error = Signal()
    self.sequence_error_reset = Signal()

    # # #

    signal_underflow = Signal()
    signal_sequence_error = Signal()
    fifos = []
    ev_layout = [("timestamp", counter.width + fine_ts_width),
                 ("value", 2)]
    for n, chif in enumerate(rbus):
        # FIFO: rsys (CPU side) -> rio (I/O side)
        fifo = RenameClockDomains(AsyncFIFO(ev_layout, fifo_depth), {
            "write": "rsys",
            "read": "rio"
        })
        self.submodules += fifo
        fifos.append(fifo)

        # Buffer: one pending event between the CPU and the FIFO.
        buf_pending = Signal()
        buf = Record(ev_layout)
        buf_just_written = Signal()

        # Special cases (registered comparisons against the buffer):
        # replace = same coarse timestamp, sequence_error = going
        # backwards in time, nop = same output value.
        replace = Signal()
        sequence_error = Signal()
        nop = Signal()
        self.sync.rsys += [
            replace.eq(self.timestamp == buf.timestamp[fine_ts_width:]),
            sequence_error.eq(
                self.timestamp < buf.timestamp[fine_ts_width:]),
            nop.eq(self.value == buf.value)
        ]
        self.comb += If(self.we & (self.sel == n) & sequence_error,
                        signal_sequence_error.eq(1))

        # Buffer read and FIFO write.  The buffer is flushed to the
        # FIFO either when its deadline approaches (guard time) or when
        # a new non-replacing event displaces it.
        self.comb += fifo.din.eq(buf)
        in_guard_time = Signal()
        self.comb += in_guard_time.eq(
            buf.timestamp[fine_ts_width:] <
            counter.o_value_sys + guard_io_cycles)
        self.sync.rsys += If(in_guard_time, buf_pending.eq(0))
        self.comb += \
            If(buf_pending,
                If(in_guard_time,
                    # Flushed on the same cycle it was written: too late.
                    If(buf_just_written,
                        signal_underflow.eq(1)
                    ).Else(
                        fifo.we.eq(1)
                    )
                ),
                If((self.we & (self.sel == n)
                        & ~replace & ~nop & ~sequence_error),
                    fifo.we.eq(1)
                )
            )

        # Buffer write
        # Must come after read to handle concurrent read+write properly
        self.sync.rsys += [
            buf_just_written.eq(0),
            If(self.we & (self.sel == n) & ~nop & ~sequence_error,
               buf_just_written.eq(1),
               buf_pending.eq(1),
               buf.timestamp.eq(self.timestamp),
               buf.value.eq(self.value))
        ]

        # Buffer output of FIFO to improve timing
        dout_stb = Signal()
        dout_ack = Signal()
        dout = Record(ev_layout)
        self.sync.rio += \
            If(fifo.re,
                dout_stb.eq(1),
                dout.eq(fifo.dout)
            ).Elif(dout_ack,
                dout_stb.eq(0)
            )
        self.comb += fifo.re.eq(fifo.readable & (~dout_stb | dout_ack))

        # FIFO read through buffer: fire the event when its coarse
        # timestamp matches the rio counter.
        self.comb += [
            dout_ack.eq(
                dout.timestamp[fine_ts_width:] == counter.o_value_rio),
            chif.o_stb.eq(dout_stb & dout_ack),
            chif.o_value.eq(dout.value)
        ]
        if fine_ts_width:
            self.comb += chif.o_fine_ts.eq(dout.timestamp[:fine_ts_width])

    # writable reflects the FIFO currently selected by self.sel.
    self.comb += \
        self.writable.eq(Array(fifo.writable for fifo in fifos)[self.sel])
    # Error flags are sticky until explicitly reset.
    self.sync.rsys += [
        If(self.underflow_reset, self.underflow.eq(0)),
        If(self.sequence_error_reset, self.sequence_error.eq(0)),
        If(signal_underflow, self.underflow.eq(1)),
        If(signal_sequence_error, self.sequence_error.eq(1))
    ]
def __init__(self, interface, counter, fifo_depth, guard_io_cycles):
    """RTIO output manager: schedules timestamped output events through
    a FIFO plus a one-entry buffer, with replace/NOP suppression and
    underflow / sequence / collision error reporting."""
    data_width = rtlink.get_data_width(interface)
    address_width = rtlink.get_address_width(interface)
    fine_ts_width = rtlink.get_fine_ts_width(interface)

    # Event layout depends on the rtlink interface capabilities.
    ev_layout = []
    if data_width:
        ev_layout.append(("data", data_width))
    if address_width:
        ev_layout.append(("address", address_width))
    ev_layout.append(("timestamp", counter.width + fine_ts_width))
    # ev must be valid 1 cycle before we to account for the latency in
    # generating replace, sequence_error and nop
    self.ev = Record(ev_layout)

    self.writable = Signal()
    self.we = Signal()  # maximum throughput 1/2
    self.underflow = Signal()  # valid 1 cycle after we, pulsed
    self.sequence_error = Signal()
    self.collision_error = Signal()

    # # #

    # FIFO: rsys (CPU side) -> rio (I/O side)
    fifo = RenameClockDomains(AsyncFIFO(ev_layout, fifo_depth), {
        "write": "rsys",
        "read": "rio"
    })
    self.submodules += fifo

    # Buffer: one pending event between the CPU and the FIFO.
    buf_pending = Signal()
    buf = Record(ev_layout)
    buf_just_written = Signal()

    # Special cases (registered comparisons against the buffer).
    replace = Signal()
    sequence_error = Signal()
    collision_error = Signal()
    any_error = Signal()
    nop = Signal()
    self.sync.rsys += [
        # Note: replace does not perform any RTLink address checks,
        # i.e. a write to a different address will be silently replaced
        # as well.
        replace.eq(self.ev.timestamp == buf.timestamp),
        # Detect sequence errors on coarse timestamps only
        # so that they are mutually exclusive with collision errors.
        sequence_error.eq(self.ev.timestamp[fine_ts_width:] <
                          buf.timestamp[fine_ts_width:])
    ]
    if fine_ts_width:
        # Collision: same coarse timestamp but different fine timestamp.
        self.sync.rsys += collision_error.eq(
            (self.ev.timestamp[fine_ts_width:] ==
             buf.timestamp[fine_ts_width:])
            & (self.ev.timestamp[:fine_ts_width] !=
               buf.timestamp[:fine_ts_width]))
    self.comb += any_error.eq(sequence_error | collision_error)
    if interface.suppress_nop:
        # disable NOP at reset: do not suppress a first write with all 0s
        nop_en = Signal(reset=0)
        self.sync.rsys += [
            nop.eq(nop_en & optree("&", [
                getattr(self.ev, a) == getattr(buf, a)
                for a in ("data", "address") if hasattr(self.ev, a)
            ],
                                   default=0)),
            # buf now contains valid data. enable NOP.
            If(self.we & ~any_error, nop_en.eq(1)),
            # underflows cancel the write. allow it to be retried.
            If(self.underflow, nop_en.eq(0))
        ]
    self.comb += [
        self.sequence_error.eq(self.we & sequence_error),
        self.collision_error.eq(self.we & collision_error)
    ]

    # Buffer read and FIFO write.  The buffer is flushed to the FIFO
    # either when its deadline approaches (guard time) or when a new
    # non-replacing event displaces it.
    self.comb += fifo.din.eq(buf)
    in_guard_time = Signal()
    self.comb += in_guard_time.eq(
        buf.timestamp[fine_ts_width:] <
        counter.value_sys + guard_io_cycles)
    self.sync.rsys += If(in_guard_time, buf_pending.eq(0))
    self.comb += \
        If(buf_pending,
            If(in_guard_time,
                # Flushed on the same cycle it was written: too late.
                If(buf_just_written,
                    self.underflow.eq(1)
                ).Else(
                    fifo.we.eq(1)
                )
            ),
            If(self.we & ~replace & ~nop & ~any_error,
                fifo.we.eq(1)
            )
        )

    # Buffer write
    # Must come after read to handle concurrent read+write properly
    self.sync.rsys += [
        buf_just_written.eq(0),
        If(self.we & ~nop & ~any_error,
           buf_just_written.eq(1),
           buf_pending.eq(1),
           buf.eq(self.ev))
    ]
    self.comb += self.writable.eq(fifo.writable)

    # Buffer output of FIFO to improve timing
    dout_stb = Signal()
    dout_ack = Signal()
    dout = Record(ev_layout)
    self.sync.rio += \
        If(fifo.re,
            dout_stb.eq(1),
            dout.eq(fifo.dout)
        ).Elif(dout_ack,
            dout_stb.eq(0)
        )
    self.comb += fifo.re.eq(fifo.readable & (~dout_stb | dout_ack))

    # FIFO read through buffer: fire the event when its coarse
    # timestamp matches the rio counter.
    # TODO: report error on stb & busy
    self.comb += [
        dout_ack.eq(dout.timestamp[fine_ts_width:] == counter.value_rio),
        interface.stb.eq(dout_stb & dout_ack)
    ]
    if data_width:
        self.comb += interface.data.eq(dout.data)
    if address_width:
        self.comb += interface.address.eq(dout.address)
    if fine_ts_width:
        self.comb += interface.fine_ts.eq(dout.timestamp[:fine_ts_width])
def __init__(self):
    """Dual-channel ADC deserializer: shifts two 2-bit input streams in
    the adc_bitclk domain and crosses the assembled samples into the
    sys domain through an async FIFO."""
    # inputs (adc_bitclk domain)
    self.adc_dout0 = Signal(2)
    self.adc_dout1 = Signal(2)
    self.i_fclk = Signal()
    self.i_we = Signal()
    # read interface (sys domain)
    self.i_re = Signal()
    self.o_readable = Signal()
    self.o_dout = Signal(ADC_BITS)
    self.twos_complement = Signal(ADC_BITS)

    # Pulse once per frame-clock edge to mark a complete sample.
    self.pulser = pulser = Pulser()
    self.comb += pulser.input.eq(self.i_fclk)
    self.submodules += pulser

    self.shiftreg0 = shiftreg0 = ADC_ShiftReg(bits_per_cycle=2)
    self.shiftreg1 = shiftreg1 = ADC_ShiftReg(bits_per_cycle=2)
    self.comb += shiftreg0.input.eq(self.adc_dout0)
    self.comb += shiftreg1.input.eq(self.adc_dout1)
    self.submodules += [shiftreg0, shiftreg1]

    self.fifo = fifo = ClockDomainsRenamer({"write": "adc_bitclk",
                                            "read": "sys"})(AsyncFIFO(ADC_BITS, FIFO_DEPTH))
    self.comb += [
        # Write one assembled sample per frame pulse, gated by i_we.
        fifo.we.eq(self.i_we & pulser.output),
        fifo.din.eq(Cat(shiftreg0.output, shiftreg1.output)),
        fifo.re.eq(self.i_re),
        self.o_readable.eq(fifo.readable),
        self.o_dout.eq(fifo.dout),
    ]
    self.submodules += fifo
    # NOTE(review): this negates the write-side din (adc_bitclk domain),
    # not the sys-side dout -- confirm whether twos_complement should be
    # derived from fifo.dout instead.
    self.comb += self.twos_complement.eq(~fifo.din + 1)
def __init__(self, interface, counter, fifo_depth, guard_io_cycles):
    """Output (write) manager for one RTIO channel.

    Events are submitted in the ``rsys`` domain, held in a one-deep
    buffer (so replace/sequence-error/collision can be detected against
    the previous event), pushed through an async FIFO into the ``rio``
    domain, and presented to ``interface`` when the event's coarse
    timestamp matches the RTIO counter.
    """
    data_width = rtlink.get_data_width(interface)
    address_width = rtlink.get_address_width(interface)
    fine_ts_width = rtlink.get_fine_ts_width(interface)

    # Event record layout: optional data/address fields plus a full
    # (coarse + fine) timestamp.
    ev_layout = []
    if data_width:
        ev_layout.append(("data", data_width))
    if address_width:
        ev_layout.append(("address", address_width))
    ev_layout.append(("timestamp", counter.width + fine_ts_width))
    # ev must be valid 1 cycle before we to account for the latency in
    # generating replace, sequence_error and collision
    self.ev = Record(ev_layout)

    self.writable = Signal()
    self.we = Signal()  # maximum throughput 1/2
    self.underflow = Signal()  # valid 1 cycle after we, pulsed
    self.sequence_error = Signal()
    self.collision = Signal()
    self.busy = Signal()  # pulsed

    # # #

    # FIFO (rsys -> rio clock domain crossing)
    fifo = ClockDomainsRenamer({
        "write": "rsys", "read": "rio"
    })(AsyncFIFO(layout_len(ev_layout), fifo_depth))
    self.submodules += fifo
    fifo_in = Record(ev_layout)
    fifo_out = Record(ev_layout)
    self.comb += [
        fifo.din.eq(fifo_in.raw_bits()),
        fifo_out.raw_bits().eq(fifo.dout)
    ]

    # Buffer (one pending event, rsys domain)
    buf_pending = Signal()
    buf = Record(ev_layout)
    buf_just_written = Signal()

    # Special cases
    replace = Signal()
    sequence_error = Signal()
    collision = Signal()
    any_error = Signal()
    if interface.enable_replace:
        # Note: replace may be asserted at the same time as collision
        # when addresses are different. In that case, it is a collision.
        self.sync.rsys += replace.eq(self.ev.timestamp == buf.timestamp)
    # Detect sequence errors on coarse timestamps only
    # so that they are mutually exclusive with collision errors.
    self.sync.rsys += sequence_error.eq(self.ev.timestamp[fine_ts_width:] <
                                        buf.timestamp[fine_ts_width:])
    if interface.enable_replace:
        if address_width:
            different_addresses = self.ev.address != buf.address
        else:
            different_addresses = 0
        if fine_ts_width:
            # Collision: same coarse timestamp, but different fine
            # timestamp or different address (cannot be replaced).
            self.sync.rsys += collision.eq(
                (self.ev.timestamp[fine_ts_width:] == buf.timestamp[fine_ts_width:])
                & ((self.ev.timestamp[:fine_ts_width] != buf.timestamp[:fine_ts_width])
                   | different_addresses))
    else:
        # Without replace support, any event at the same coarse
        # timestamp as the buffered one is a collision.
        self.sync.rsys += collision.eq(
            self.ev.timestamp[fine_ts_width:] == buf.timestamp[fine_ts_width:])
    self.comb += [
        any_error.eq(sequence_error | collision),
        self.sequence_error.eq(self.we & sequence_error),
        self.collision.eq(self.we & collision)
    ]

    # Buffer read and FIFO write:
    # flush the buffered event into the FIFO when its deadline
    # approaches (guard time) or when a new non-replacing event
    # displaces it; flag an underflow if it was written this cycle
    # and is already inside the guard window.
    self.comb += fifo_in.eq(buf)
    in_guard_time = Signal()
    self.comb += in_guard_time.eq(
        buf.timestamp[fine_ts_width:] < counter.value_sys + guard_io_cycles)
    self.sync.rsys += If(in_guard_time, buf_pending.eq(0))
    self.comb += \
        If(buf_pending,
            If(in_guard_time,
                If(buf_just_written,
                    self.underflow.eq(1)
                ).Else(
                    fifo.we.eq(1)
                )
            ),
            If(self.we & ~replace & ~any_error,
                fifo.we.eq(1)
            )
        )

    # Buffer write
    # Must come after read to handle concurrent read+write properly
    self.sync.rsys += [
        buf_just_written.eq(0),
        If(self.we & ~any_error,
            buf_just_written.eq(1),
            buf_pending.eq(1),
            buf.eq(self.ev))
    ]
    self.comb += self.writable.eq(fifo.writable)

    # Buffer output of FIFO to improve timing
    dout_stb = Signal()
    dout_ack = Signal()
    dout = Record(ev_layout)
    self.sync.rio += \
        If(fifo.re,
            dout_stb.eq(1),
            dout.eq(fifo_out)
        ).Elif(dout_ack,
            dout_stb.eq(0)
        )
    self.comb += fifo.re.eq(fifo.readable & (~dout_stb | dout_ack))

    # latency compensation
    if interface.delay:
        counter_rtio = Signal.like(counter.value_rtio)
        # NOTE(review): this register is in the "rtio" domain while the
        # FIFO output buffer above uses "rio" — confirm both domain
        # names are intended (they look like they should match).
        self.sync.rtio += counter_rtio.eq(counter.value_rtio -
                                          interface.delay + 1)
    else:
        counter_rtio = counter.value_rtio

    # FIFO read through buffer: fire when the coarse timestamp matches
    # the (possibly delay-compensated) counter.
    self.comb += [
        dout_ack.eq(dout.timestamp[fine_ts_width:] == counter_rtio),
        interface.stb.eq(dout_stb & dout_ack)
    ]
    # Report channel busy pulses back to the sys domain.
    busy_transfer = BlindTransfer()
    self.submodules += busy_transfer
    self.comb += [
        busy_transfer.i.eq(interface.stb & interface.busy),
        self.busy.eq(busy_transfer.o),
    ]
    if data_width:
        self.comb += interface.data.eq(dout.data)
    if address_width:
        self.comb += interface.address.eq(dout.address)
    if fine_ts_width:
        self.comb += interface.fine_ts.eq(dout.timestamp[:fine_ts_width])
def __init__(self, config, fpga_id):
    """Top-level wrapper for one FPGA in a multi-FPGA deployment.

    Instantiates a single :class:`Core` and, when more than one FPGA is
    configured, bridges the core's external network interfaces to the
    board-level streams through async FIFOs (``sys`` <-> ``stream``
    clock domains).
    """
    self.config = config
    self.submodules.cores = [Core(config, fpga_id)]

    # Re-export the core's status signals.
    self.global_inactive = self.cores[0].global_inactive
    self.kernel_error = self.cores[0].kernel_error
    self.deadlock = self.cores[0].deadlock
    self.total_num_messages = self.cores[0].total_num_messages
    self.cycle_count = self.cores[0].cycle_count
    self.done = self.cores[0].done

    self.start = Signal()
    msg_recvd_sys = Signal()
    # The core also starts once any inter-FPGA message has been seen.
    self.comb += self.cores[0].start.eq(self.start | msg_recvd_sys)

    # Per-neighbor message counters (counted in the stream domain).
    self.num_messages_to = [Signal(32) for _ in range(config.addresslayout.num_fpga - 1)]
    self.num_messages_from = [Signal(32) for _ in range(config.addresslayout.num_fpga - 1)]

    if config.addresslayout.num_fpga > 1:
        msg_len = len(self.cores[0].network.external_network_interface_out[0].raw_bits())
        # CDC FIFOs: incoming messages cross stream -> sys, outgoing
        # messages cross sys -> stream.
        self.submodules.in_fifo = [ClockDomainsRenamer({"write":"stream", "read":"sys"})
                                   (AsyncFIFO(width=msg_len, depth=64))
                                   for j in range(config.addresslayout.num_fpga - 1)]
        self.submodules.out_fifo = [ClockDomainsRenamer({"write":"sys", "read":"stream"})
                                    (AsyncFIFO(width=msg_len, depth=64))
                                    for j in range(config.addresslayout.num_fpga - 1)]
        rx_valids = []
        for j in range(config.addresslayout.num_fpga - 1):
            # One board-level stream pair per neighbor FPGA.
            rx, tx = config.platform[fpga_id].getStreamPair()
            assert msg_len <= len(tx.data)
            self.comb += [
                # Core -> out FIFO -> TX stream.
                self.out_fifo[j].din.eq(self.cores[0].network.external_network_interface_out[j].raw_bits()),
                self.out_fifo[j].we.eq(self.cores[0].network.external_network_interface_out[j].valid),
                self.cores[0].network.external_network_interface_out[j].ack.eq(self.out_fifo[j].writable),
                tx.data.eq(self.out_fifo[j].dout),
                tx.valid.eq(self.out_fifo[j].readable),
                self.out_fifo[j].re.eq(tx.rdy),
                # RX stream -> in FIFO -> core.
                self.in_fifo[j].din.eq(rx.data),
                self.in_fifo[j].we.eq(rx.valid),
                rx.rdy.eq(self.in_fifo[j].writable),
                self.cores[0].network.external_network_interface_in[j].raw_bits().eq(self.in_fifo[j].dout),
                self.cores[0].network.external_network_interface_in[j].valid.eq(self.in_fifo[j].readable),
                self.in_fifo[j].re.eq(self.cores[0].network.external_network_interface_in[j].ack)
            ]
            # Count completed stream transactions in each direction.
            self.sync.stream += [
                If(rx.rdy & rx.valid,
                    self.num_messages_from[j].eq(self.num_messages_from[j] + 1)
                ),
                If(tx.rdy & tx.valid,
                    self.num_messages_to[j].eq(self.num_messages_to[j] + 1)
                )
            ]
            rx_valids.append(rx.valid)
        # Sticky flag: set (and never cleared) once any neighbor asserts
        # rx.valid; synchronized into the sys domain to trigger start.
        msg_recvd = Signal()
        self.sync.stream += If(reduce(or_, rx_valids, 0),
            msg_recvd.eq(1)
        )
        self.specials += MultiReg(msg_recvd, msg_recvd_sys, odomain="sys")
def __init__(self, sdram, clk_out, clk_sample, databits, rowbits, colbits, bankbits, inbuf, outbuf, burst, tRESET, tCL, tRP, tRFC, tRCD, tREFI):
    """FIFO interface backed by an SDRAM used as a large ring buffer.

    Words from the input FIFO are burst-written to SDRAM at ``write_ptr``
    and read back at ``read_ptr`` into the output FIFO. An FSM sequences
    initialization, auto-refresh, and read/write bursts according to the
    supplied SDRAM timing parameters (tRESET, tCL, tRP, tRFC, tRCD,
    tREFI — all in clock cycles).
    """
    _FIFOInterface.__init__(self, databits, None)

    addrbits = rowbits + colbits + bankbits
    assert sdram.dq.nbits == databits
    # Column address skips A10 (auto-precharge pin) when wider than 10
    # bits, hence the extra address bit.
    colabits = colbits if colbits <= 10 else colbits + 1
    max_col = Replicate(1, colbits)
    assert sdram.a.nbits >= colabits
    assert sdram.a.nbits >= rowbits
    assert sdram.ba.nbits == bankbits
    dqmbits = max(databits // 8, 1)
    assert sdram.dqm.nbits == dqmbits
    assert burst <= 1<<colbits

    # DQ handling, tristate, and sampling
    dq = TSTriple(databits)
    self.specials += dq.get_tristate(sdram.dq)
    dq_r = Signal(databits)
    # Returned read data is sampled in a dedicated clock domain driven
    # by clk_sample (presumably phase-shifted — TODO confirm).
    self.clock_domains.cd_sample = ClockDomain(reset_less=True)
    self.comb += self.cd_sample.clk.eq(clk_sample)
    self.sync.sample += dq_r.eq(dq.i)

    # Signals used for driving SDRAM control signals
    # These are not registers, they are functions of the current FSM state.
    # However, the reset state actually determines the default value for
    # states where they are not explicitly assigned. For example, cmd is
    # INHIBIT at reset (because the FSM is in RESET state at reset and that
    # sets cmd to INHIBIT), but it's NOP for every other state where it
    # isn't assigned.
    cmd = Signal(4, reset=NOP)
    dqm = Signal()
    ba = Signal(bankbits)
    a = Signal(max(colabits, rowbits))
    cke = Signal()
    self.comb += [
        sdram.dqm.eq(Replicate(dqm, dqmbits)),
        # cmd packs {cs_n, ras_n, cas_n, we_n}.
        sdram.cs_n.eq(cmd[3]),
        sdram.ras_n.eq(cmd[2]),
        sdram.cas_n.eq(cmd[1]),
        sdram.we_n.eq(cmd[0]),
        sdram.clk.eq(clk_out),
        sdram.ba.eq(ba),
        sdram.a.eq(a),
        sdram.cke.eq(cke),
    ]

    # Counter to time reset cycle of the SDRAM
    # We enable CKE on the first cycle after system reset, then wait tRESET
    reset_ctr = Signal(max=tRESET+1)
    self.sync += [
        cke.eq(1),
        reset_ctr.eq(reset_ctr + 1)
    ]

    # Counter to time refresh intervals
    # Note that this can go higher than tREFI, since we might be in the
    # middle of a burst, but long-term refresh cycles will be issued often
    # enough to meet refresh timing.
    refresh_interval = tREFI - 2  # A bit of leeway for safety
    refresh_ctr = Signal(max=(refresh_interval + 2*burst + 128))
    self.sync += If(cmd == AUTO_REFRESH,
                    If(refresh_ctr > refresh_interval,
                       # Carry over the overshoot so the long-term
                       # refresh rate is preserved.
                       refresh_ctr.eq(refresh_ctr - refresh_interval)
                    ).Else(
                       refresh_ctr.eq(0))
                 ).Else(
                    refresh_ctr.eq(refresh_ctr + 1))

    tMRD = 3  # JEDEC spec, Micron only needs 2
    # Mode: Full page burst mode, burst write
    mode = 0b0000000111 | (tCL << 4)

    # FIFOs bridging the user (sys) domain and the controller domain.
    fifo_in = RenameClockDomains(AsyncFIFO(databits, inbuf), {"read": "sys"})
    fifo_out = RenameClockDomains(AsyncFIFO(databits, outbuf), {"write": "sys"})
    self.submodules += [fifo_in, fifo_out]
    self.comb += [
        # Wire up FIFO ports to module interface
        self.writable.eq(fifo_in.writable),
        fifo_in.din.eq(self.din_bits),
        fifo_in.we.eq(self.we),
        self.readable.eq(fifo_out.readable),
        fifo_out.re.eq(self.re),
        self.dout_bits.eq(fifo_out.dout),
    ]

    # short circuit FIFOs for testing
    #self.comb += [
        #fifo_out.din.eq(fifo_in.dout),
        #fifo_out.we.eq(fifo_in.readable),
        #fifo_in.re.eq(fifo_out.writable),
    #]

    # SDRAM FIFO pointer regs
    write_ptr = Signal(addrbits)
    read_ptr = Signal(addrbits)
    read_ptr_shadow = Signal(addrbits)

    def delay_clocks(v, d):
        # Delay signal v by d clock cycles through a register chain.
        for i in range(d):
            n = Signal()
            self.sync += n.eq(v)
            v = n
        return v

    # Read cycle state signals
    issuing_read = Signal()
    # Reads come back tCL clocks later
    returning_read = delay_clocks(issuing_read, tCL)
    can_read = Signal()
    can_continue_read = Signal()
    kill_read = Signal()
    self.comb += [
        can_read.eq((write_ptr != read_ptr) & fifo_out.writable),
        # Stop a read burst at the end of the row, when the ring would
        # catch up with the write pointer, or when the read was killed.
        can_continue_read.eq((write_ptr != read_ptr_shadow + 1) & fifo_out.writable & (read_ptr_shadow[:colbits] != max_col) & ~kill_read),
        fifo_out.din.eq(dq_r),
        fifo_out.we.eq(returning_read & ~kill_read),
    ]
    self.sync += [
        # Increment read pointer when data is written to output FIFO
        If(fifo_out.we & fifo_out.writable,
           read_ptr.eq(read_ptr + 1)),
        # Keep a shadow read pointer for issuing reads. Increment it
        # while a read is being issued, but reset it to the true read
        # otherwise (which might be different if a read was killed).
        If(~issuing_read,
           read_ptr_shadow.eq(read_ptr),
        ).Else(
           read_ptr_shadow.eq(read_ptr_shadow + 1),
        ),
        # If the output FIFO becomes full, kill the current read
        If(returning_read & ~fifo_out.writable,
           kill_read.eq(1)
        ).Elif(~returning_read,
           kill_read.eq(0)
        ),
    ]

    # Write state signals
    issuing_write = Signal()
    can_write = Signal()
    can_continue_write = Signal()
    self.comb += [
        # Leave one slot free so write_ptr never catches read_ptr.
        can_write.eq((write_ptr + 1 != read_ptr) & fifo_in.readable),
        can_continue_write.eq((write_ptr + 2 != read_ptr) & fifo_in.readable & (write_ptr[:colbits] != max_col)),
        dq.o.eq(fifo_in.dout),
        dq.oe.eq(issuing_write),
        fifo_in.re.eq(issuing_write),
    ]
    self.sync += [
        # Increment write pointer when data is read from input FIFO
        If(fifo_in.re & fifo_in.readable,
           write_ptr.eq(write_ptr + 1)),
    ]

    # Address generation
    def split(addr):
        # Split a flat ring pointer into (column, row, bank), skipping
        # A10 when the column address is wider than 10 bits.
        col = addr[:colbits]
        if colbits > 10:
            col = Cat(col[:10],0,col[10:])
        return col, addr[colbits:colbits+rowbits], addr[colbits+rowbits:]

    r_col, r_row, r_bank = split(read_ptr)
    w_col, w_row, w_bank = split(write_ptr)

    # Finite state machine driving the controller
    fsm = self.submodules.fsm = FSM(reset_state="RESET")

    # Initialization sequence
    fsm.act("RESET",
            cmd.eq(INHIBIT),
            If(reset_ctr == tRESET, NextState("INIT_IDLE")))
    fsm.delayed_enter("INIT_IDLE", "INIT_PRECHARGE", 5)
    fsm.act("INIT_PRECHARGE",
            cmd.eq(PRECHARGE),
            a[10].eq(1))  # A10 high = precharge all banks
    fsm.delayed_enter("INIT_PRECHARGE", "INIT_REFRESH1", tRP)
    fsm.act("INIT_REFRESH1", cmd.eq(AUTO_REFRESH))
    fsm.delayed_enter("INIT_REFRESH1", "INIT_REFRESH2", tRFC)
    fsm.act("INIT_REFRESH2", cmd.eq(AUTO_REFRESH))
    fsm.delayed_enter("INIT_REFRESH2", "INIT_MODE", tRFC)
    fsm.act("INIT_MODE",
            cmd.eq(LOAD_MODE),
            a.eq(mode))
    fsm.delayed_enter("INIT_MODE", "IDLE", tMRD)

    # Main loop: refresh has priority, then writes, then reads.
    fsm.act("IDLE",
            If(refresh_ctr >= refresh_interval,
               NextState("REFRESH")
            ).Elif(can_write,
               NextState("WRITE_ACTIVE")
            ).Elif(can_read,
               NextState("READ_ACTIVE")
            ))

    # REFRESH
    fsm.act("REFRESH", cmd.eq(AUTO_REFRESH))
    fsm.delayed_enter("REFRESH", "IDLE", tRFC)

    # WRITE
    fsm.act("WRITE_ACTIVE",
            cmd.eq(ACTIVE),
            ba.eq(w_bank),
            a.eq(w_row))
    fsm.delayed_enter("WRITE_ACTIVE", "WRITE", tRCD)
    fsm.act("WRITE",
            cmd.eq(WRITE),
            ba.eq(w_bank),
            a.eq(w_col),
            issuing_write.eq(1),
            # Mask the beat when the input FIFO runs dry.
            dqm.eq(~fifo_in.readable),
            If(can_continue_write,
               NextState("WRITING")
            ).Else(
               If(can_read,
                  NextState("PRECHARGE_AND_READ")
               ).Else(
                  NextState("PRECHARGE")
               )))
    fsm.act("WRITING",
            issuing_write.eq(1),
            dqm.eq(~fifo_in.readable),
            If(~can_continue_write,
               If(can_read,
                  NextState("PRECHARGE_AND_READ")
               ).Else(
                  NextState("PRECHARGE")
               )))
    # NOTE(review): trailing comma after fsm.act(...) below (also on
    # "PRECHARGE") makes the statement a 1-tuple — harmless, kept as-is.
    fsm.act("PRECHARGE_AND_READ",
            cmd.eq(PRECHARGE),
            a[10].eq(1)),
    fsm.delayed_enter("PRECHARGE_AND_READ", "READ_ACTIVE", tRP)

    # READ
    fsm.act("READ_ACTIVE",
            cmd.eq(ACTIVE),
            ba.eq(r_bank),
            a.eq(r_row))
    fsm.delayed_enter("READ_ACTIVE", "READ", tRCD)
    fsm.act("READ",
            cmd.eq(READ),
            ba.eq(r_bank),
            a.eq(r_col),
            issuing_read.eq(1),
            If(can_continue_read,
               NextState("READING")
            ).Else(
               NextState("PRECHARGE")))
    fsm.act("READING",
            issuing_read.eq(1),
            If(~can_continue_read,
               NextState("PRECHARGE")))
    fsm.act("PRECHARGE",
            cmd.eq(PRECHARGE),
            a[10].eq(1)),
    fsm.delayed_enter("PRECHARGE", "IDLE", tRP)
def __init__(self, platform):
    """Standalone test design exposing the HMC ``Neighbors`` unit.

    Control and status registers are mapped on the Pico bus (``pico``
    domain), the unit itself runs in the ``sys`` (HMC) domain, and
    results are streamed back to the host over PCIe (``pcie`` domain).
    """
    # Clock domains: pico (bus), sys (HMC user clock), pcie (stream).
    self.clock_domains.cd_pico = ClockDomain()
    bus_clk, bus_rst = platform.getBusClkRst()
    self.comb += [
        self.cd_pico.clk.eq(bus_clk),
        self.cd_pico.rst.eq(bus_rst)
    ]
    self.clock_domains.cd_sys = ClockDomain()
    sys_clk, _, sys_rst, _ = platform.getHMCClkEtc()
    self.comb += [self.cd_sys.clk.eq(sys_clk), self.cd_sys.rst.eq(sys_rst)]
    self.clock_domains.cd_pcie = ClockDomain()
    clk, rst = platform.getStreamClkRst()
    self.comb += [self.cd_pcie.clk.eq(clk), self.cd_pcie.rst.eq(rst)]

    self.bus = platform.getBus()
    addr = Signal(30)  # NOTE(review): appears unused below — confirm.
    control_regs = [Signal(32) for _ in range(4)]
    base_addr = 0x10000
    start_addr = 0x20000
    status_regs = [Signal(32) for _ in range(8)]
    # Pico-bus register file: reads cover control then status registers
    # at consecutive word addresses; writes reach control registers only.
    self.sync.pico += [
        self.bus.PicoDataOut.eq(0),
        [
            If(self.bus.PicoRd & (self.bus.PicoAddr == base_addr + i * 4),
               self.bus.PicoDataOut.eq(csr))
            for i, csr in enumerate(control_regs + status_regs)
        ],
        [
            If(self.bus.PicoWr & (self.bus.PicoAddr == base_addr + i * 4),
               csr.eq(self.bus.PicoDataIn))
            for i, csr in enumerate(control_regs)
        ]
    ]

    # A write to start_addr pulses "start" across pico -> sys.
    self.submodules.start = PulseSynchronizer("pico", "sys")
    self.comb += [
        If(self.bus.PicoWr & (self.bus.PicoAddr == start_addr),
           self.start.i.eq(1))
    ]

    # Control registers cross pico -> sys as one wide word.
    control_regs_sys = [Signal(32) for _ in control_regs]
    self.submodules.control_regs_transfer = BusSynchronizer(
        len(control_regs) * 32, "pico", "sys")
    self.comb += [
        self.control_regs_transfer.i.eq(Cat(*control_regs)),
        Cat(*control_regs_sys).eq(self.control_regs_transfer.o)
    ]

    # Status registers cross sys -> pico the same way.
    status_regs_sys = [Signal(32) for _ in status_regs]
    self.submodules.status_regs_transfer = BusSynchronizer(
        len(status_regs_sys) * 32, "sys", "pico")
    self.comb += [
        self.status_regs_transfer.i.eq(Cat(*status_regs_sys)),
        Cat(*status_regs).eq(self.status_regs_transfer.o)
    ]

    # request memory lookup
    addresslayout = SimpleNamespace(nodeidsize=32, edgeidsize=32, payloadsize=32)
    config = SimpleNamespace(platform=platform, addresslayout=addresslayout)
    self.submodules.neighbors = Neighbors(config=config, pe_id=0, port=platform.getHMCPort(0))

    # Hold "valid" from the start pulse until the unit acknowledges.
    valid = Signal()
    self.sync += [
        If(self.start.o,
           valid.eq(1)
        ).Elif(self.neighbors.ack,
           valid.eq(0)
        )
    ]
    # Drive the request from the synchronized control registers.
    self.comb += [
        self.neighbors.start_idx.eq(control_regs_sys[0]),
        self.neighbors.num_neighbors.eq(control_regs_sys[1]),
        self.neighbors.valid.eq(valid),
        self.neighbors.barrier_in.eq(0),
        self.neighbors.message_in.eq(control_regs_sys[2]),
        self.neighbors.sender_in.eq(control_regs_sys[3]),
        self.neighbors.round_in.eq(0)
    ]
    # Expose performance counters and progress in status registers.
    self.comb += [
        status_regs_sys[0].eq(self.neighbors.num_requests_accepted),
        status_regs_sys[1].eq(self.neighbors.num_hmc_commands_issued),
        status_regs_sys[2].eq(self.neighbors.num_hmc_commands_retired),
        status_regs_sys[3].eq(self.neighbors.num_hmc_responses)
    ]
    self.sync += [
        # Sticky "request was issued" flag.
        If(valid, status_regs_sys[4].eq(1)),
        If(self.neighbors.valid & self.neighbors.ack,
           status_regs_sys[5].eq(status_regs_sys[5] + 1)),
        If(self.neighbors.neighbor_valid & self.neighbors.neighbor_ack,
           status_regs_sys[6].eq(status_regs_sys[6] + 1)),
        If(self.neighbors.valid & self.neighbors.ack,
           status_regs_sys[7].eq(self.neighbors.num_neighbors)),
    ]

    # Results cross sys -> pcie through an async FIFO.
    fifo_depth = 256
    self.submodules.read_fifo = ClockDomainsRenamer({
        "write": "sys", "read": "pcie"
    })(AsyncFIFO(128, fifo_depth))
    self.comb += [
        self.read_fifo.din.eq(
            Cat(self.neighbors.neighbor, self.neighbors.num_neighbors_out,
                self.neighbors.message_out, self.neighbors.sender_out)),
        self.read_fifo.we.eq(self.neighbors.neighbor_valid),
        self.neighbors.neighbor_ack.eq(self.read_fifo.writable)
    ]

    # send data back to host over PCIe
    rx, tx = platform.getStreamPair()
    self.comb += [
        tx.data.eq(self.read_fifo.dout),
        tx.valid.eq(self.read_fifo.readable),
        self.read_fifo.re.eq(tx.rdy)
    ]
def __init__(self, link_layer, sr_fifo_depth=4):
    """Master-side DRTIO packet engine.

    Accepts write/read/buffer-space requests in the sys domain, serializes
    them as packets on the link layer (TX in the ``rtio`` domain), and
    decodes replies (RX in the ``rtio_rx`` domain), crossing all results
    back into the sys domain.
    """
    # all interface signals in sys domain unless otherwise specified

    # standard request interface
    #
    # notwrite=1 address=0  buffer space request <destination>
    # notwrite=1 address=1  read request <channel, timestamp>
    #
    # optimized for write throughput
    # requests are performed on the DRTIO link preserving their order of issue
    # this is important for buffer space requests, which have to be ordered
    # wrt writes.
    self.sr_stb = Signal()
    self.sr_ack = Signal()
    self.sr_notwrite = Signal()
    self.sr_timestamp = Signal(64)
    self.sr_chan_sel = Signal(24)
    self.sr_address = Signal(8)
    self.sr_data = Signal(512)

    # buffer space reply interface
    self.buffer_space_not = Signal()
    self.buffer_space_not_ack = Signal()
    self.buffer_space = Signal(16)

    # read reply interface
    self.read_not = Signal()
    self.read_not_ack = Signal()
    #  no_event   is_overflow
    #  0          X            event
    #  1          0            timeout
    #  1          1            overflow
    self.read_no_event = Signal()
    self.read_is_overflow = Signal()
    self.read_data = Signal(32)
    self.read_timestamp = Signal(64)

    # echo interface
    self.echo_stb = Signal()
    self.echo_ack = Signal()
    self.echo_sent_now = Signal()  # in rtio domain
    self.echo_received_now = Signal()  # in rtio_rx domain

    # set_time interface
    self.set_time_stb = Signal()
    self.set_time_ack = Signal()
    # in rtio domain, must be valid all time while there is
    # a set_time request pending
    self.tsc_value = Signal(64)

    # rx errors
    self.err_unknown_packet_type = Signal()
    self.err_packet_truncated = Signal()

    # packet counters
    self.packet_cnt_tx = Signal(32)
    self.packet_cnt_rx = Signal(32)

    # # #

    # RX/TX datapath
    assert len(link_layer.tx_rt_data) == len(link_layer.rx_rt_data)
    assert len(link_layer.tx_rt_data) % 8 == 0
    ws = len(link_layer.tx_rt_data)  # link word size in bits
    tx_plm = get_m2s_layouts(ws)
    tx_dp = ClockDomainsRenamer("rtio")(TransmitDatapath(
        link_layer.tx_rt_frame, link_layer.tx_rt_data, tx_plm))
    self.submodules += tx_dp
    rx_plm = get_s2m_layouts(ws)
    rx_dp = ClockDomainsRenamer("rtio_rx")(ReceiveDatapath(
        link_layer.rx_rt_frame, link_layer.rx_rt_data, rx_plm))
    self.submodules += rx_dp

    # Write FIFO and extra data count
    # Requests cross sys -> rtio through this FIFO; the payload is the
    # concatenation of notwrite|timestamp|chan_sel|address|data.
    sr_fifo = ClockDomainsRenamer({"write": "sys", "read": "rtio"})(
        AsyncFIFO(1 + 64 + 24 + 8 + 512, sr_fifo_depth))
    self.submodules += sr_fifo
    sr_notwrite_d = Signal()
    sr_timestamp_d = Signal(64)
    sr_chan_sel_d = Signal(24)
    sr_address_d = Signal(8)
    sr_data_d = Signal(512)
    self.comb += [
        sr_fifo.we.eq(self.sr_stb),
        self.sr_ack.eq(sr_fifo.writable),
        sr_fifo.din.eq(Cat(self.sr_notwrite, self.sr_timestamp,
                           self.sr_chan_sel, self.sr_address, self.sr_data)),
        Cat(sr_notwrite_d, sr_timestamp_d, sr_chan_sel_d,
            sr_address_d, sr_data_d).eq(sr_fifo.dout)
    ]

    # One-deep output buffer after the FIFO (improves timing).
    sr_buf_readable = Signal()
    sr_buf_re = Signal()
    self.comb += sr_fifo.re.eq(sr_fifo.readable & (~sr_buf_readable | sr_buf_re))
    self.sync.rtio += \
        If(sr_fifo.re,
            sr_buf_readable.eq(1),
        ).Elif(sr_buf_re,
            sr_buf_readable.eq(0),
        )

    sr_notwrite = Signal()
    sr_timestamp = Signal(64)
    sr_chan_sel = Signal(24)
    sr_address = Signal(8)
    sr_extra_data_cnt = Signal(8)
    sr_data = Signal(512)
    self.sync.rtio += If(sr_fifo.re,
        sr_notwrite.eq(sr_notwrite_d),
        sr_timestamp.eq(sr_timestamp_d),
        sr_chan_sel.eq(sr_chan_sel_d),
        sr_address.eq(sr_address_d),
        sr_data.eq(sr_data_d))

    # Count how many link words of "extra" data (beyond the short data
    # field) are non-zero and must be transmitted after the header.
    short_data_len = tx_plm.field_length("write", "short_data")
    sr_extra_data_d = Signal(512)
    self.comb += sr_extra_data_d.eq(sr_data_d[short_data_len:])
    for i in range(512 // ws):
        self.sync.rtio += If(sr_fifo.re,
            If(sr_extra_data_d[ws * i:ws * (i + 1)] != 0,
               sr_extra_data_cnt.eq(i + 1)))
    sr_extra_data = Signal(512)
    self.sync.rtio += If(sr_fifo.re, sr_extra_data.eq(sr_extra_data_d))

    # Mux the extra-data words onto the raw TX datapath, one per cycle.
    extra_data_ce = Signal()
    extra_data_last = Signal()
    extra_data_counter = Signal(max=512 // ws + 1)
    self.comb += [
        Case(extra_data_counter,
             {i + 1: tx_dp.raw_data.eq(sr_extra_data[i * ws:(i + 1) * ws])
              for i in range(512 // ws)}),
        extra_data_last.eq(extra_data_counter == sr_extra_data_cnt)
    ]
    self.sync.rtio += \
        If(extra_data_ce,
            extra_data_counter.eq(extra_data_counter + 1),
        ).Else(
            extra_data_counter.eq(1)
        )

    # CDC: replies and requests crossing to/from the sys domain.
    buffer_space_not = Signal()
    buffer_space = Signal(16)
    self.submodules += CrossDomainNotification("rtio_rx", "sys",
        buffer_space_not, buffer_space,
        self.buffer_space_not, self.buffer_space_not_ack, self.buffer_space)

    set_time_stb = Signal()
    set_time_ack = Signal()
    self.submodules += CrossDomainRequest("rtio",
        self.set_time_stb, self.set_time_ack, None,
        set_time_stb, set_time_ack, None)

    echo_stb = Signal()
    echo_ack = Signal()
    self.submodules += CrossDomainRequest("rtio",
        self.echo_stb, self.echo_ack, None,
        echo_stb, echo_ack, None)

    read_not = Signal()
    read_no_event = Signal()
    read_is_overflow = Signal()
    read_data = Signal(32)
    read_timestamp = Signal(64)
    self.submodules += CrossDomainNotification(
        "rtio_rx", "sys",
        read_not,
        Cat(read_no_event, read_is_overflow, read_data, read_timestamp),
        self.read_not, self.read_not_ack,
        Cat(self.read_no_event, self.read_is_overflow,
            self.read_data, self.read_timestamp))
    self.comb += [
        read_is_overflow.eq(
            rx_dp.packet_as["read_reply_noevent"].overflow),
        read_data.eq(rx_dp.packet_as["read_reply"].data),
        read_timestamp.eq(rx_dp.packet_as["read_reply"].timestamp)
    ]

    # Error pulses cross rtio_rx -> sys through blind transfers.
    err_unknown_packet_type = BlindTransfer("rtio_rx", "sys")
    err_packet_truncated = BlindTransfer("rtio_rx", "sys")
    self.submodules += err_unknown_packet_type, err_packet_truncated
    self.comb += [
        self.err_unknown_packet_type.eq(err_unknown_packet_type.o),
        self.err_packet_truncated.eq(err_packet_truncated.o)
    ]

    # TX FSM
    tx_fsm = ClockDomainsRenamer("rtio")(FSM(reset_state="IDLE"))
    self.submodules += tx_fsm

    echo_sent_now = Signal()
    self.sync.rtio += self.echo_sent_now.eq(echo_sent_now)
    tsc_value = Signal(64)
    tsc_value_load = Signal()
    self.sync.rtio += If(tsc_value_load, tsc_value.eq(self.tsc_value))

    tx_fsm.act("IDLE",
        # Ensure 2 cycles between frames on the link.
        NextState("READY"))
    tx_fsm.act("READY",
        # Queued requests have priority; echo and set_time are only
        # serviced when the request buffer is empty.
        If(sr_buf_readable,
            If(sr_notwrite,
                Case(sr_address[0], {
                    0: NextState("BUFFER_SPACE"),
                    1: NextState("READ")
                }),
            ).Else(
                NextState("WRITE"))
        ).Else(
            If(echo_stb,
                echo_sent_now.eq(1),
                NextState("ECHO")
            ).Elif(set_time_stb,
                tsc_value_load.eq(1),
                NextState("SET_TIME"))))
    tx_fsm.act("WRITE",
        tx_dp.send("write",
            timestamp=sr_timestamp,
            chan_sel=sr_chan_sel,
            address=sr_address,
            extra_data_cnt=sr_extra_data_cnt,
            short_data=sr_data[:short_data_len]),
        If(tx_dp.packet_last,
            If(sr_extra_data_cnt == 0,
                sr_buf_re.eq(1),
                NextState("IDLE")
            ).Else(
                NextState("WRITE_EXTRA"))))
    tx_fsm.act("WRITE_EXTRA",
        tx_dp.raw_stb.eq(1),
        extra_data_ce.eq(1),
        If(extra_data_last,
            sr_buf_re.eq(1),
            NextState("IDLE")))
    tx_fsm.act("BUFFER_SPACE",
        tx_dp.send("buffer_space_request", destination=sr_chan_sel[16:]),
        If(tx_dp.packet_last,
            sr_buf_re.eq(1),
            NextState("IDLE")))
    tx_fsm.act("READ",
        tx_dp.send("read_request", chan_sel=sr_chan_sel, timeout=sr_timestamp),
        If(tx_dp.packet_last,
            sr_buf_re.eq(1),
            NextState("IDLE")))
    tx_fsm.act("ECHO",
        tx_dp.send("echo_request"),
        If(tx_dp.packet_last,
            echo_ack.eq(1),
            NextState("IDLE")))
    tx_fsm.act("SET_TIME",
        tx_dp.send("set_time", timestamp=tsc_value),
        If(tx_dp.packet_last,
            set_time_ack.eq(1),
            NextState("IDLE")))

    # RX FSM
    rx_fsm = ClockDomainsRenamer("rtio_rx")(FSM(reset_state="INPUT"))
    self.submodules += rx_fsm

    ongoing_packet_next = Signal()
    ongoing_packet = Signal()
    self.sync.rtio_rx += ongoing_packet.eq(ongoing_packet_next)

    echo_received_now = Signal()
    self.sync.rtio_rx += self.echo_received_now.eq(echo_received_now)

    rx_fsm.act("INPUT",
        If(rx_dp.frame_r,
            rx_dp.packet_buffer_load.eq(1),
            If(rx_dp.packet_last,
                # Dispatch on the received packet type.
                Case(rx_dp.packet_type, {
                    rx_plm.types["echo_reply"]: echo_received_now.eq(1),
                    rx_plm.types["buffer_space_reply"]: NextState("BUFFER_SPACE"),
                    rx_plm.types["read_reply"]: NextState("READ_REPLY"),
                    rx_plm.types["read_reply_noevent"]: NextState("READ_REPLY_NOEVENT"),
                    "default": err_unknown_packet_type.i.eq(1)
                })
            ).Else(
                ongoing_packet_next.eq(1))),
        # Frame dropped mid-packet.
        If(~rx_dp.frame_r & ongoing_packet,
            err_packet_truncated.i.eq(1)))
    rx_fsm.act("BUFFER_SPACE",
        buffer_space_not.eq(1),
        buffer_space.eq(rx_dp.packet_as["buffer_space_reply"].space),
        NextState("INPUT"))
    rx_fsm.act("READ_REPLY",
        read_not.eq(1),
        read_no_event.eq(0),
        NextState("INPUT"))
    rx_fsm.act("READ_REPLY_NOEVENT",
        read_not.eq(1),
        read_no_event.eq(1),
        NextState("INPUT"))

    # packet counters
    # Count rising edges of the frame signals; values cross domains
    # through Gray-code transfers.
    tx_frame_r = Signal()
    packet_cnt_tx = Signal(32)
    self.sync.rtio += [
        tx_frame_r.eq(link_layer.tx_rt_frame),
        If(link_layer.tx_rt_frame & ~tx_frame_r,
           packet_cnt_tx.eq(packet_cnt_tx + 1))
    ]
    cdc_packet_cnt_tx = GrayCodeTransfer(32)
    self.submodules += cdc_packet_cnt_tx
    self.comb += [
        cdc_packet_cnt_tx.i.eq(packet_cnt_tx),
        self.packet_cnt_tx.eq(cdc_packet_cnt_tx.o)
    ]
    rx_frame_r = Signal()
    packet_cnt_rx = Signal(32)
    self.sync.rtio_rx += [
        rx_frame_r.eq(link_layer.rx_rt_frame),
        If(link_layer.rx_rt_frame & ~rx_frame_r,
           packet_cnt_rx.eq(packet_cnt_rx + 1))
    ]
    cdc_packet_cnt_rx = ClockDomainsRenamer({"rtio": "rtio_rx"})(
        GrayCodeTransfer(32))
    self.submodules += cdc_packet_cnt_rx
    self.comb += [
        cdc_packet_cnt_rx.i.eq(packet_cnt_rx),
        self.packet_cnt_rx.eq(cdc_packet_cnt_rx.o)
    ]
def __init__(self, rbus, counter, fine_ts_width, fifo_depth):
    """Input (read) manager multiplexing event FIFOs of RTIO channels.

    Each input-capable channel (one with an ``oe`` attribute) gets an
    async FIFO (rio -> rsys) recording (timestamp, value) pairs of
    sensed edges, an overflow flag, and a saturating 16-bit pileup
    counter; ``sel`` chooses which channel's FIFO and counters are
    presented on the common read interface.
    """
    self.sel = Signal(max=len(rbus))
    self.timestamp = Signal(counter.width + fine_ts_width)
    self.value = Signal()
    self.readable = Signal()
    self.re = Signal()
    self.overflow = Signal()
    self.overflow_reset = Signal()
    self.pileup_count = Signal(16)
    self.pileup_reset = Signal()

    # # #

    timestamps = []
    values = []
    readables = []
    overflows = []
    pileup_counts = []
    ev_layout = [("timestamp", counter.width + fine_ts_width),
                 ("value", 1)]
    for n, chif in enumerate(rbus):
        if hasattr(chif, "oe"):
            # Sensitivity: bit 0 = capture rising edges, bit 1 = capture
            # falling edges; loaded from o_value when the channel is
            # switched to input (oe deasserted with a strobe).
            sensitivity = Signal(2)
            self.sync.rio += If(~chif.oe & chif.o_stb,
                sensitivity.eq(chif.o_value))

            fifo = RenameClockDomains(AsyncFIFO(ev_layout, fifo_depth),
                {"read": "rsys", "write": "rio"})
            self.submodules += fifo

            # FIFO write
            if fine_ts_width:
                full_ts = Cat(chif.i_fine_ts, counter.i_value_rio)
            else:
                full_ts = counter.i_value_rio
            self.comb += [
                fifo.din.timestamp.eq(full_ts),
                fifo.din.value.eq(chif.i_value),
                # Record only edges the sensitivity mask selects.
                fifo.we.eq(~chif.oe & chif.i_stb &
                    ((chif.i_value & sensitivity[0])
                     | (~chif.i_value & sensitivity[1])))
            ]

            # FIFO read
            timestamps.append(fifo.dout.timestamp)
            values.append(fifo.dout.value)
            readables.append(fifo.readable)
            self.comb += fifo.re.eq(self.re & (self.sel == n))

            # Overflow flag (rio domain), reset by a pulse from rsys.
            overflow = Signal()
            overflow_reset_sync = PulseSynchronizer("rsys", "rio")
            self.submodules += overflow_reset_sync
            self.comb += overflow_reset_sync.i.eq(self.overflow_reset
                & (self.sel == n))
            self.sync.rio += [
                If(overflow_reset_sync.o, overflow.eq(0)),
                # Set wins over reset on the same cycle (written last).
                If(fifo.we & ~fifo.writable, overflow.eq(1))
            ]
            overflow_sys = Signal()
            self.specials += MultiReg(overflow, overflow_sys, "rsys")
            overflows.append(overflow_sys)

            # Saturating pileup counter (rio domain), Gray-coded into rsys.
            pileup_count = Signal(16)
            pileup_count_reset_sync = PulseSynchronizer("rsys", "rio")
            self.submodules += pileup_count_reset_sync
            self.comb += pileup_count_reset_sync.i.eq(self.pileup_reset
                & (self.sel == n))
            self.sync.rio += \
                If(pileup_count_reset_sync.o,
                    pileup_count.eq(0)
                ).Elif(chif.i_pileup,
                    If(pileup_count != 2**16 - 1,  # saturate
                        pileup_count.eq(pileup_count + 1)
                    )
                )
            pileup_count_sync = _GrayCodeTransfer(16)
            self.submodules += pileup_count_sync
            self.comb += pileup_count_sync.i.eq(pileup_count)
            pileup_counts.append(pileup_count_sync.o)
        else:
            # Output-only channel: present constant zeros on all taps.
            timestamps.append(0)
            values.append(0)
            readables.append(0)
            overflows.append(0)
            pileup_counts.append(0)

    # Channel multiplexer for the common read interface.
    self.comb += [
        self.timestamp.eq(Array(timestamps)[self.sel]),
        self.value.eq(Array(values)[self.sel]),
        self.readable.eq(Array(readables)[self.sel]),
        self.overflow.eq(Array(overflows)[self.sel]),
        self.pileup_count.eq(Array(pileup_counts)[self.sel])
    ]