def __init__(self, tsc, channels, lane_count=8, fifo_depth=128):
    """RTIO core, fully synchronous variant: wires the CRI to a SED
    (output side) and an InputCollector (input side), both running in
    the "rio" clock domain.

    :param tsc: timestamp counter providing ``coarse_ts`` and
        ``glbl_fine_ts_width``.
    :param channels: list of RTIO channel objects.
    :param lane_count: number of SED output lanes.
    :param fifo_depth: depth of each SED lane FIFO.
    """
    self.cri = cri.Interface()
    # Error flags reported asynchronously to the CPU interface.
    self.async_errors = Record(async_errors_layout)

    # The global TSC must have at least as many fine timestamp bits as
    # the widest output or input interface of any channel.
    chan_fine_ts_width = max(
        max(rtlink.get_fine_ts_width(channel.interface.o)
            for channel in channels),
        max(rtlink.get_fine_ts_width(channel.interface.i)
            for channel in channels))
    assert tsc.glbl_fine_ts_width >= chan_fine_ts_width

    # Output path: SED in "sync" mode, sharing the CRI interface.
    self.submodules.outputs = ClockDomainsRenamer("rio")(SED(
        channels, tsc.glbl_fine_ts_width, "sync",
        lane_count=lane_count, fifo_depth=fifo_depth,
        enable_spread=False, report_buffer_space=True,
        interface=self.cri))
    self.comb += self.outputs.coarse_timestamp.eq(tsc.coarse_ts)
    # Events must be at least 16 coarse cycles in the future to avoid
    # underflow (registered, so effectively a pipeline margin).
    self.sync.rtio += self.outputs.minimum_coarse_timestamp.eq(
        tsc.coarse_ts + 16)

    # Input path.
    self.submodules.inputs = ClockDomainsRenamer("rio")(InputCollector(
        tsc, channels, "sync", interface=self.cri))

    # Forward every asynchronous error flag from the SED.
    for attr, _ in async_errors_layout:
        self.comb += getattr(self.async_errors, attr).eq(
            getattr(self.outputs, attr))
def __init__(self):
    """Converts a stream of (channel, timestamp, address, data) records
    into CRI write commands, latching the offending event's coordinates
    into CSRs when an underflow occurs.

    While the underflow flag is set, incoming stream data is discarded
    until the CPU acknowledges the error by writing the CSR.
    """
    self.underflow = CSR()
    # Coordinates of the event that caused the last underflow.
    self.error_channel = CSRStatus(24)
    self.error_timestamp = CSRStatus(64)
    self.error_address = CSRStatus(16)

    self.sink = stream.Endpoint(record_layout)
    self.cri = cri.Interface()
    self.busy = Signal()

    # # #

    underflow_trigger = Signal()
    # Latch error info on underflow; CSR write (underflow.re) clears it.
    self.sync += [
        If(underflow_trigger,
            self.underflow.w.eq(1),
            self.error_channel.status.eq(self.sink.channel),
            self.error_timestamp.status.eq(self.sink.timestamp),
            self.error_address.status.eq(self.sink.address)),
        If(self.underflow.re,
            self.underflow.w.eq(0))
    ]

    # Stream record fields drive the CRI directly; only cmd is gated
    # by the FSM.
    self.comb += [
        self.cri.chan_sel.eq(self.sink.channel),
        self.cri.timestamp.eq(self.sink.timestamp),
        self.cri.o_address.eq(self.sink.address),
        self.cri.o_data.eq(self.sink.data)
    ]

    fsm = FSM(reset_state="IDLE")
    self.submodules += fsm
    fsm.act("IDLE",
        If(~self.underflow.w,
            If(self.sink.stb,
                If(self.sink.eop,
                    # last packet contains dummy data, discard it
                    self.sink.ack.eq(1)
                ).Else(
                    NextState("WRITE")))
        ).Else(
            # discard all data until errors are acked
            self.sink.ack.eq(1)))
    fsm.act("WRITE",
        self.busy.eq(1),
        self.cri.cmd.eq(cri.commands["write"]),
        NextState("CHECK_STATE"))
    fsm.act("CHECK_STATE",
        self.busy.eq(1),
        # o_status == 0: write accepted; bit 1: underflow.
        If(self.cri.o_status == 0,
            self.sink.ack.eq(1),
            NextState("IDLE")),
        If(self.cri.o_status[1],
            NextState("UNDERFLOW")))
    fsm.act("UNDERFLOW",
        self.busy.eq(1),
        underflow_trigger.eq(1),
        self.sink.ack.eq(1),
        NextState("IDLE"))
def __init__(self, channels, glbl_fine_ts_width, mode,
             quash_channels=None, interface=None):
    """Collects input events from all channels into per-channel FIFOs
    and serves CRI read requests with timeout handling.

    :param channels: list of RTIO channel objects.
    :param glbl_fine_ts_width: number of fine timestamp bits; the
        coarse timestamp is ``64 - glbl_fine_ts_width`` bits wide.
    :param mode: ``"sync"`` (single clock domain) or ``"async"``
        (I/O side in ``rio``, CRI side in ``rsys``).
    :param quash_channels: channel numbers whose inputs are ignored
        (reported as permanently empty).
    :param interface: optional pre-existing CRI interface to attach to.
    """
    # Fix: avoid the shared mutable default argument ([]); use a None
    # sentinel instead. Read-only here, but the idiom is safer.
    if quash_channels is None:
        quash_channels = []
    if interface is None:
        interface = cri.Interface()
    self.cri = interface
    self.coarse_timestamp = Signal(64 - glbl_fine_ts_width)

    # # #

    # Select FIFO primitives and target sync domains per mode.
    if mode == "sync":
        fifo_factory = SyncFIFOBuffered
        sync_io = self.sync
        sync_cri = self.sync
    elif mode == "async":
        fifo_factory = lambda *args: ClockDomainsRenamer(
            {"write": "rio", "read": "rsys"})(AsyncFIFO(*args))
        sync_io = self.sync.rio
        sync_cri = self.sync.rsys
    else:
        raise ValueError("mode must be 'sync' or 'async'")

    i_statuses, i_datas, i_timestamps = [], [], []
    i_ack = Signal()
    sel = self.cri.chan_sel[:16]
    for n, channel in enumerate(channels):
        iif = channel.interface.i
        if iif is None or n in quash_channels:
            # No input interface, or channel is quashed: report zeros.
            i_datas.append(0)
            i_timestamps.append(0)
            i_statuses.append(0)
            continue

        # FIFO
        layout = get_channel_layout(len(self.coarse_timestamp), iif)
        fifo = fifo_factory(layout_len(layout), channel.ififo_depth)
        self.submodules += fifo
        fifo_in = Record(layout)
        fifo_out = Record(layout)
        self.comb += [
            fifo.din.eq(fifo_in.raw_bits()),
            fifo_out.raw_bits().eq(fifo.dout)
        ]

        # FIFO write: timestamp events as they arrive, compensating
        # for the interface's declared input delay.
        if iif.delay:
            counter_rtio = Signal.like(self.coarse_timestamp,
                                       reset_less=True)
            sync_io += counter_rtio.eq(
                self.coarse_timestamp - (iif.delay + 1))
        else:
            counter_rtio = self.coarse_timestamp
        if hasattr(fifo_in, "data"):
            self.comb += fifo_in.data.eq(iif.data)
        if hasattr(fifo_in, "timestamp"):
            if hasattr(iif, "fine_ts"):
                full_ts = Cat(iif.fine_ts, counter_rtio)
            else:
                full_ts = counter_rtio
            self.comb += fifo_in.timestamp.eq(full_ts)
        self.comb += fifo.we.eq(iif.stb)

        # Overflow = write strobe while the FIFO is full. In async
        # mode the pulse must cross into the CRI clock domain.
        overflow_io = Signal()
        self.comb += overflow_io.eq(fifo.we & ~fifo.writable)
        if mode == "sync":
            overflow_trigger = overflow_io
        elif mode == "async":
            overflow_transfer = BlindTransfer()
            self.submodules += overflow_transfer
            self.comb += overflow_transfer.i.eq(overflow_io)
            overflow_trigger = overflow_transfer.o
        else:
            raise ValueError("mode must be 'sync' or 'async'")

        # FIFO read, CRI connection
        if hasattr(fifo_out, "data"):
            i_datas.append(fifo_out.data)
        else:
            i_datas.append(0)
        if hasattr(fifo_out, "timestamp"):
            # Left-align the timestamp into the 64-bit CRI field.
            ts_shift = 64 - len(fifo_out.timestamp)
            i_timestamps.append(fifo_out.timestamp << ts_shift)
        else:
            i_timestamps.append(0)

        selected = Signal()
        self.comb += selected.eq(sel == n)

        # Sticky overflow flag, cleared when the CPU reads the channel.
        # NOTE: the set (overflow_trigger) comes after the clear, so a
        # simultaneous set wins.
        overflow = Signal()
        sync_cri += [
            If(selected & i_ack,
               overflow.eq(0)),
            If(overflow_trigger,
               overflow.eq(1))
        ]
        self.comb += fifo.re.eq(selected & i_ack & ~overflow)
        i_statuses.append(Cat(fifo.readable & ~overflow, overflow))

    # CRI read handling: a read arms input_pending with a deadline;
    # the reply is delivered when an event (or overflow) appears or
    # the timeout expires.
    i_status_raw = Signal(2)
    self.comb += i_status_raw.eq(Array(i_statuses)[sel])
    input_timeout = Signal.like(self.cri.timestamp, reset_less=True)
    input_pending = Signal()
    self.cri.i_data.reset_less = True
    self.cri.i_timestamp.reset_less = True
    sync_cri += [
        i_ack.eq(0),
        If(i_ack,
            # status bit 0: no event (inverted "readable"); bit 1:
            # overflow; bit 2 ("wait") cleared.
            self.cri.i_status.eq(
                Cat(~i_status_raw[0], i_status_raw[1], 0)),
            self.cri.i_data.eq(Array(i_datas)[sel]),
            self.cri.i_timestamp.eq(Array(i_timestamps)[sel]),
        ),
        If((self.cri.counter >= input_timeout) | (i_status_raw != 0),
            If(input_pending, i_ack.eq(1)),
            input_pending.eq(0)),
        If(self.cri.cmd == cri.commands["read"],
            # cri.timestamp carries the read deadline; 0b100 = "wait".
            input_timeout.eq(self.cri.timestamp),
            input_pending.eq(1),
            self.cri.i_status.eq(0b100))
    ]
def __init__(self, tsc, link_layer):
    """DRTIO packet repeater: forwards CRI commands (write, read,
    get_buffer_space) over a link layer and collects the replies.

    TX runs in the ``rtio`` domain, RX in ``rtio_rx``; cross-domain
    notifications carry the replies back.
    """
    # in rtio domain
    self.reset = Signal()

    # CRI target interface in rtio domain
    self.cri = cri.Interface()

    # in rtio_rx domain
    self.err_unknown_packet_type = Signal()
    self.err_packet_truncated = Signal()

    # in rtio domain
    self.err_command_missed = Signal()
    self.command_missed_cmd = Signal(2)
    self.command_missed_chan_sel = Signal(24)
    self.err_buffer_space_timeout = Signal()
    self.buffer_space_destination = Signal(8)

    # set_time interface, in rtio domain
    self.set_time_stb = Signal()
    self.set_time_ack = Signal()

    # # #

    # RX/TX datapath
    assert len(link_layer.tx_rt_data) == len(link_layer.rx_rt_data)
    assert len(link_layer.tx_rt_data) % 8 == 0
    ws = len(link_layer.tx_rt_data)  # link word size in bits
    tx_plm = get_m2s_layouts(ws)
    tx_dp = ClockDomainsRenamer("rtio")(TransmitDatapath(
        link_layer.tx_rt_frame, link_layer.tx_rt_data, tx_plm))
    self.submodules += tx_dp
    rx_plm = get_s2m_layouts(ws)
    rx_dp = ClockDomainsRenamer("rtio_rx")(ReceiveDatapath(
        link_layer.rx_rt_frame, link_layer.rx_rt_data, rx_plm))
    self.submodules += rx_dp

    # TSC sync: capture the local coarse timestamp when requested so a
    # stable value is transmitted.
    tsc_value = Signal(64)
    tsc_value_load = Signal()
    self.sync.rtio += If(tsc_value_load, tsc_value.eq(tsc.coarse_ts))

    # CRI buffer stage 1: capture an incoming command. A command
    # arriving while the stage is still loaded is reported as missed.
    cb0_loaded = Signal()
    cb0_ack = Signal()
    cb0_cmd = Signal(2)
    cb0_timestamp = Signal(64)
    cb0_chan_sel = Signal(24)
    cb0_o_address = Signal(8)
    cb0_o_data = Signal(512)
    self.sync.rtio += [
        If(self.reset | cb0_ack,
            cb0_loaded.eq(0),
            cb0_cmd.eq(cri.commands["nop"])),
        If(~self.reset & ~cb0_loaded
                & (self.cri.cmd != cri.commands["nop"]),
            cb0_loaded.eq(1),
            cb0_cmd.eq(self.cri.cmd),
            # reads carry a timeout instead of an output timestamp
            If(self.cri.cmd == cri.commands["read"],
                cb0_timestamp.eq(self.cri.i_timeout)
            ).Else(
                cb0_timestamp.eq(self.cri.o_timestamp)),
            cb0_chan_sel.eq(self.cri.chan_sel),
            cb0_o_address.eq(self.cri.o_address),
            cb0_o_data.eq(self.cri.o_data)),
        self.err_command_missed.eq(
            cb0_loaded & (self.cri.cmd != cri.commands["nop"])),
        self.command_missed_chan_sel.eq(self.cri.chan_sel),
        self.command_missed_cmd.eq(self.cri.cmd)
    ]

    # CRI buffer stage 2 and write data slicer
    cb_loaded = Signal()
    cb_ack = Signal()
    cb_cmd = Signal(2)
    cb_timestamp = Signal(64)
    cb_chan_sel = Signal(24)
    cb_o_address = Signal(8)
    cb_o_data = Signal(512)
    self.sync.rtio += [
        If(self.reset | cb_ack,
            cb_loaded.eq(0),
            cb_cmd.eq(cri.commands["nop"])),
        If(~self.reset & ~cb_loaded & cb0_loaded,
            cb_loaded.eq(1),
            cb_cmd.eq(cb0_cmd),
            cb_timestamp.eq(cb0_timestamp),
            cb_chan_sel.eq(cb0_chan_sel),
            cb_o_address.eq(cb0_o_address),
            cb_o_data.eq(cb0_o_data))
    ]
    self.comb += cb0_ack.eq(~cb_loaded)

    # Count how many ws-bit words beyond the short_data field are
    # non-zero, so only the needed extra words are transmitted.
    wb_extra_data_cnt = Signal(8)
    short_data_len = tx_plm.field_length("write", "short_data")
    wb_extra_data_a = Signal(512)
    self.comb += wb_extra_data_a.eq(self.cri.o_data[short_data_len:])
    for i in range(512 // ws):
        self.sync.rtio += If(self.cri.cmd == cri.commands["write"],
            If(wb_extra_data_a[ws * i:ws * (i + 1)] != 0,
                wb_extra_data_cnt.eq(i + 1)))

    wb_extra_data = Signal(512)
    self.sync.rtio += If(self.cri.cmd == cri.commands["write"],
        wb_extra_data.eq(wb_extra_data_a))

    # Extra-data word counter drives the mux that slices wb_extra_data
    # onto the raw TX data port; counter value 1 selects word 0.
    extra_data_ce = Signal()
    extra_data_last = Signal()
    extra_data_counter = Signal(max=512 // ws + 1)
    self.comb += [
        Case(extra_data_counter, {
            i + 1: tx_dp.raw_data.eq(wb_extra_data[i * ws:(i + 1) * ws])
            for i in range(512 // ws)}),
        extra_data_last.eq(extra_data_counter == wb_extra_data_cnt)
    ]
    self.sync.rtio += \
        If(extra_data_ce,
            extra_data_counter.eq(extra_data_counter + 1),
        ).Else(
            extra_data_counter.eq(1)
        )

    # Buffer space: remember which destination was queried.
    self.sync.rtio += If(
        self.cri.cmd == cri.commands["get_buffer_space"],
        self.buffer_space_destination.eq(self.cri.chan_sel[16:]))

    rx_buffer_space_not = Signal()
    rx_buffer_space = Signal(16)
    buffer_space_not = Signal()
    buffer_space_not_ack = Signal()
    self.submodules += CrossDomainNotification(
        "rtio_rx", "rtio",
        rx_buffer_space_not, rx_buffer_space,
        buffer_space_not, buffer_space_not_ack,
        self.cri.o_buffer_space)

    timeout_counter = ClockDomainsRenamer("rtio")(WaitTimer(8191))
    self.submodules += timeout_counter

    # Read reply path (rtio_rx -> rtio).
    read_not = Signal()
    read_no_event = Signal()
    read_is_overflow = Signal()
    read_data = Signal(32)
    read_timestamp = Signal(64)
    rtio_read_not = Signal()
    rtio_read_not_ack = Signal()
    rtio_read_no_event = Signal()
    rtio_read_is_overflow = Signal()
    rtio_read_data = Signal(32)
    rtio_read_timestamp = Signal(64)
    self.submodules += CrossDomainNotification(
        "rtio_rx", "rtio",
        read_not,
        Cat(read_no_event, read_is_overflow, read_data, read_timestamp),
        rtio_read_not, rtio_read_not_ack,
        Cat(rtio_read_no_event, rtio_read_is_overflow,
            rtio_read_data, rtio_read_timestamp))
    self.comb += [
        read_is_overflow.eq(
            rx_dp.packet_as["read_reply_noevent"].overflow),
        read_data.eq(rx_dp.packet_as["read_reply"].data),
        read_timestamp.eq(rx_dp.packet_as["read_reply"].timestamp)
    ]

    # input status: bit 2 ("wait") is set while a command is buffered.
    i_status_wait_event = Signal()
    i_status_overflow = Signal()
    self.comb += self.cri.i_status.eq(
        Cat(i_status_wait_event, i_status_overflow,
            cb0_loaded | cb_loaded))

    load_read_reply = Signal()
    self.sync.rtio += [
        If(load_read_reply,
            i_status_wait_event.eq(0),
            i_status_overflow.eq(0),
            If(rtio_read_no_event,
                If(rtio_read_is_overflow,
                    i_status_overflow.eq(1)
                ).Else(
                    i_status_wait_event.eq(1))),
            self.cri.i_data.eq(rtio_read_data),
            self.cri.i_timestamp.eq(rtio_read_timestamp))
    ]

    # TX and CRI FSM
    tx_fsm = ClockDomainsRenamer("rtio")(FSM(reset_state="IDLE"))
    self.submodules += tx_fsm

    tx_fsm.act("IDLE",
        # Ensure 2 cycles between frames on the link.
        NextState("READY"))
    tx_fsm.act("READY",
        # set_time takes priority over buffered CRI commands.
        If(self.set_time_stb,
            tsc_value_load.eq(1),
            NextState("SET_TIME")
        ).Else(
            If(cb_cmd == cri.commands["write"],
                NextState("WRITE")),
            If(cb_cmd == cri.commands["get_buffer_space"],
                NextState("BUFFER_SPACE")),
            If(cb_cmd == cri.commands["read"],
                NextState("READ"))))
    tx_fsm.act("SET_TIME",
        tx_dp.send("set_time", timestamp=tsc_value),
        If(tx_dp.packet_last,
            self.set_time_ack.eq(1),
            NextState("IDLE")))
    tx_fsm.act("WRITE",
        tx_dp.send("write",
            timestamp=cb_timestamp,
            chan_sel=cb_chan_sel,
            address=cb_o_address,
            extra_data_cnt=wb_extra_data_cnt,
            short_data=cb_o_data[:short_data_len]),
        If(tx_dp.packet_last,
            If(wb_extra_data_cnt == 0,
                cb_ack.eq(1),
                NextState("IDLE")
            ).Else(
                NextState("WRITE_EXTRA"))))
    tx_fsm.act("WRITE_EXTRA",
        tx_dp.raw_stb.eq(1),
        extra_data_ce.eq(1),
        If(extra_data_last,
            cb_ack.eq(1),
            NextState("IDLE")))
    tx_fsm.act("BUFFER_SPACE",
        tx_dp.send("buffer_space_request",
            destination=self.buffer_space_destination),
        If(tx_dp.packet_last,
            buffer_space_not_ack.eq(1),
            NextState("GET_BUFFER_SPACE_REPLY")))
    tx_fsm.act("GET_BUFFER_SPACE_REPLY",
        timeout_counter.wait.eq(1),
        If(timeout_counter.done,
            self.err_buffer_space_timeout.eq(1),
            cb_ack.eq(1),
            NextState("READY")
        ).Else(
            If(buffer_space_not,
                self.cri.o_buffer_space_valid.eq(1),
                cb_ack.eq(1),
                NextState("READY")),
        ))
    tx_fsm.act("READ",
        tx_dp.send("read_request",
            chan_sel=cb_chan_sel,
            timeout=cb_timestamp),
        rtio_read_not_ack.eq(1),
        If(tx_dp.packet_last,
            NextState("GET_READ_REPLY")))
    tx_fsm.act("GET_READ_REPLY",
        rtio_read_not_ack.eq(1),
        If(self.reset | rtio_read_not,
            load_read_reply.eq(1),
            cb_ack.eq(1),
            NextState("READY")))

    # RX FSM
    rx_fsm = ClockDomainsRenamer("rtio_rx")(FSM(reset_state="INPUT"))
    self.submodules += rx_fsm

    # Track whether a multi-word packet is in flight so a dropped
    # frame can be flagged as truncation.
    ongoing_packet_next = Signal()
    ongoing_packet = Signal()
    self.sync.rtio_rx += ongoing_packet.eq(ongoing_packet_next)

    rx_fsm.act("INPUT",
        If(rx_dp.frame_r,
            rx_dp.packet_buffer_load.eq(1),
            If(rx_dp.packet_last,
                Case(rx_dp.packet_type, {
                    rx_plm.types["buffer_space_reply"]:
                        NextState("BUFFER_SPACE"),
                    rx_plm.types["read_reply"]:
                        NextState("READ_REPLY"),
                    rx_plm.types["read_reply_noevent"]:
                        NextState("READ_REPLY_NOEVENT"),
                    "default": self.err_unknown_packet_type.eq(1)
                })
            ).Else(
                ongoing_packet_next.eq(1))),
        If(~rx_dp.frame_r & ongoing_packet,
            self.err_packet_truncated.eq(1)))
    rx_fsm.act("BUFFER_SPACE",
        rx_buffer_space_not.eq(1),
        rx_buffer_space.eq(rx_dp.packet_as["buffer_space_reply"].space),
        NextState("INPUT"))
    rx_fsm.act("READ_REPLY",
        read_not.eq(1),
        read_no_event.eq(0),
        NextState("INPUT"))
    rx_fsm.act("READ_REPLY_NOEVENT",
        read_not.eq(1),
        read_no_event.eq(1),
        NextState("INPUT"))
def __init__(self, channels, fine_ts_width=None, guard_io_cycles=20):
    """RTIO core with per-channel output/input managers, multiplexed
    onto the CRI by channel number.

    :param channels: list of RTIO channel objects (``LogChannel``
        instances are skipped on the input side).
    :param fine_ts_width: fine timestamp bits; defaults to the widest
        requirement among the channels.
    :param guard_io_cycles: guard window passed to the output managers.
    """
    if fine_ts_width is None:
        fine_ts_width = max(
            rtlink.get_fine_ts_width(c.interface) for c in channels)

    self.cri = cri.Interface()
    self.reset = CSR()
    self.reset_phy = CSR()
    self.comb += self.cri.arb_gnt.eq(1)

    # Clocking/Reset
    # Create rsys, rio and rio_phy domains based on sys and rtio
    # with reset controlled by CRI.
    cmd_reset = Signal(reset=1)
    cmd_reset_phy = Signal(reset=1)
    self.sync += [
        cmd_reset.eq(self.reset.re),
        cmd_reset_phy.eq(self.reset_phy.re)
    ]
    cmd_reset.attr.add("no_retiming")
    cmd_reset_phy.attr.add("no_retiming")

    self.clock_domains.cd_rsys = ClockDomain()
    self.clock_domains.cd_rio = ClockDomain()
    self.clock_domains.cd_rio_phy = ClockDomain()
    self.comb += [
        self.cd_rsys.clk.eq(ClockSignal()),
        self.cd_rsys.rst.eq(cmd_reset)
    ]
    self.comb += self.cd_rio.clk.eq(ClockSignal("rtio"))
    self.specials += AsyncResetSynchronizer(self.cd_rio, cmd_reset)
    self.comb += self.cd_rio_phy.clk.eq(ClockSignal("rtio"))
    self.specials += AsyncResetSynchronizer(self.cd_rio_phy,
                                            cmd_reset_phy)

    # Managers
    self.submodules.counter = RTIOCounter(
        len(self.cri.o_timestamp) - fine_ts_width)

    i_datas, i_timestamps = [], []
    o_statuses, i_statuses = [], []
    sel = self.cri.chan_sel[:16]
    for n, channel in enumerate(channels):
        if isinstance(channel, LogChannel):
            # Log channels do not get managers; report empty input.
            i_datas.append(0)
            i_timestamps.append(0)
            i_statuses.append(0)
            continue

        selected = Signal()
        self.comb += selected.eq(sel == n)

        # Output side.
        o_manager = _OutputManager(channel.interface.o, self.counter,
                                   channel.ofifo_depth, guard_io_cycles)
        self.submodules += o_manager

        if hasattr(o_manager.ev, "data"):
            self.comb += o_manager.ev.data.eq(self.cri.o_data)
        if hasattr(o_manager.ev, "address"):
            self.comb += o_manager.ev.address.eq(self.cri.o_address)
        # Drop fine timestamp bits this channel does not use.
        ts_shift = (len(self.cri.o_timestamp)
                    - len(o_manager.ev.timestamp))
        self.comb += o_manager.ev.timestamp.eq(
            self.cri.o_timestamp[ts_shift:])

        self.comb += o_manager.we.eq(
            selected & (self.cri.cmd == cri.commands["write"]))

        # Sticky per-channel error flags, set by the manager and
        # cleared by the matching reset command (set wins on a tie).
        underflow = Signal()
        sequence_error = Signal()
        collision = Signal()
        busy = Signal()
        self.sync.rsys += [
            If(self.cri.cmd == cri.commands["o_underflow_reset"],
                underflow.eq(0)),
            If(self.cri.cmd == cri.commands["o_sequence_error_reset"],
                sequence_error.eq(0)),
            If(self.cri.cmd == cri.commands["o_collision_reset"],
                collision.eq(0)),
            If(self.cri.cmd == cri.commands["o_busy_reset"],
                busy.eq(0)),
            If(o_manager.underflow, underflow.eq(1)),
            If(o_manager.sequence_error, sequence_error.eq(1)),
            If(o_manager.collision, collision.eq(1)),
            If(o_manager.busy, busy.eq(1))
        ]
        o_statuses.append(Cat(~o_manager.writable, underflow,
                              sequence_error, collision, busy))

        # Input side (only when the channel has an input interface).
        if channel.interface.i is not None:
            i_manager = _InputManager(channel.interface.i, self.counter,
                                      channel.ififo_depth)
            self.submodules += i_manager

            if hasattr(i_manager.ev, "data"):
                i_datas.append(i_manager.ev.data)
            else:
                i_datas.append(0)
            if channel.interface.i.timestamped:
                ts_shift = (len(self.cri.i_timestamp)
                            - len(i_manager.ev.timestamp))
                i_timestamps.append(i_manager.ev.timestamp << ts_shift)
            else:
                i_timestamps.append(0)

            self.comb += i_manager.re.eq(
                selected & (self.cri.cmd == cri.commands["read"]))

            overflow = Signal()
            self.sync.rsys += [
                If(selected
                        & (self.cri.cmd
                           == cri.commands["i_overflow_reset"]),
                    overflow.eq(0)),
                If(i_manager.overflow, overflow.eq(1))
            ]
            i_statuses.append(Cat(~i_manager.readable, overflow))
        else:
            i_datas.append(0)
            i_timestamps.append(0)
            i_statuses.append(0)

    # Channel-number multiplexers onto the CRI.
    self.comb += [
        self.cri.i_data.eq(Array(i_datas)[sel]),
        self.cri.i_timestamp.eq(Array(i_timestamps)[sel]),
        self.cri.o_status.eq(Array(o_statuses)[sel]),
        self.cri.i_status.eq(Array(i_statuses)[sel])
    ]

    self.comb += self.cri.counter.eq(
        self.counter.value_sys << fine_ts_width)
def __init__(self):
    """CRI master fed by a record stream, with per-error latched
    status CSRs (underflow, sequence error, collision, busy) and
    automatic error-reset commands on the CRI.
    """
    self.arb_req = CSRStorage()
    self.arb_gnt = CSRStatus()

    self.error_status = CSRStatus(5)  # same encoding as RTIO status
    self.error_underflow_reset = CSR()
    self.error_sequence_error_reset = CSR()
    self.error_collision_reset = CSR()
    self.error_busy_reset = CSR()
    # Coordinates of the event that caused the latched error.
    self.error_channel = CSRStatus(24)
    self.error_timestamp = CSRStatus(64)
    self.error_address = CSRStatus(16)

    self.sink = stream.Endpoint(record_layout)
    self.cri = cri.Interface()
    self.busy = Signal()

    # # #

    self.comb += [
        self.cri.arb_req.eq(self.arb_req.storage),
        self.arb_gnt.status.eq(self.cri.arb_gnt)
    ]

    # One set-bit per error kind, driven by the error FSM states.
    error_set = Signal(4)
    for i, rcsr in enumerate([
            self.error_underflow_reset, self.error_sequence_error_reset,
            self.error_collision_reset, self.error_busy_reset]):
        # bit 0 is RTIO wait and always 0 here
        bit = i + 1
        self.sync += [
            If(error_set[i],
                self.error_status.status[bit].eq(1),
                self.error_channel.status.eq(self.sink.channel),
                self.error_timestamp.status.eq(self.sink.timestamp),
                self.error_address.status.eq(self.sink.address)),
            If(rcsr.re,
                self.error_status.status[bit].eq(0))
        ]

    self.comb += [
        self.cri.chan_sel.eq(self.sink.channel),
        self.cri.o_timestamp.eq(self.sink.timestamp),
        self.cri.o_address.eq(self.sink.address),
        self.cri.o_data.eq(self.sink.data)
    ]

    fsm = FSM(reset_state="IDLE")
    self.submodules += fsm
    fsm.act("IDLE",
        If(self.error_status.status == 0,
            If(self.sink.stb,
                If(self.sink.eop,
                    # last packet contains dummy data, discard it
                    self.sink.ack.eq(1)
                ).Else(
                    NextState("WRITE")))
        ).Else(
            # discard all data until errors are acked
            self.sink.ack.eq(1)))
    fsm.act("WRITE",
        self.busy.eq(1),
        self.cri.cmd.eq(cri.commands["write"]),
        NextState("CHECK_STATE"))
    fsm.act("CHECK_STATE",
        self.busy.eq(1),
        If(self.cri.o_status == 0,
            self.sink.ack.eq(1),
            NextState("IDLE")),
        # One status bit per error kind (bit 0 is "wait").
        If(self.cri.o_status[1], NextState("UNDERFLOW")),
        If(self.cri.o_status[2], NextState("SEQUENCE_ERROR")),
        If(self.cri.o_status[3], NextState("COLLISION")),
        If(self.cri.o_status[4], NextState("BUSY")))
    # Each error state latches its flag and issues the corresponding
    # reset command on the CRI before resuming.
    for n, name in enumerate(
            ["UNDERFLOW", "SEQUENCE_ERROR", "COLLISION", "BUSY"]):
        fsm.act(name,
            self.busy.eq(1),
            error_set.eq(1 << n),
            self.cri.cmd.eq(
                cri.commands["o_" + name.lower() + "_reset"]),
            self.sink.ack.eq(1),
            NextState("IDLE"))
def __init__(self, tsc, channels, lane_count=8, fifo_depth=128):
    """RTIO core, asynchronous variant: CRI side runs in ``rsys``
    (system clock) while the I/O side runs in ``rio``/``rio_phy``
    (rtio clock). Output errors are brought back to the system domain
    with BlindTransfer pulse synchronizers and latched into CSRs.
    """
    self.cri = cri.Interface()
    self.reset = CSR()
    self.reset_phy = CSR()
    # Bit 0: collision, bit 1: busy, bit 2: sequence error.
    self.async_error = CSR(3)
    # Channel number of the first (unacknowledged) error of each kind.
    self.collision_channel = CSRStatus(16)
    self.busy_channel = CSRStatus(16)
    self.sequence_error_channel = CSRStatus(16)

    # Clocking/Reset
    # Create rsys, rio and rio_phy domains based on sys and rtio
    # with reset controlled by CSR.
    #
    # The `rio` CD contains logic that is reset with `core.reset()`.
    # That's state that could unduly affect subsequent experiments,
    # i.e. input overflows caused by input gates left open, FIFO events far
    # in the future blocking the experiment, pending RTIO or
    # wishbone bus transactions, etc.
    # The `rio_phy` CD contains state that is maintained across
    # `core.reset()`, i.e. TTL output state, OE, DDS state.
    cmd_reset = Signal(reset=1)
    cmd_reset_phy = Signal(reset=1)
    self.sync += [
        cmd_reset.eq(self.reset.re),
        cmd_reset_phy.eq(self.reset_phy.re)
    ]
    cmd_reset.attr.add("no_retiming")
    cmd_reset_phy.attr.add("no_retiming")

    self.clock_domains.cd_rsys = ClockDomain()
    self.clock_domains.cd_rio = ClockDomain()
    self.clock_domains.cd_rio_phy = ClockDomain()
    self.comb += [
        self.cd_rsys.clk.eq(ClockSignal()),
        self.cd_rsys.rst.eq(cmd_reset),
        self.cd_rio.clk.eq(ClockSignal("rtio")),
        self.cd_rio_phy.clk.eq(ClockSignal("rtio"))
    ]
    self.specials += AsyncResetSynchronizer(self.cd_rio, cmd_reset)
    self.specials += AsyncResetSynchronizer(self.cd_rio_phy,
                                            cmd_reset_phy)

    # TSC
    # The global TSC must cover the fine timestamp width of every
    # channel interface.
    chan_fine_ts_width = max(
        max(rtlink.get_fine_ts_width(channel.interface.o)
            for channel in channels),
        max(rtlink.get_fine_ts_width(channel.interface.i)
            for channel in channels))
    assert tsc.glbl_fine_ts_width >= chan_fine_ts_width

    # Outputs/Inputs
    quash_channels = [
        n for n, c in enumerate(channels) if isinstance(c, LogChannel)
    ]

    outputs = SED(channels, tsc.glbl_fine_ts_width, "async",
                  quash_channels=quash_channels,
                  lane_count=lane_count, fifo_depth=fifo_depth,
                  interface=self.cri)
    self.submodules += outputs
    self.comb += outputs.coarse_timestamp.eq(tsc.coarse_ts)
    # Margin of 16 coarse cycles before an event underflows.
    self.sync += outputs.minimum_coarse_timestamp.eq(
        tsc.coarse_ts_sys + 16)

    inputs = InputCollector(tsc, channels, "async",
                            quash_channels=quash_channels,
                            interface=self.cri)
    self.submodules += inputs

    # Asynchronous output errors
    o_collision_sync = BlindTransfer(data_width=16)
    o_busy_sync = BlindTransfer(data_width=16)
    self.submodules += o_collision_sync, o_busy_sync
    o_collision = Signal()
    o_busy = Signal()
    o_sequence_error = Signal()
    self.sync += [
        # CPU acknowledges errors by writing the matching bits.
        If(self.async_error.re,
            If(self.async_error.r[0], o_collision.eq(0)),
            If(self.async_error.r[1], o_busy.eq(0)),
            If(self.async_error.r[2], o_sequence_error.eq(0)),
        ),
        # The channel CSR is captured only for the first error of a
        # kind (while the flag is still clear).
        If(o_collision_sync.o,
            o_collision.eq(1),
            If(~o_collision,
                self.collision_channel.status.eq(
                    o_collision_sync.data_o))),
        If(o_busy_sync.o,
            o_busy.eq(1),
            If(~o_busy,
                self.busy_channel.status.eq(o_busy_sync.data_o))),
        If(outputs.sequence_error,
            o_sequence_error.eq(1),
            If(~o_sequence_error,
                self.sequence_error_channel.status.eq(
                    outputs.sequence_error_channel)))
    ]
    self.comb += self.async_error.w.eq(
        Cat(o_collision, o_busy, o_sequence_error))

    self.comb += [
        o_collision_sync.i.eq(outputs.collision),
        o_collision_sync.data_i.eq(outputs.collision_channel),
        o_busy_sync.i.eq(outputs.busy),
        o_busy_sync.data_i.eq(outputs.busy_channel)
    ]
def __init__(self, lane_count, seqn_width, layout_payload,
             compensation, glbl_fine_ts_width,
             enable_spread=True, quash_channels=None, interface=None):
    """SED lane distributor: routes CRI write events to one of
    ``lane_count`` output lanes, enforcing monotonic timestamps per
    lane and applying per-channel latency compensation.

    Operates as a 3-cycle pipeline: cycle 1 registers timestamp
    differences, cycle 2 decides write/underflow/sequence-error and
    picks the lane, cycle 3 reports status.

    :param lane_count: number of lanes; must be a power of 2.
    :param seqn_width: width of the sequence number tagged onto events.
    :param layout_payload: payload layout for the lane FIFO records.
    :param compensation: per-channel latency compensation values
        (14-bit signed range).
    :param glbl_fine_ts_width: fine timestamp bits of the global TSC.
    :param enable_spread: also switch lanes when the current lane
        becomes writable again, to spread load.
    :param quash_channels: channel numbers whose writes are ignored.
    :param interface: optional pre-existing CRI interface.
    """
    if lane_count & (lane_count - 1):
        raise NotImplementedError("lane count must be a power of 2")

    # Fix: avoid the shared mutable default argument ([]); use a None
    # sentinel instead. Read-only here, but the idiom is safer.
    if quash_channels is None:
        quash_channels = []
    if interface is None:
        interface = cri.Interface()
    self.cri = interface
    self.sequence_error = Signal()
    self.sequence_error_channel = Signal(16, reset_less=True)
    # The minimum timestamp that an event must have to avoid triggering
    # an underflow, at the time when the CRI write happens, and to a channel
    # with zero latency compensation. This is synchronous to the system clock
    # domain.
    us_timestamp_width = 64 - glbl_fine_ts_width
    self.minimum_coarse_timestamp = Signal(us_timestamp_width)
    self.output = [
        Record(layouts.fifo_ingress(seqn_width, layout_payload))
        for _ in range(lane_count)
    ]

    # # #

    o_status_wait = Signal()
    o_status_underflow = Signal()
    self.comb += self.cri.o_status.eq(
        Cat(o_status_wait, o_status_underflow))

    # The core keeps writing events into the current lane as long as timestamps
    # (after compensation) are strictly increasing, otherwise it switches to
    # the next lane.
    # If spread is enabled, it also switches to the next lane after the current
    # lane has been full, in order to maximize lane utilization.
    # The current lane is called lane "A". The next lane (which may be chosen
    # at a later stage by the core) is called lane "B".
    # Computations for both lanes are prepared in advance to increase
    # performance.

    current_lane = Signal(max=lane_count)
    # The last coarse timestamp received from the CRI, after compensation.
    # Used to determine when to switch lanes.
    last_coarse_timestamp = Signal(us_timestamp_width)
    # The last coarse timestamp written to each lane. Used to detect
    # sequence errors.
    last_lane_coarse_timestamps = Array(
        Signal(us_timestamp_width) for _ in range(lane_count))
    # Sequence number counter. The sequence number is used to determine
    # which event wins during a replace.
    seqn = Signal(seqn_width)

    # distribute data to lanes
    for lio in self.output:
        self.comb += [
            lio.seqn.eq(seqn),
            lio.payload.channel.eq(self.cri.chan_sel[:16]),
            lio.payload.timestamp.eq(self.cri.timestamp),
        ]
        if hasattr(lio.payload, "address"):
            self.comb += lio.payload.address.eq(self.cri.o_address)
        if hasattr(lio.payload, "data"):
            self.comb += lio.payload.data.eq(self.cri.o_data)

    # when timestamp and channel arrive in cycle #1, prepare computations
    coarse_timestamp = Signal(us_timestamp_width)
    self.comb += coarse_timestamp.eq(
        self.cri.timestamp[glbl_fine_ts_width:])
    # Signed differences; compensation is subtracted in cycle #2 and
    # the sign bit gives the comparison result.
    min_minus_timestamp = Signal((us_timestamp_width + 1, True),
                                 reset_less=True)
    laneAmin_minus_timestamp = Signal.like(min_minus_timestamp)
    laneBmin_minus_timestamp = Signal.like(min_minus_timestamp)
    last_minus_timestamp = Signal.like(min_minus_timestamp)
    current_lane_plus_one = Signal(max=lane_count)
    self.comb += current_lane_plus_one.eq(current_lane + 1)
    self.sync += [
        min_minus_timestamp.eq(self.minimum_coarse_timestamp
                               - coarse_timestamp),
        laneAmin_minus_timestamp.eq(
            last_lane_coarse_timestamps[current_lane]
            - coarse_timestamp),
        laneBmin_minus_timestamp.eq(
            last_lane_coarse_timestamps[current_lane_plus_one]
            - coarse_timestamp),
        last_minus_timestamp.eq(last_coarse_timestamp
                                - coarse_timestamp)
    ]

    # Quash channels are "dummy" channels to which writes are completely
    # ignored. This is used by the RTIO log channel, which is taken into
    # account by the analyzer but does not enter the lanes.
    quash = Signal()
    self.sync += quash.eq(0)
    for channel in quash_channels:
        self.sync += If(self.cri.chan_sel[:16] == channel, quash.eq(1))

    # 1 << 14 - 1 parses as 1 << 13 == 8192, the 14-bit signed bound.
    assert all(abs(c) < 1 << 14 - 1 for c in compensation)
    latency_compensation = Memory(14, len(compensation),
                                  init=compensation)
    latency_compensation_port = latency_compensation.get_port()
    self.specials += latency_compensation, latency_compensation_port
    self.comb += latency_compensation_port.adr.eq(
        self.cri.chan_sel[:16])

    # cycle #2, write
    # NOTE: rebinds the `compensation` parameter name to the looked-up
    # per-channel value (the list is no longer needed past this point).
    compensation = Signal((14, True))
    self.comb += compensation.eq(latency_compensation_port.dat_r)
    timestamp_above_min = Signal()
    timestamp_above_last = Signal()
    timestamp_above_laneA_min = Signal()
    timestamp_above_laneB_min = Signal()
    timestamp_above_lane_min = Signal()
    force_laneB = Signal()
    use_laneB = Signal()
    use_lanen = Signal(max=lane_count)

    do_write = Signal()
    do_underflow = Signal()
    do_sequence_error = Signal()
    self.comb += [
        timestamp_above_min.eq(
            min_minus_timestamp - compensation < 0),
        timestamp_above_laneA_min.eq(
            laneAmin_minus_timestamp - compensation < 0),
        timestamp_above_laneB_min.eq(
            laneBmin_minus_timestamp - compensation < 0),
        timestamp_above_last.eq(
            last_minus_timestamp - compensation < 0),
        # Non-increasing timestamp (or forced spread) switches to the
        # next lane ("B"); otherwise stay on the current lane ("A").
        If(force_laneB | ~timestamp_above_last,
            use_lanen.eq(current_lane_plus_one),
            use_laneB.eq(1)
        ).Else(
            use_lanen.eq(current_lane),
            use_laneB.eq(0)),
        timestamp_above_lane_min.eq(
            Mux(use_laneB, timestamp_above_laneB_min,
                timestamp_above_laneA_min)),
        If(~quash & (self.cri.cmd == cri.commands["write"]),
            If(timestamp_above_min,
                If(timestamp_above_lane_min,
                    do_write.eq(1)
                ).Else(
                    do_sequence_error.eq(1))
            ).Else(
                do_underflow.eq(1))),
        Array(lio.we for lio in self.output)[use_lanen].eq(do_write)
    ]
    compensated_timestamp = Signal(64)
    self.comb += compensated_timestamp.eq(
        self.cri.timestamp + (compensation << glbl_fine_ts_width))
    self.sync += [
        If(do_write,
            current_lane.eq(use_lanen),
            last_coarse_timestamp.eq(
                compensated_timestamp[glbl_fine_ts_width:]),
            last_lane_coarse_timestamps[use_lanen].eq(
                compensated_timestamp[glbl_fine_ts_width:]),
            seqn.eq(seqn + 1),
        )
    ]
    # Overrides the earlier raw-timestamp assignment: lanes receive
    # the compensated timestamp.
    for lio in self.output:
        self.comb += lio.payload.timestamp.eq(compensated_timestamp)

    # cycle #3, read status
    current_lane_writable = Signal()
    self.comb += [
        current_lane_writable.eq(
            Array(lio.writable for lio in self.output)[current_lane]),
        o_status_wait.eq(~current_lane_writable)
    ]
    self.sync += [
        If(self.cri.cmd == cri.commands["write"],
            o_status_underflow.eq(0)),
        If(do_underflow,
            o_status_underflow.eq(1)),
        self.sequence_error.eq(do_sequence_error),
        self.sequence_error_channel.eq(self.cri.chan_sel[:16])
    ]

    # current lane has been full, spread events by switching to the next.
    if enable_spread:
        current_lane_writable_r = Signal(reset=1)
        self.sync += [
            current_lane_writable_r.eq(current_lane_writable),
            If(~current_lane_writable_r & current_lane_writable,
                force_laneB.eq(1)),
            If(do_write,
                force_laneB.eq(0))
        ]
def __init__(self, rt_packets, channel_count, fine_ts_width):
    """DRTIO master RTIO controller: drives the remote satellite via
    ``rt_packets``, maintaining a local cache of per-channel remote
    FIFO space and last timestamps to detect sequence errors and
    underflows before transmitting.
    """
    self.csrs = _CSRs()
    self.cri = cri.Interface()
    self.comb += self.cri.arb_gnt.eq(1)

    # channel selection (CSR override for debugging takes precedence)
    chan_sel = Signal(16)
    self.comb += chan_sel.eq(
        Mux(self.csrs.chan_sel_override_en.storage,
            self.csrs.chan_sel_override.storage,
            self.cri.chan_sel[:16]))

    # master RTIO counter and counter synchronization
    self.submodules.counter = RTIOCounter(64 - fine_ts_width)
    self.comb += self.cri.counter.eq(
        self.counter.value_sys << fine_ts_width)
    tsc_correction = Signal(64)
    self.csrs.tsc_correction.storage.attr.add("no_retiming")
    self.specials += MultiReg(self.csrs.tsc_correction.storage,
                              tsc_correction)
    self.comb += [
        rt_packets.tsc_value.eq(
            self.counter.value_rtio + tsc_correction),
        self.csrs.set_time.w.eq(rt_packets.set_time_stb)
    ]
    self.sync += [
        If(rt_packets.set_time_ack, rt_packets.set_time_stb.eq(0)),
        If(self.csrs.set_time.re, rt_packets.set_time_stb.eq(1))
    ]

    # reset
    self.sync += [
        If(rt_packets.reset_ack, rt_packets.reset_stb.eq(0)),
        If(self.csrs.reset.re,
            rt_packets.reset_stb.eq(1),
            rt_packets.reset_phy.eq(0)),
        If(self.csrs.reset_phy.re,
            rt_packets.reset_stb.eq(1),
            rt_packets.reset_phy.eq(1)),
    ]
    local_reset = Signal(reset=1)
    self.sync += local_reset.eq(self.csrs.reset.re)
    local_reset.attr.add("no_retiming")
    self.clock_domains.cd_sys_with_rst = ClockDomain()
    self.clock_domains.cd_rtio_with_rst = ClockDomain()
    self.comb += [
        self.cd_sys_with_rst.clk.eq(ClockSignal()),
        self.cd_sys_with_rst.rst.eq(local_reset)
    ]
    self.comb += self.cd_rtio_with_rst.clk.eq(ClockSignal("rtio"))
    self.specials += AsyncResetSynchronizer(self.cd_rtio_with_rst,
                                            local_reset)

    # remote channel status cache
    fifo_spaces_mem = Memory(16, channel_count)
    fifo_spaces = fifo_spaces_mem.get_port(write_capable=True)
    self.specials += fifo_spaces_mem, fifo_spaces
    last_timestamps_mem = Memory(64, channel_count)
    last_timestamps = last_timestamps_mem.get_port(write_capable=True)
    self.specials += last_timestamps_mem, last_timestamps

    # common packet fields
    rt_packets_fifo_request = Signal()
    self.comb += [
        fifo_spaces.adr.eq(chan_sel),
        last_timestamps.adr.eq(chan_sel),
        last_timestamps.dat_w.eq(self.cri.o_timestamp),
        rt_packets.write_channel.eq(chan_sel),
        rt_packets.write_address.eq(self.cri.o_address),
        rt_packets.write_data.eq(self.cri.o_data),
        # A FIFO space request is encoded as a write with this
        # magic timestamp value.
        If(rt_packets_fifo_request,
            rt_packets.write_timestamp.eq(0xffff000000000000)
        ).Else(
            rt_packets.write_timestamp.eq(self.cri.o_timestamp))
    ]

    fsm = ClockDomainsRenamer("sys_with_rst")(FSM())
    self.submodules += fsm

    status_wait = Signal()
    status_underflow = Signal()
    status_sequence_error = Signal()
    self.comb += [
        self.cri.o_status.eq(
            Cat(status_wait, status_underflow, status_sequence_error)),
        self.csrs.o_wait.status.eq(status_wait)
    ]
    sequence_error_set = Signal()
    underflow_set = Signal()
    self.sync.sys_with_rst += [
        If(self.cri.cmd == cri.commands["o_underflow_reset"],
            status_underflow.eq(0)),
        If(self.cri.cmd == cri.commands["o_sequence_error_reset"],
            status_sequence_error.eq(0)),
        If(underflow_set, status_underflow.eq(1)),
        If(sequence_error_set, status_sequence_error.eq(1))
    ]

    signal_fifo_space_timeout = Signal()
    self.sync.sys_with_rst += [
        If(self.csrs.o_fifo_space_timeout.re,
            self.csrs.o_fifo_space_timeout.w.eq(0)),
        If(signal_fifo_space_timeout,
            self.csrs.o_fifo_space_timeout.w.eq(1))
    ]
    timeout_counter = WaitTimer(8191)
    self.submodules += timeout_counter

    # TODO: collision, replace, busy
    # Checked locally, before transmitting, using the cached state.
    cond_sequence_error = self.cri.o_timestamp < last_timestamps.dat_r
    cond_underflow = ((self.cri.o_timestamp[fine_ts_width:]
                       - self.csrs.underflow_margin.storage[
                           fine_ts_width:])
                      < self.counter.value_sys)

    fsm.act("IDLE",
        If(self.cri.cmd == cri.commands["write"],
            If(cond_sequence_error,
                sequence_error_set.eq(1)
            ).Elif(cond_underflow,
                underflow_set.eq(1)
            ).Else(
                NextState("WRITE"))),
        If(self.csrs.o_get_fifo_space.re,
            NextState("GET_FIFO_SPACE")))
    fsm.act("WRITE",
        status_wait.eq(1),
        rt_packets.write_stb.eq(1),
        If(rt_packets.write_ack,
            # Decrement the cached space; refresh it when it runs low.
            fifo_spaces.we.eq(1),
            fifo_spaces.dat_w.eq(fifo_spaces.dat_r - 1),
            last_timestamps.we.eq(1),
            If(fifo_spaces.dat_r <= 1,
                NextState("GET_FIFO_SPACE")
            ).Else(
                NextState("IDLE"))))
    fsm.act("GET_FIFO_SPACE",
        status_wait.eq(1),
        rt_packets_fifo_request.eq(1),
        rt_packets.write_stb.eq(1),
        rt_packets.fifo_space_not_ack.eq(1),
        If(rt_packets.write_ack,
            NextState("GET_FIFO_SPACE_REPLY")))
    fsm.act("GET_FIFO_SPACE_REPLY",
        status_wait.eq(1),
        fifo_spaces.dat_w.eq(rt_packets.fifo_space),
        fifo_spaces.we.eq(1),
        rt_packets.fifo_space_not_ack.eq(1),
        If(rt_packets.fifo_space_not,
            # Keep polling until the remote reports free space.
            If(rt_packets.fifo_space != 0,
                NextState("IDLE")
            ).Else(
                NextState("GET_FIFO_SPACE"))),
        timeout_counter.wait.eq(1),
        If(timeout_counter.done,
            signal_fifo_space_timeout.eq(1),
            NextState("GET_FIFO_SPACE")))

    # channel state access
    self.comb += [
        self.csrs.o_dbg_fifo_space.status.eq(fifo_spaces.dat_r),
        self.csrs.o_dbg_last_timestamp.status.eq(
            last_timestamps.dat_r),
        If(self.csrs.o_reset_channel_status.re,
            fifo_spaces.dat_w.eq(0),
            fifo_spaces.we.eq(1),
            last_timestamps.dat_w.eq(0),
            last_timestamps.we.eq(1))
    ]
    # Debug counter of issued FIFO space requests.
    self.sync += \
        If((rt_packets.write_stb
            & rt_packets.write_ack
            & rt_packets_fifo_request),
           self.csrs.o_dbg_fifo_space_req_cnt.status.eq(
               self.csrs.o_dbg_fifo_space_req_cnt.status + 1)
        )
def __init__(self, tsc, rt_packet):
    """RT controller for a DRTIO master link.

    Bridges the local CRI interface to a remote satellite through the
    packet layer *rt_packet*: forwards write/read commands, keeps a
    per-destination cache of remote buffer space, and latches protocol
    errors and output underflows into CSRs.

    :param tsc: timestamp counter; provides ``coarse_ts`` (rtio domain),
        ``coarse_ts_sys`` (sys-domain copy) and ``glbl_fine_ts_width``.
    :param rt_packet: packet-layer module this controller drives.
    """
    self.csrs = _CSRs()
    self.cri = cri.Interface()

    # protocol errors
    # Sticky error flags: set by pulses from the packet layer, cleared
    # by writing a 1 to the corresponding protocol_error CSR bit.
    err_unknown_packet_type = Signal()
    err_packet_truncated = Signal()
    signal_buffer_space_timeout = Signal()
    err_buffer_space_timeout = Signal()
    self.sync += [
        If(
            self.csrs.protocol_error.re,
            If(self.csrs.protocol_error.r[0],
               err_unknown_packet_type.eq(0)),
            If(self.csrs.protocol_error.r[1], err_packet_truncated.eq(0)),
            If(self.csrs.protocol_error.r[2],
               err_buffer_space_timeout.eq(0))),
        # Set takes priority over clear (later statements win in Migen).
        If(rt_packet.err_unknown_packet_type,
           err_unknown_packet_type.eq(1)),
        If(rt_packet.err_packet_truncated, err_packet_truncated.eq(1)),
        If(signal_buffer_space_timeout, err_buffer_space_timeout.eq(1))
    ]
    self.comb += self.csrs.protocol_error.w.eq(
        Cat(err_unknown_packet_type, err_packet_truncated,
            err_buffer_space_timeout))

    # TSC synchronization
    # set_time CSR write raises set_time_stb; the packet layer acks it
    # once the set_time packet has been emitted.
    self.comb += [
        rt_packet.tsc_value.eq(tsc.coarse_ts),
        self.csrs.set_time.w.eq(rt_packet.set_time_stb)
    ]
    self.sync += [
        If(rt_packet.set_time_ack, rt_packet.set_time_stb.eq(0)),
        If(self.csrs.set_time.re, rt_packet.set_time_stb.eq(1))
    ]

    # chan_sel forcing
    # Optionally override the destination (upper 8 bits of chan_sel)
    # from CSRs, e.g. for buffer space requests issued by software.
    chan_sel = Signal(24)
    self.comb += chan_sel.eq(
        Mux(self.csrs.force_destination.storage,
            self.csrs.destination.storage << 16, self.cri.chan_sel))

    # common packet fields
    rt_packet_buffer_request = Signal()
    rt_packet_read_request = Signal()
    self.comb += [
        rt_packet.sr_chan_sel.eq(chan_sel),
        rt_packet.sr_address.eq(self.cri.o_address),
        rt_packet.sr_data.eq(self.cri.o_data),
        # Reads reuse the timestamp field to carry the input timeout.
        If(rt_packet_read_request,
           rt_packet.sr_timestamp.eq(self.cri.i_timeout)).Else(
               rt_packet.sr_timestamp.eq(self.cri.o_timestamp)),
        # Non-write requests are distinguished by sr_notwrite plus a
        # request code in sr_address (0=buffer space, 1=read).
        If(rt_packet_buffer_request, rt_packet.sr_notwrite.eq(1),
           rt_packet.sr_address.eq(0)),
        If(rt_packet_read_request, rt_packet.sr_notwrite.eq(1),
           rt_packet.sr_address.eq(1))
    ]

    # output status
    o_status_wait = Signal()
    o_status_underflow = Signal()
    self.comb += [
        self.cri.o_status.eq(Cat(o_status_wait, o_status_underflow)),
        self.csrs.o_wait.status.eq(o_status_wait)
    ]
    o_underflow_set = Signal()
    self.sync += [
        # Underflow is sticky until the next write command.
        If(self.cri.cmd == cri.commands["write"],
           o_status_underflow.eq(0)),
        If(o_underflow_set, o_status_underflow.eq(1))
    ]

    timeout_counter = WaitTimer(8191)
    self.submodules += timeout_counter

    # Underflow check on the coarse part of the timestamp, with the
    # CSR-configurable margin subtracted.
    cond_underflow = Signal()
    self.comb += cond_underflow.eq(
        (self.cri.o_timestamp[tsc.glbl_fine_ts_width:] -
         self.csrs.underflow_margin.storage[tsc.glbl_fine_ts_width:]
         ) < tsc.coarse_ts_sys)

    # buffer space
    # Cached remote buffer space, one 16-bit entry per destination
    # (addressed by the upper 8 bits of chan_sel).
    buffer_space = Memory(16, 256)
    buffer_space_port = buffer_space.get_port(write_capable=True)
    self.specials += buffer_space, buffer_space_port

    buffer_space_load = Signal()
    buffer_space_dec = Signal()
    self.comb += [
        buffer_space_port.adr.eq(chan_sel[16:]),
        buffer_space_port.we.eq(buffer_space_load | buffer_space_dec),
        # Either reload from a buffer_space reply, or decrement after a
        # write was accepted.
        If(buffer_space_load,
           buffer_space_port.dat_w.eq(rt_packet.buffer_space)).Else(
               buffer_space_port.dat_w.eq(buffer_space_port.dat_r - 1))
    ]

    # input status
    i_status_wait_event = Signal()
    i_status_overflow = Signal()
    i_status_wait_status = Signal()
    self.comb += self.cri.i_status.eq(
        Cat(i_status_wait_event, i_status_overflow, i_status_wait_status))

    # Latch the read reply (data/timestamp or no-event cause) when the
    # FSM leaves GET_READ_REPLY.
    load_read_reply = Signal()
    self.sync += [
        If(
            load_read_reply, i_status_wait_event.eq(0),
            i_status_overflow.eq(0),
            If(
                rt_packet.read_no_event,
                If(rt_packet.read_is_overflow,
                   i_status_overflow.eq(1)).Else(
                       i_status_wait_event.eq(1))),
            self.cri.i_data.eq(rt_packet.read_data),
            self.cri.i_timestamp.eq(rt_packet.read_timestamp))
    ]

    # FSM
    fsm = FSM()
    self.submodules += fsm

    fsm.act(
        "IDLE",
        If(
            self.cri.cmd == cri.commands["write"],
            If(cond_underflow,
               o_underflow_set.eq(1)).Else(NextState("WRITE"))),
        If(self.cri.cmd == cri.commands["read"], NextState("READ")),
        If(self.csrs.o_get_buffer_space.re, NextState("GET_BUFFER_SPACE")))
    fsm.act(
        "WRITE", o_status_wait.eq(1), rt_packet.sr_stb.eq(1),
        If(
            rt_packet.sr_ack, buffer_space_dec.eq(1),
            # When the cached space is about to run out, refresh it
            # from the satellite before accepting more writes.
            If(buffer_space_port.dat_r <= 1,
               NextState("GET_BUFFER_SPACE")).Else(NextState("IDLE"))))
    fsm.act("GET_BUFFER_SPACE", o_status_wait.eq(1),
            rt_packet.buffer_space_not_ack.eq(1),
            rt_packet_buffer_request.eq(1), rt_packet.sr_stb.eq(1),
            If(rt_packet.sr_ack, NextState("GET_BUFFER_SPACE_REPLY")))
    fsm.act(
        "GET_BUFFER_SPACE_REPLY", o_status_wait.eq(1),
        buffer_space_load.eq(1), rt_packet.buffer_space_not_ack.eq(1),
        If(
            rt_packet.buffer_space_not,
            # Zero space: poll again until the satellite drains.
            If(rt_packet.buffer_space != 0,
               NextState("IDLE")).Else(NextState("GET_BUFFER_SPACE"))),
        timeout_counter.wait.eq(1),
        # No reply within the timeout: latch the error and give up.
        If(timeout_counter.done, signal_buffer_space_timeout.eq(1),
           NextState("IDLE")))
    fsm.act("READ", i_status_wait_status.eq(1),
            rt_packet.read_not_ack.eq(1), rt_packet_read_request.eq(1),
            rt_packet.sr_stb.eq(1),
            If(rt_packet.sr_ack, NextState("GET_READ_REPLY")))
    fsm.act(
        "GET_READ_REPLY", i_status_wait_status.eq(1),
        rt_packet.read_not_ack.eq(1),
        # A CSR reset also aborts a pending read.
        If(self.csrs.reset.storage | rt_packet.read_not,
           load_read_reply.eq(1), NextState("IDLE")))

    # debug CSRs
    self.comb += self.csrs.o_dbg_buffer_space.status.eq(
        buffer_space_port.dat_r),
    self.sync += \
        If((rt_packet.sr_stb & rt_packet.sr_ack
            & rt_packet_buffer_request),
           self.csrs.o_dbg_buffer_space_req_cnt.status.eq(
               self.csrs.o_dbg_buffer_space_req_cnt.status + 1)
        )
def __init__(self, rt_packet, channel_count, fine_ts_width):
    """RT controller for a DRTIO master (single-destination variant).

    Owns the master RTIO counter, tracks remote buffer space in a
    single FSM-held register, and gates most sequential logic with a
    reset derived from the ``link_up`` CSR.

    :param rt_packet: packet-layer module this controller drives.
    :param channel_count: accepted for interface compatibility; not
        referenced in this body.
    :param fine_ts_width: number of fine timestamp bits below the
        coarse RTIO counter.
    """
    self.csrs = _CSRs()
    self.cri = cri.Interface()

    # reset
    # Hold sys_with_rst/rtio_with_rst in reset until software declares
    # the link up; no_retiming keeps the synchronizer chain intact.
    local_reset = Signal(reset=1)
    self.sync += local_reset.eq(~self.csrs.link_up.storage)
    local_reset.attr.add("no_retiming")
    self.clock_domains.cd_sys_with_rst = ClockDomain()
    self.clock_domains.cd_rtio_with_rst = ClockDomain()
    self.comb += [
        self.cd_sys_with_rst.clk.eq(ClockSignal()),
        self.cd_sys_with_rst.rst.eq(local_reset)
    ]
    self.comb += self.cd_rtio_with_rst.clk.eq(ClockSignal("rtio"))
    self.specials += AsyncResetSynchronizer(self.cd_rtio_with_rst,
                                            local_reset)

    # protocol errors
    # Sticky flags: set by packet-layer pulses, cleared by writing 1 to
    # the matching protocol_error CSR bit; set has priority over clear.
    err_unknown_packet_type = Signal()
    err_packet_truncated = Signal()
    signal_buffer_space_timeout = Signal()
    err_buffer_space_timeout = Signal()
    self.sync.sys_with_rst += [
        If(
            self.csrs.protocol_error.re,
            If(self.csrs.protocol_error.r[0],
               err_unknown_packet_type.eq(0)),
            If(self.csrs.protocol_error.r[1], err_packet_truncated.eq(0)),
            If(self.csrs.protocol_error.r[2],
               err_buffer_space_timeout.eq(0))),
        If(rt_packet.err_unknown_packet_type,
           err_unknown_packet_type.eq(1)),
        If(rt_packet.err_packet_truncated, err_packet_truncated.eq(1)),
        If(signal_buffer_space_timeout, err_buffer_space_timeout.eq(1))
    ]
    self.comb += self.csrs.protocol_error.w.eq(
        Cat(err_unknown_packet_type, err_packet_truncated,
            err_buffer_space_timeout))

    # master RTIO counter and counter synchronization
    self.submodules.counter = RTIOCounter(64 - fine_ts_width)
    self.comb += [
        self.cri.counter.eq(self.counter.value_sys << fine_ts_width),
        rt_packet.tsc_value.eq(self.counter.value_rtio),
        self.csrs.set_time.w.eq(rt_packet.set_time_stb)
    ]
    # set_time CSR write raises the strobe; the packet layer acks it.
    self.sync += [
        If(rt_packet.set_time_ack, rt_packet.set_time_stb.eq(0)),
        If(self.csrs.set_time.re, rt_packet.set_time_stb.eq(1))
    ]

    # common packet fields
    chan_sel = self.cri.chan_sel[:16]
    rt_packet_buffer_request = Signal()
    rt_packet_read_request = Signal()
    self.comb += [
        rt_packet.sr_channel.eq(chan_sel),
        rt_packet.sr_address.eq(self.cri.o_address),
        rt_packet.sr_data.eq(self.cri.o_data),
        rt_packet.sr_timestamp.eq(self.cri.timestamp),
        # Non-write requests: sr_notwrite set, request code in
        # sr_address (0=buffer space, 1=read).
        If(rt_packet_buffer_request, rt_packet.sr_notwrite.eq(1),
           rt_packet.sr_address.eq(0)),
        If(rt_packet_read_request, rt_packet.sr_notwrite.eq(1),
           rt_packet.sr_address.eq(1))
    ]

    # output status
    # Bit 2 reports "link down" directly from the CSR.
    o_status_wait = Signal()
    o_status_underflow = Signal()
    self.comb += [
        self.cri.o_status.eq(
            Cat(o_status_wait, o_status_underflow,
                ~self.csrs.link_up.storage)),
        self.csrs.o_wait.status.eq(o_status_wait)
    ]
    o_underflow_set = Signal()
    self.sync.sys_with_rst += [
        If(self.cri.cmd == cri.commands["write"],
           o_status_underflow.eq(0)),
        If(o_underflow_set, o_status_underflow.eq(1))
    ]

    timeout_counter = WaitTimer(8191)
    self.submodules += timeout_counter

    # Underflow check on the coarse timestamp with the configurable
    # margin subtracted.
    cond_underflow = Signal()
    self.comb += cond_underflow.eq(
        (self.cri.timestamp[fine_ts_width:] -
         self.csrs.underflow_margin.storage[fine_ts_width:]
         ) < self.counter.value_sys)

    # Remote buffer space cache (single register, updated via FSM
    # NextValue).
    buffer_space = Signal(16)

    # input status
    i_status_wait_event = Signal()
    i_status_overflow = Signal()
    i_status_wait_status = Signal()
    self.comb += self.cri.i_status.eq(
        Cat(i_status_wait_event, i_status_overflow, i_status_wait_status,
            ~self.csrs.link_up.storage))

    # Latch the read reply when the FSM leaves GET_READ_REPLY.
    load_read_reply = Signal()
    self.sync.sys_with_rst += [
        If(
            load_read_reply, i_status_wait_event.eq(0),
            i_status_overflow.eq(0),
            If(
                rt_packet.read_no_event,
                If(rt_packet.read_is_overflow,
                   i_status_overflow.eq(1)).Else(
                       i_status_wait_event.eq(1))),
            self.cri.i_data.eq(rt_packet.read_data),
            self.cri.i_timestamp.eq(rt_packet.read_timestamp))
    ]

    # FSM
    fsm = ClockDomainsRenamer("sys_with_rst")(FSM())
    self.submodules += fsm

    fsm.act(
        "IDLE",
        If(
            self.cri.cmd == cri.commands["write"],
            If(cond_underflow,
               o_underflow_set.eq(1)).Else(NextState("WRITE"))),
        If(self.cri.cmd == cri.commands["read"], NextState("READ")),
        If(self.csrs.o_get_buffer_space.re, NextState("GET_BUFFER_SPACE")))
    fsm.act(
        "WRITE", o_status_wait.eq(1), rt_packet.sr_stb.eq(1),
        If(
            rt_packet.sr_ack, NextValue(buffer_space, buffer_space - 1),
            # Refresh the cache before it runs out.
            If(buffer_space <= 1,
               NextState("GET_BUFFER_SPACE")).Else(NextState("IDLE"))))
    fsm.act("GET_BUFFER_SPACE", o_status_wait.eq(1),
            rt_packet.buffer_space_not_ack.eq(1),
            rt_packet_buffer_request.eq(1), rt_packet.sr_stb.eq(1),
            If(rt_packet.sr_ack, NextState("GET_BUFFER_SPACE_REPLY")))
    fsm.act(
        "GET_BUFFER_SPACE_REPLY", o_status_wait.eq(1),
        NextValue(buffer_space, rt_packet.buffer_space),
        rt_packet.buffer_space_not_ack.eq(1),
        If(
            rt_packet.buffer_space_not,
            If(rt_packet.buffer_space != 0,
               NextState("IDLE")).Else(NextState("GET_BUFFER_SPACE"))),
        timeout_counter.wait.eq(1),
        # On timeout, latch the error and retry the request.
        If(timeout_counter.done, signal_buffer_space_timeout.eq(1),
           NextState("GET_BUFFER_SPACE")))
    fsm.act("READ", i_status_wait_status.eq(1),
            rt_packet.read_not_ack.eq(1), rt_packet_read_request.eq(1),
            rt_packet.sr_stb.eq(1),
            If(rt_packet.sr_ack, NextState("GET_READ_REPLY")))
    fsm.act(
        "GET_READ_REPLY", i_status_wait_status.eq(1),
        rt_packet.read_not_ack.eq(1),
        If(rt_packet.read_not, load_read_reply.eq(1), NextState("IDLE")))

    # debug CSRs
    self.comb += self.csrs.o_dbg_buffer_space.status.eq(buffer_space),
    self.sync += \
        If((rt_packet.sr_stb & rt_packet.sr_ack
            & rt_packet_buffer_request),
           self.csrs.o_dbg_buffer_space_req_cnt.status.eq(
               self.csrs.o_dbg_buffer_space_req_cnt.status + 1)
        )
def __init__(self, tsc, chanif, rx_synchronizer=None):
    """DRTIO satellite top-level.

    Instantiates the link layer over *chanif* (transceiver channel
    interface), resynchronizes its RX signals into the local domains,
    and connects the satellite packet handler, link statistics and
    error collector. TSC load requests from the master are forwarded
    to *tsc* and reported through the ``tsc_loaded`` CSR.

    :param rx_synchronizer: optional custom RX resynchronizer; a
        GenericRXSynchronizer is created when None.
    """
    self.reset = CSRStorage(reset=1)
    self.reset_phy = CSRStorage(reset=1)
    self.tsc_loaded = CSR()
    # master interface in the rtio domain
    self.cri = cri.Interface()
    self.async_errors = Record(async_errors_layout)

    # rio/rio_phy clock domains run on the rtio clock with independent,
    # CSR-controlled asynchronous resets (both assert out of reset).
    self.clock_domains.cd_rio = ClockDomain()
    self.clock_domains.cd_rio_phy = ClockDomain()
    self.comb += [
        self.cd_rio.clk.eq(ClockSignal("rtio")),
        self.cd_rio_phy.clk.eq(ClockSignal("rtio"))
    ]
    reset = Signal()
    reset_phy = Signal()
    reset.attr.add("no_retiming")
    reset_phy.attr.add("no_retiming")
    self.sync += [
        reset.eq(self.reset.storage),
        reset_phy.eq(self.reset_phy.storage)
    ]
    self.specials += [
        AsyncResetSynchronizer(self.cd_rio, reset),
        AsyncResetSynchronizer(self.cd_rio_phy, reset_phy)
    ]

    self.submodules.link_layer = link_layer.LinkLayer(
        chanif.encoder, chanif.decoders)
    self.comb += self.link_layer.rx_ready.eq(chanif.rx_ready)

    if rx_synchronizer is None:
        rx_synchronizer = GenericRXSynchronizer()
        self.submodules += rx_synchronizer

    # Bundle of link-layer signals with RX paths passed through the
    # resynchronizer; TX paths connect directly.
    link_layer_sync = SimpleNamespace(
        tx_aux_frame=self.link_layer.tx_aux_frame,
        tx_aux_data=self.link_layer.tx_aux_data,
        tx_aux_ack=self.link_layer.tx_aux_ack,
        tx_rt_frame=self.link_layer.tx_rt_frame,
        tx_rt_data=self.link_layer.tx_rt_data,
        rx_aux_stb=rx_synchronizer.resync(self.link_layer.rx_aux_stb),
        rx_aux_frame=rx_synchronizer.resync(self.link_layer.rx_aux_frame),
        rx_aux_frame_perm=rx_synchronizer.resync(
            self.link_layer.rx_aux_frame_perm),
        rx_aux_data=rx_synchronizer.resync(self.link_layer.rx_aux_data),
        rx_rt_frame=rx_synchronizer.resync(self.link_layer.rx_rt_frame),
        rx_rt_frame_perm=rx_synchronizer.resync(
            self.link_layer.rx_rt_frame_perm),
        rx_rt_data=rx_synchronizer.resync(self.link_layer.rx_rt_data))
    self.submodules.link_stats = link_layer.LinkLayerStats(
        link_layer_sync, "rtio")
    self.submodules.rt_packet = ClockDomainsRenamer("rtio")(
        rt_packet_satellite.RTPacketSatellite(link_layer_sync,
                                              interface=self.cri))
    # The packet handler is reset together with the rio domain.
    self.comb += self.rt_packet.reset.eq(self.cd_rio.rst)

    self.comb += [
        tsc.load.eq(self.rt_packet.tsc_load),
        tsc.load_value.eq(self.rt_packet.tsc_load_value)
    ]

    # Report TSC loads to the sys domain: tsc_loaded is sticky, cleared
    # on CSR read strobe.
    ps_tsc_load = PulseSynchronizer("rtio", "sys")
    self.submodules += ps_tsc_load
    self.comb += ps_tsc_load.i.eq(self.rt_packet.tsc_load)
    self.sync += [
        If(self.tsc_loaded.re, self.tsc_loaded.w.eq(0)),
        If(ps_tsc_load.o, self.tsc_loaded.w.eq(1))
    ]

    self.submodules.rt_errors = rt_errors_satellite.RTErrorsSatellite(
        self.rt_packet, tsc, self.async_errors)
def __init__(self, channels, fine_ts_width=None, guard_io_cycles=20):
    """Local RTIO core: connects the CRI interface to per-channel
    output/input managers.

    :param channels: list of RTIO channels (LogChannel entries get
        constant status stubs and no managers).
    :param fine_ts_width: fine timestamp width; defaults to the widest
        fine timestamp among the channels.
    :param guard_io_cycles: passed through to each _OutputManager.
    """
    if fine_ts_width is None:
        fine_ts_width = max(
            rtlink.get_fine_ts_width(c.interface) for c in channels)

    self.cri = cri.Interface()
    self.reset = CSR()
    self.reset_phy = CSR()
    self.async_error = CSR(2)

    # Clocking/Reset
    # Create rsys, rio and rio_phy domains based on sys and rtio
    # with reset controlled by CRI.
    #
    # The `rio` CD contains logic that is reset with `core.reset()`.
    # That's state that could unduly affect subsequent experiments,
    # i.e. input overflows caused by input gates left open, FIFO events far
    # in the future blocking the experiment, pending RTIO or
    # wishbone bus transactions, etc.
    # The `rio_phy` CD contains state that is maintained across
    # `core.reset()`, i.e. TTL output state, OE, DDS state.
    cmd_reset = Signal(reset=1)
    cmd_reset_phy = Signal(reset=1)
    self.sync += [
        cmd_reset.eq(self.reset.re),
        cmd_reset_phy.eq(self.reset_phy.re)
    ]
    cmd_reset.attr.add("no_retiming")
    cmd_reset_phy.attr.add("no_retiming")

    self.clock_domains.cd_rsys = ClockDomain()
    self.clock_domains.cd_rio = ClockDomain()
    self.clock_domains.cd_rio_phy = ClockDomain()
    self.comb += [
        self.cd_rsys.clk.eq(ClockSignal()),
        self.cd_rsys.rst.eq(cmd_reset),
        self.cd_rio.clk.eq(ClockSignal("rtio")),
        self.cd_rio_phy.clk.eq(ClockSignal("rtio"))
    ]
    self.specials += AsyncResetSynchronizer(self.cd_rio, cmd_reset)
    self.specials += AsyncResetSynchronizer(self.cd_rio_phy, cmd_reset_phy)

    # Managers
    self.submodules.counter = RTIOCounter(
        len(self.cri.timestamp) - fine_ts_width)

    # Collision is not an asynchronous error with local RTIO, but
    # we treat it as such for consistency with DRTIO, where collisions
    # are reported by the satellites.
    o_underflow = Signal()
    o_sequence_error = Signal()
    o_collision = Signal()
    o_busy = Signal()
    # Sticky error bits: underflow/sequence error clear on the next
    # write command; collision/busy clear via the async_error CSR.
    self.sync.rsys += [
        If(
            self.cri.cmd == cri.commands["write"],
            o_underflow.eq(0),
            o_sequence_error.eq(0),
        )
    ]
    self.sync += [
        If(
            self.async_error.re,
            If(self.async_error.r[0], o_collision.eq(0)),
            If(self.async_error.r[1], o_busy.eq(0)),
        )
    ]

    # Per-channel fan-out/fan-in, indexed by the lower 16 bits of
    # chan_sel.
    o_statuses, i_statuses = [], []
    i_datas, i_timestamps = [], []
    i_ack = Signal()
    sel = self.cri.chan_sel[:16]
    for n, channel in enumerate(channels):
        if isinstance(channel, LogChannel):
            # LogChannels take no managers: always writable, no input.
            o_statuses.append(1)
            i_datas.append(0)
            i_timestamps.append(0)
            i_statuses.append(0)
            continue

        selected = Signal()
        self.comb += selected.eq(sel == n)

        o_manager = _OutputManager(channel.interface.o, self.counter,
                                   channel.ofifo_depth, guard_io_cycles)
        self.submodules += o_manager

        if hasattr(o_manager.ev, "data"):
            self.comb += o_manager.ev.data.eq(self.cri.o_data)
        if hasattr(o_manager.ev, "address"):
            self.comb += o_manager.ev.address.eq(self.cri.o_address)
        # Drop fine timestamp bits the channel does not implement.
        ts_shift = len(self.cri.timestamp) - len(o_manager.ev.timestamp)
        self.comb += o_manager.ev.timestamp.eq(
            self.cri.timestamp[ts_shift:])

        self.comb += o_manager.we.eq(
            selected & (self.cri.cmd == cri.commands["write"]))

        self.sync.rsys += [
            If(o_manager.underflow, o_underflow.eq(1)),
            If(o_manager.sequence_error, o_sequence_error.eq(1))
        ]
        self.sync += [
            If(o_manager.collision, o_collision.eq(1)),
            If(o_manager.busy, o_busy.eq(1))
        ]
        o_statuses.append(o_manager.writable)

        if channel.interface.i is not None:
            i_manager = _InputManager(channel.interface.i, self.counter,
                                      channel.ififo_depth)
            self.submodules += i_manager

            if hasattr(i_manager.ev, "data"):
                i_datas.append(i_manager.ev.data)
            else:
                i_datas.append(0)
            if channel.interface.i.timestamped:
                ts_shift = (len(self.cri.i_timestamp) -
                            len(i_manager.ev.timestamp))
                i_timestamps.append(i_manager.ev.timestamp << ts_shift)
            else:
                i_timestamps.append(0)

            # Per-channel input overflow flag, cleared when the event
            # is acknowledged for this channel.
            overflow = Signal()
            self.sync.rsys += [
                If(selected & i_ack, overflow.eq(0)),
                If(i_manager.overflow, overflow.eq(1))
            ]
            self.comb += i_manager.re.eq(selected & i_ack & ~overflow)
            i_statuses.append(Cat(i_manager.readable & ~overflow,
                                  overflow))

        else:
            i_datas.append(0)
            i_timestamps.append(0)
            i_statuses.append(0)

    # Mux the selected channel's status/data onto the CRI.
    o_status_raw = Signal()
    self.comb += [
        o_status_raw.eq(Array(o_statuses)[sel]),
        self.cri.o_status.eq(
            Cat(~o_status_raw, o_underflow, o_sequence_error)),
        self.async_error.w.eq(Cat(o_collision, o_busy))
    ]

    # Input path: a read command arms input_pending with a deadline;
    # the reply is latched when an event/overflow arrives or the
    # counter passes the timeout.
    i_status_raw = Signal(2)
    self.comb += i_status_raw.eq(Array(i_statuses)[sel])
    input_timeout = Signal.like(self.cri.timestamp)
    input_pending = Signal()
    self.sync.rsys += [
        i_ack.eq(0),
        If(
            i_ack,
            self.cri.i_status.eq(Cat(~i_status_raw[0], i_status_raw[1],
                                     0)),
            self.cri.i_data.eq(Array(i_datas)[sel]),
            self.cri.i_timestamp.eq(Array(i_timestamps)[sel]),
        ),
        If((self.cri.counter >= input_timeout) | (i_status_raw != 0),
           If(input_pending, i_ack.eq(1)), input_pending.eq(0)),
        If(self.cri.cmd == cri.commands["read"],
           input_timeout.eq(self.cri.timestamp), input_pending.eq(1),
           self.cri.i_status.eq(0b100))
    ]

    self.comb += self.cri.counter.eq(
        self.counter.value_sys << fine_ts_width)
def __init__(self, link_layer, interface=None):
    """Satellite-side RT packet handler.

    Decodes master-to-satellite packets from *link_layer* into CRI
    commands (write/read/buffer space/set_time/echo) and encodes the
    corresponding satellite-to-master replies.

    Bug fix applied: the packet-truncation check in the RX "INPUT"
    state was nested inside ``If(rx_dp.frame_r, ...)``, making its
    condition ``~rx_dp.frame_r & ongoing_packet`` unsatisfiable, so
    ``packet_truncated`` could never assert. It is now a sibling of
    the ``frame_r`` branch.
    """
    self.reset = Signal()

    # Error pulses consumed by the error-reporting machinery.
    self.unknown_packet_type = Signal()
    self.packet_truncated = Signal()
    self.buffer_space_timeout = Signal()

    # TSC load request and value, driven by set_time packets.
    self.tsc_load = Signal()
    self.tsc_load_value = Signal(64)

    if interface is None:
        interface = cri.Interface()
    self.cri = interface

    # # #

    # RX/TX datapath
    assert len(link_layer.tx_rt_data) == len(link_layer.rx_rt_data)
    assert len(link_layer.tx_rt_data) % 8 == 0
    ws = len(link_layer.tx_rt_data)
    rx_plm = get_m2s_layouts(ws)
    rx_dp = ReceiveDatapath(link_layer.rx_rt_frame,
                            link_layer.rx_rt_data, rx_plm)
    self.submodules += rx_dp
    tx_plm = get_s2m_layouts(ws)
    tx_dp = TransmitDatapath(link_layer.tx_rt_frame,
                             link_layer.tx_rt_data, tx_plm)
    self.submodules += tx_dp

    # RX write data buffer
    # Accumulates extra write-data words (beyond short_data) one link
    # word per cycle; the counter resets when loading stops.
    write_data_buffer_load = Signal()
    write_data_buffer_cnt = Signal(max=512 // ws + 1)
    write_data_buffer = Signal(512)
    self.sync += \
        If(write_data_buffer_load,
            Case(write_data_buffer_cnt,
                 {i: write_data_buffer[i*ws:(i+1)*ws].eq(rx_dp.data_r)
                  for i in range(512//ws)}),
            write_data_buffer_cnt.eq(write_data_buffer_cnt + 1)
        ).Else(
            write_data_buffer_cnt.eq(0)
        )

    # RX->TX
    echo_req = Signal()
    buffer_space_set = Signal()
    buffer_space_req = Signal()
    buffer_space_ack = Signal()
    self.sync += [
        If(buffer_space_ack, buffer_space_req.eq(0)),
        If(buffer_space_set, buffer_space_req.eq(1)),
    ]

    buffer_space_update = Signal()
    buffer_space = Signal(16)
    self.sync += If(buffer_space_update,
                    buffer_space.eq(self.cri.o_buffer_space))

    load_read_request = Signal()
    clear_read_request = Signal()
    read_request_pending = Signal()
    self.sync += [
        If(clear_read_request | self.reset, read_request_pending.eq(0)),
        If(
            load_read_request,
            read_request_pending.eq(1),
        )
    ]

    # RX FSM
    # CRI chan_sel mux: read request wins, then buffer space request,
    # default is the write packet's channel.
    cri_read = Signal()
    cri_buffer_space = Signal()
    self.comb += [
        self.tsc_load_value.eq(rx_dp.packet_as["set_time"].timestamp),
        If(
            cri_read | read_request_pending,
            self.cri.chan_sel.eq(rx_dp.packet_as["read_request"].chan_sel),
        ).Elif(
            cri_buffer_space,
            self.cri.chan_sel.eq(
                rx_dp.packet_as["buffer_space_request"].destination << 16)
        ).Else(self.cri.chan_sel.eq(rx_dp.packet_as["write"].chan_sel),
               ),
        self.cri.i_timeout.eq(rx_dp.packet_as["read_request"].timeout),
        self.cri.o_timestamp.eq(rx_dp.packet_as["write"].timestamp),
        self.cri.o_address.eq(rx_dp.packet_as["write"].address),
        self.cri.o_data.eq(
            Cat(rx_dp.packet_as["write"].short_data, write_data_buffer)),
    ]

    rx_fsm = FSM(reset_state="INPUT")
    self.submodules += rx_fsm

    ongoing_packet_next = Signal()
    ongoing_packet = Signal()
    self.sync += ongoing_packet.eq(ongoing_packet_next)

    timeout_counter = WaitTimer(8191)
    self.submodules += timeout_counter

    rx_fsm.act(
        "INPUT",
        If(
            rx_dp.frame_r,
            rx_dp.packet_buffer_load.eq(1),
            If(
                rx_dp.packet_last,
                Case(
                    rx_dp.packet_type,
                    {
                        # echo must have fixed latency, so there is no memory
                        # mechanism
                        rx_plm.types["echo_request"]: echo_req.eq(1),
                        rx_plm.types["set_time"]: NextState("SET_TIME"),
                        rx_plm.types["write"]: NextState("WRITE"),
                        rx_plm.types["buffer_space_request"]:
                        NextState("BUFFER_SPACE_REQUEST"),
                        rx_plm.types["read_request"]:
                        NextState("READ_REQUEST"),
                        "default": self.unknown_packet_type.eq(1)
                    })).Else(ongoing_packet_next.eq(1))),
        # Truncation check moved out of the frame_r branch: a frame
        # that drops (~frame_r) while a packet is ongoing is truncated.
        # When nested under If(rx_dp.frame_r, ...) this condition could
        # never be true.
        If(~rx_dp.frame_r & ongoing_packet, self.packet_truncated.eq(1)))
    rx_fsm.act("SET_TIME", self.tsc_load.eq(1), NextState("INPUT"))
    # CRI mux defaults to write information
    rx_fsm.act(
        "WRITE",
        If(
            write_data_buffer_cnt == rx_dp.packet_as["write"].extra_data_cnt,
            NextState("WRITE_CMD")).Else(
                write_data_buffer_load.eq(1),
                If(~rx_dp.frame_r, self.packet_truncated.eq(1),
                   NextState("INPUT"))))
    rx_fsm.act("WRITE_CMD", self.cri.cmd.eq(cri.commands["write"]),
               NextState("INPUT"))
    rx_fsm.act("BUFFER_SPACE_REQUEST", cri_buffer_space.eq(1),
               NextState("BUFFER_SPACE_REQUEST_CMD"))
    rx_fsm.act("BUFFER_SPACE_REQUEST_CMD", cri_buffer_space.eq(1),
               self.cri.cmd.eq(cri.commands["get_buffer_space"]),
               NextState("BUFFER_SPACE"))
    rx_fsm.act(
        "BUFFER_SPACE", cri_buffer_space.eq(1), timeout_counter.wait.eq(1),
        If(timeout_counter.done, self.buffer_space_timeout.eq(1),
           NextState("INPUT")).Elif(self.cri.o_buffer_space_valid,
                                    buffer_space_set.eq(1),
                                    buffer_space_update.eq(1),
                                    NextState("INPUT")))
    rx_fsm.act("READ_REQUEST", cri_read.eq(1),
               NextState("READ_REQUEST_CMD"))
    rx_fsm.act("READ_REQUEST_CMD", load_read_request.eq(1),
               cri_read.eq(1), self.cri.cmd.eq(cri.commands["read"]),
               NextState("INPUT"))

    # TX FSM
    # Reply priority: echo, then buffer space, then pending read (once
    # i_status bit 2, "wait status", clears).
    tx_fsm = FSM(reset_state="IDLE")
    self.submodules += tx_fsm
    tx_fsm.act(
        "IDLE", If(echo_req, NextState("ECHO")),
        If(buffer_space_req, NextState("BUFFER_SPACE")),
        If(read_request_pending & ~self.cri.i_status[2],
           NextState("READ"),
           If(self.cri.i_status[0], NextState("READ_TIMEOUT")),
           If(self.cri.i_status[1], NextState("READ_OVERFLOW"))))
    tx_fsm.act("ECHO", tx_dp.send("echo_reply"),
               If(tx_dp.packet_last, NextState("IDLE")))
    tx_fsm.act("BUFFER_SPACE", buffer_space_ack.eq(1),
               tx_dp.send("buffer_space_reply", space=buffer_space),
               If(tx_dp.packet_last, NextState("IDLE")))
    tx_fsm.act("READ_TIMEOUT",
               tx_dp.send("read_reply_noevent", overflow=0),
               clear_read_request.eq(1),
               If(tx_dp.packet_last, NextState("IDLE")))
    tx_fsm.act("READ_OVERFLOW",
               tx_dp.send("read_reply_noevent", overflow=1),
               clear_read_request.eq(1),
               If(tx_dp.packet_last, NextState("IDLE")))
    tx_fsm.act(
        "READ",
        tx_dp.send("read_reply",
                   timestamp=self.cri.i_timestamp,
                   data=self.cri.i_data),
        If(tx_dp.packet_last, clear_read_request.eq(1),
           NextState("IDLE")))
def __init__(self,
             lane_count,
             seqn_width,
             layout_payload,
             compensation,
             glbl_fine_ts_width,
             enable_spread=True,
             quash_channels=[],
             interface=None):
    """SED lane distributor: writes CRI output events into one of
    *lane_count* lane FIFOs, enforcing per-lane timestamp monotonicity
    and applying per-channel latency compensation.

    Three-cycle pipeline: #1 compute timestamp differences, #2 pick a
    lane and write, #3 report status.

    :param compensation: per-channel latency compensation values used
        to initialize a ROM (14-bit signed entries).
    :param quash_channels: channel numbers whose events are silently
        dropped. (Note: mutable default is safe here — only iterated
        at elaboration time, never mutated.)
    """
    if lane_count & (lane_count - 1):
        raise NotImplementedError("lane count must be a power of 2")

    if interface is None:
        interface = cri.Interface()
    self.cri = interface
    self.sequence_error = Signal()
    self.sequence_error_channel = Signal(16)
    # subtract fine timestamp width, all lane book-keeping is coarse
    self.minimum_coarse_timestamp = Signal(64 - glbl_fine_ts_width)
    self.output = [
        Record(layouts.fifo_ingress(seqn_width, layout_payload))
        for _ in range(lane_count)
    ]

    # # #

    o_status_wait = Signal()
    o_status_underflow = Signal()
    self.comb += self.cri.o_status.eq(
        Cat(o_status_wait, o_status_underflow))

    # internal state
    current_lane = Signal(max=lane_count)
    last_coarse_timestamp = Signal(64 - glbl_fine_ts_width)
    last_lane_coarse_timestamps = Array(
        Signal(64 - glbl_fine_ts_width) for _ in range(lane_count))
    seqn = Signal(seqn_width)

    # distribute data to lanes
    for lio in self.output:
        self.comb += [
            lio.seqn.eq(seqn),
            lio.payload.channel.eq(self.cri.chan_sel[:16]),
            lio.payload.timestamp.eq(self.cri.timestamp),
        ]
        if hasattr(lio.payload, "address"):
            self.comb += lio.payload.address.eq(self.cri.o_address)
        if hasattr(lio.payload, "data"):
            self.comb += lio.payload.data.eq(self.cri.o_data)

    # when timestamp and channel arrive in cycle #1, prepare computations
    us_timestamp_width = 64 - glbl_fine_ts_width
    coarse_timestamp = Signal(us_timestamp_width)
    self.comb += coarse_timestamp.eq(
        self.cri.timestamp[glbl_fine_ts_width:])
    # Signed differences; sign bit gives the comparison result in
    # cycle #2 after compensation is subtracted.
    min_minus_timestamp = Signal((us_timestamp_width + 1, True))
    laneAmin_minus_timestamp = Signal((us_timestamp_width + 1, True))
    laneBmin_minus_timestamp = Signal((us_timestamp_width + 1, True))
    last_minus_timestamp = Signal((us_timestamp_width + 1, True))
    current_lane_plus_one = Signal(max=lane_count)
    self.comb += current_lane_plus_one.eq(current_lane + 1)
    self.sync += [
        min_minus_timestamp.eq(self.minimum_coarse_timestamp -
                               coarse_timestamp),
        # lane A = current lane, lane B = next lane (wraps: power of 2).
        laneAmin_minus_timestamp.eq(
            last_lane_coarse_timestamps[current_lane] - coarse_timestamp),
        laneBmin_minus_timestamp.eq(
            last_lane_coarse_timestamps[current_lane_plus_one] -
            coarse_timestamp),
        last_minus_timestamp.eq(last_coarse_timestamp - coarse_timestamp)
    ]

    # Quash flag for channels whose events must be dropped.
    quash = Signal()
    self.sync += quash.eq(0)
    for channel in quash_channels:
        self.sync += If(self.cri.chan_sel[:16] == channel, quash.eq(1))

    # Per-channel latency compensation ROM, addressed in cycle #1.
    latency_compensation = Memory(14, len(compensation),
                                  init=compensation)
    latency_compensation_port = latency_compensation.get_port()
    self.specials += latency_compensation, latency_compensation_port
    self.comb += latency_compensation_port.adr.eq(self.cri.chan_sel[:16])

    # cycle #2, write
    compensation = Signal((14, True))
    self.comb += compensation.eq(latency_compensation_port.dat_r)
    timestamp_above_min = Signal()
    timestamp_above_laneA_min = Signal()
    timestamp_above_laneB_min = Signal()
    timestamp_above_lane_min = Signal()
    force_laneB = Signal()
    use_laneB = Signal()
    use_lanen = Signal(max=lane_count)

    do_write = Signal()
    do_underflow = Signal()
    do_sequence_error = Signal()
    self.comb += [
        timestamp_above_min.eq(min_minus_timestamp - compensation < 0),
        timestamp_above_laneA_min.eq(
            laneAmin_minus_timestamp - compensation < 0),
        timestamp_above_laneB_min.eq(
            laneBmin_minus_timestamp - compensation < 0),
        # Switch to lane B when the event is not strictly after the
        # previous one on the current lane, or when spreading forces it.
        If(force_laneB | (last_minus_timestamp - compensation >= 0),
           use_lanen.eq(current_lane + 1),
           use_laneB.eq(1)).Else(use_lanen.eq(current_lane),
                                 use_laneB.eq(0)),
        timestamp_above_lane_min.eq(
            Mux(use_laneB, timestamp_above_laneB_min,
                timestamp_above_laneA_min)),
        If(
            ~quash,
            do_write.eq((self.cri.cmd == cri.commands["write"])
                        & timestamp_above_min & timestamp_above_lane_min),
            do_underflow.eq((self.cri.cmd == cri.commands["write"])
                            & ~timestamp_above_min),
            do_sequence_error.eq((self.cri.cmd == cri.commands["write"])
                                 & timestamp_above_min
                                 & ~timestamp_above_lane_min),
        ),
        Array(lio.we for lio in self.output)[use_lanen].eq(do_write)
    ]
    compensated_timestamp = Signal(64)
    self.comb += compensated_timestamp.eq(
        self.cri.timestamp + (compensation << glbl_fine_ts_width))
    self.sync += [
        If(
            do_write,
            If(use_laneB, current_lane.eq(current_lane + 1)),
            last_coarse_timestamp.eq(
                compensated_timestamp[glbl_fine_ts_width:]),
            last_lane_coarse_timestamps[use_lanen].eq(
                compensated_timestamp[glbl_fine_ts_width:]),
            seqn.eq(seqn + 1),
        )
    ]
    # Override the per-lane timestamp with the compensated value
    # (later comb statements take priority over the earlier plain
    # cri.timestamp assignment).
    for lio in self.output:
        self.comb += lio.payload.timestamp.eq(compensated_timestamp)

    # cycle #3, read status
    current_lane_writable = Signal()
    self.comb += [
        current_lane_writable.eq(
            Array(lio.writable for lio in self.output)[current_lane]),
        o_status_wait.eq(~current_lane_writable)
    ]
    self.sync += [
        If(self.cri.cmd == cri.commands["write"],
           o_status_underflow.eq(0)),
        If(do_underflow, o_status_underflow.eq(1)),
        self.sequence_error.eq(do_sequence_error),
        self.sequence_error_channel.eq(self.cri.chan_sel[:16])
    ]

    # current lane has been full, spread events by switching to the next.
    if enable_spread:
        current_lane_writable_r = Signal(reset=1)
        self.sync += [
            current_lane_writable_r.eq(current_lane_writable),
            If(~current_lane_writable_r & current_lane_writable,
               force_laneB.eq(1)),
            If(do_write, force_laneB.eq(0))
        ]
def __init__(self, rt_packet, channel_count, fine_ts_width):
    """Master-side RTIO controller for a DRTIO link.

    Bridges the local CRI (``self.cri``) to the packet engine ``rt_packet``
    that talks to the remote satellite: it forwards writes/reads as link
    packets, caches per-channel remote FIFO space and last-written
    timestamps, detects underflow/sequence errors locally, and exposes
    status/debug/control through ``self.csrs``.

    :param rt_packet: packet-layer core; this module drives its ``sr_*``
        request fields and strobes and consumes its reply/error signals.
    :param channel_count: number of remote channels; sizes the FIFO-space
        and last-timestamp cache memories.
    :param fine_ts_width: number of fine timestamp bits; the RTIO counter
        counts coarse cycles (64 - fine_ts_width bits) and is shifted left
        by this amount when presented on the CRI.
    """
    self.csrs = _CSRs()
    self.cri = cri.Interface()

    # protocol errors
    # Sticky error flags, set by the packet layer (or the local FIFO-space
    # timeout below) and cleared selectively by writing 1s to the
    # protocol_error CSR (write-one-to-clear via .re and .r bit tests).
    err_unknown_packet_type = Signal()
    err_packet_truncated = Signal()
    signal_fifo_space_timeout = Signal()
    err_fifo_space_timeout = Signal()
    self.sync.sys_with_rst += [
        If(self.csrs.protocol_error.re,
           If(self.csrs.protocol_error.r[0], err_unknown_packet_type.eq(0)),
           If(self.csrs.protocol_error.r[1], err_packet_truncated.eq(0)),
           If(self.csrs.protocol_error.r[2], err_fifo_space_timeout.eq(0))),
        If(rt_packet.err_unknown_packet_type, err_unknown_packet_type.eq(1)),
        If(rt_packet.err_packet_truncated, err_packet_truncated.eq(1)),
        If(signal_fifo_space_timeout, err_fifo_space_timeout.eq(1))
    ]
    self.comb += self.csrs.protocol_error.w.eq(
        Cat(err_unknown_packet_type, err_packet_truncated,
            err_fifo_space_timeout))

    # channel selection
    # Normally the CRI-selected channel; a CSR override is available
    # (presumably for manual/debug access — confirm against CSR users).
    chan_sel = Signal(16)
    self.comb += chan_sel.eq(
        Mux(self.csrs.chan_sel_override_en.storage,
            self.csrs.chan_sel_override.storage,
            self.cri.chan_sel[:16]))

    # master RTIO counter and counter synchronization
    # The counter holds coarse timestamp cycles; the CRI sees it shifted
    # up by fine_ts_width to full timestamp resolution.
    self.submodules.counter = RTIOCounter(64 - fine_ts_width)
    self.comb += self.cri.counter.eq(
        self.counter.value_sys << fine_ts_width)
    # tsc_correction is a CSR in the sys domain consumed in the rt_packet
    # clock domain; MultiReg synchronizes it and no_retiming keeps the
    # source register intact for the synchronizer.
    tsc_correction = Signal(64)
    self.csrs.tsc_correction.storage.attr.add("no_retiming")
    self.specials += MultiReg(self.csrs.tsc_correction.storage,
                              tsc_correction)
    self.comb += [
        rt_packet.tsc_value.eq(self.counter.value_rtio + tsc_correction),
        self.csrs.set_time.w.eq(rt_packet.set_time_stb)
    ]
    # set_time request/ack handshake: CSR write raises the strobe, the
    # packet layer's ack clears it.
    self.sync += [
        If(rt_packet.set_time_ack, rt_packet.set_time_stb.eq(0)),
        If(self.csrs.set_time.re, rt_packet.set_time_stb.eq(1))
    ]

    # reset
    # Two reset CSRs share the same strobe; reset_phy distinguishes a
    # PHY reset request from a plain remote reset.
    self.sync += [
        If(rt_packet.reset_ack, rt_packet.reset_stb.eq(0)),
        If(self.csrs.reset.re,
           rt_packet.reset_stb.eq(1),
           rt_packet.reset_phy.eq(0)),
        If(self.csrs.reset_phy.re,
           rt_packet.reset_stb.eq(1),
           rt_packet.reset_phy.eq(1)),
    ]
    # One-cycle local reset pulse (asserted at power-up via reset=1),
    # retriggered by the reset CSR; drives the *_with_rst clock domains.
    local_reset = Signal(reset=1)
    self.sync += local_reset.eq(self.csrs.reset.re)
    local_reset.attr.add("no_retiming")
    self.clock_domains.cd_sys_with_rst = ClockDomain()
    self.clock_domains.cd_rtio_with_rst = ClockDomain()
    self.comb += [
        self.cd_sys_with_rst.clk.eq(ClockSignal()),
        self.cd_sys_with_rst.rst.eq(local_reset)
    ]
    # local_reset is generated in sys; it must be synchronized into the
    # rtio domain before use as a reset there.
    self.comb += self.cd_rtio_with_rst.clk.eq(ClockSignal("rtio"))
    self.specials += AsyncResetSynchronizer(self.cd_rtio_with_rst,
                                            local_reset)

    # remote channel status cache
    # Per-channel caches: remaining buffer space reported by the remote
    # side, and the last timestamp written (for sequence-error detection).
    fifo_spaces_mem = Memory(16, channel_count)
    fifo_spaces = fifo_spaces_mem.get_port(write_capable=True)
    self.specials += fifo_spaces_mem, fifo_spaces
    last_timestamps_mem = Memory(64, channel_count)
    last_timestamps = last_timestamps_mem.get_port(write_capable=True)
    self.specials += last_timestamps_mem, last_timestamps

    # common packet fields
    # Defaults describe a write request; the FIFO-space and read requests
    # override sr_notwrite/sr_address (0 = FIFO-space query, 1 = read).
    rt_packet_fifo_request = Signal()
    rt_packet_read_request = Signal()
    self.comb += [
        fifo_spaces.adr.eq(chan_sel),
        last_timestamps.adr.eq(chan_sel),
        last_timestamps.dat_w.eq(self.cri.timestamp),
        rt_packet.sr_channel.eq(chan_sel),
        rt_packet.sr_address.eq(self.cri.o_address),
        rt_packet.sr_data.eq(self.cri.o_data),
        rt_packet.sr_timestamp.eq(self.cri.timestamp),
        If(rt_packet_fifo_request,
           rt_packet.sr_notwrite.eq(1),
           rt_packet.sr_address.eq(0)),
        If(rt_packet_read_request,
           rt_packet.sr_notwrite.eq(1),
           rt_packet.sr_address.eq(1))
    ]

    # output status
    # CRI o_status bit order: wait, underflow, sequence_error.
    o_status_wait = Signal()
    o_status_underflow = Signal()
    o_status_sequence_error = Signal()
    self.comb += [
        self.cri.o_status.eq(
            Cat(o_status_wait, o_status_underflow,
                o_status_sequence_error)),
        self.csrs.o_wait.status.eq(o_status_wait)
    ]
    # Error bits are sticky until the next write command clears them;
    # set pulses come from the FSM's IDLE state below.
    o_sequence_error_set = Signal()
    o_underflow_set = Signal()
    self.sync.sys_with_rst += [
        If(self.cri.cmd == cri.commands["write"],
           o_status_underflow.eq(0),
           o_status_sequence_error.eq(0),
        ),
        If(o_underflow_set, o_status_underflow.eq(1)),
        If(o_sequence_error_set, o_status_sequence_error.eq(1))
    ]

    timeout_counter = WaitTimer(8191)
    self.submodules += timeout_counter

    # Sequence error: timestamp not monotonic on this channel.
    # Underflow: event (minus the CSR-programmed margin) is not far
    # enough in the future of the current coarse counter value.
    cond_sequence_error = self.cri.timestamp < last_timestamps.dat_r
    cond_underflow = ((self.cri.timestamp[fine_ts_width:] -
                       self.csrs.underflow_margin.storage[fine_ts_width:]) <
                      self.counter.value_sys)

    # input status
    # CRI i_status bit order: wait_event, overflow, wait_status.
    i_status_wait_event = Signal()
    i_status_overflow = Signal()
    i_status_wait_status = Signal()
    self.comb += self.cri.i_status.eq(
        Cat(i_status_wait_event, i_status_overflow, i_status_wait_status))

    # Latch the read reply: either data+timestamp, or a no-event reply
    # that distinguishes overflow from "no event yet".
    load_read_reply = Signal()
    self.sync.sys_with_rst += [
        If(load_read_reply,
           i_status_wait_event.eq(0),
           i_status_overflow.eq(0),
           If(rt_packet.read_no_event,
              If(rt_packet.read_is_overflow,
                 i_status_overflow.eq(1)).Else(
                 i_status_wait_event.eq(1))),
           self.cri.i_data.eq(rt_packet.read_data),
           self.cri.i_timestamp.eq(rt_packet.read_timestamp))
    ]

    # FSM
    fsm = ClockDomainsRenamer("sys_with_rst")(FSM())
    self.submodules += fsm

    # IDLE: dispatch CRI commands. Writes failing the local checks never
    # reach the link; a CSR strobe can force a FIFO-space refresh.
    fsm.act("IDLE",
        If(self.cri.cmd == cri.commands["write"],
           If(cond_sequence_error,
              o_sequence_error_set.eq(1)).Elif(
              cond_underflow,
              o_underflow_set.eq(1)).Else(NextState("WRITE"))),
        If(self.cri.cmd == cri.commands["read"], NextState("READ")),
        If(self.csrs.o_get_fifo_space.re, NextState("GET_FIFO_SPACE")))
    # WRITE: send the event; on ack, decrement the cached remote FIFO
    # space and record the timestamp. If the cache indicates the remote
    # buffer may now be full (<= 1 before decrement), refresh it.
    fsm.act("WRITE",
        o_status_wait.eq(1),
        rt_packet.sr_stb.eq(1),
        If(rt_packet.sr_ack,
           fifo_spaces.we.eq(1),
           fifo_spaces.dat_w.eq(fifo_spaces.dat_r - 1),
           last_timestamps.we.eq(1),
           If(fifo_spaces.dat_r <= 1,
              NextState("GET_FIFO_SPACE")).Else(NextState("IDLE"))))
    # GET_FIFO_SPACE / reply: query the remote side for actual space and
    # update the cache; if the reply reports 0, poll again. The WaitTimer
    # bounds the wait — on timeout, flag the sticky error and retry.
    fsm.act("GET_FIFO_SPACE",
        o_status_wait.eq(1),
        rt_packet.fifo_space_not_ack.eq(1),
        rt_packet_fifo_request.eq(1),
        rt_packet.sr_stb.eq(1),
        If(rt_packet.sr_ack, NextState("GET_FIFO_SPACE_REPLY")))
    fsm.act("GET_FIFO_SPACE_REPLY",
        o_status_wait.eq(1),
        fifo_spaces.dat_w.eq(rt_packet.fifo_space),
        fifo_spaces.we.eq(1),
        rt_packet.fifo_space_not_ack.eq(1),
        If(rt_packet.fifo_space_not,
           If(rt_packet.fifo_space != 0,
              NextState("IDLE")).Else(NextState("GET_FIFO_SPACE"))),
        timeout_counter.wait.eq(1),
        If(timeout_counter.done,
           signal_fifo_space_timeout.eq(1),
           NextState("GET_FIFO_SPACE")))
    # READ / reply: issue a read request packet, then block (wait_status)
    # until the reply arrives and is latched by load_read_reply.
    fsm.act("READ",
        i_status_wait_status.eq(1),
        rt_packet.read_not_ack.eq(1),
        rt_packet_read_request.eq(1),
        rt_packet.sr_stb.eq(1),
        If(rt_packet.sr_ack, NextState("GET_READ_REPLY")))
    fsm.act("GET_READ_REPLY",
        i_status_wait_status.eq(1),
        rt_packet.read_not_ack.eq(1),
        If(rt_packet.read_not,
           load_read_reply.eq(1),
           NextState("IDLE")))

    # channel state access
    # Debug CSRs expose the cache entries of the currently selected
    # channel; o_reset_channel_status zeroes both cache entries.
    self.comb += [
        self.csrs.o_dbg_fifo_space.status.eq(fifo_spaces.dat_r),
        self.csrs.o_dbg_last_timestamp.status.eq(last_timestamps.dat_r),
        If(self.csrs.o_reset_channel_status.re,
           fifo_spaces.dat_w.eq(0),
           fifo_spaces.we.eq(1),
           last_timestamps.dat_w.eq(0),
           last_timestamps.we.eq(1))
    ]
    # Count accepted FIFO-space request packets for debugging.
    self.sync += \
        If((rt_packet.sr_stb & rt_packet.sr_ack & rt_packet_fifo_request),
           self.csrs.o_dbg_fifo_space_req_cnt.status.eq(
               self.csrs.o_dbg_fifo_space_req_cnt.status + 1)
        )