Example No. 1
    def __init__(self, tsc, channels, lane_count=8, fifo_depth=128):
        self.cri = cri.Interface()
        self.async_errors = Record(async_errors_layout)

        chan_fine_ts_width = max(
            max(
                rtlink.get_fine_ts_width(channel.interface.o)
                for channel in channels),
            max(
                rtlink.get_fine_ts_width(channel.interface.i)
                for channel in channels))
        assert tsc.glbl_fine_ts_width >= chan_fine_ts_width

        self.submodules.outputs = ClockDomainsRenamer("rio")(SED(
            channels,
            tsc.glbl_fine_ts_width,
            "sync",
            lane_count=lane_count,
            fifo_depth=fifo_depth,
            enable_spread=False,
            report_buffer_space=True,
            interface=self.cri))
        self.comb += self.outputs.coarse_timestamp.eq(tsc.coarse_ts)
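        # Keep the SED's minimum accepted coarse timestamp 16 cycles ahead of
        # the current coarse timestamp.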
        self.sync.rtio += self.outputs.minimum_coarse_timestamp.eq(
            tsc.coarse_ts + 16)

        self.submodules.inputs = ClockDomainsRenamer("rio")(InputCollector(
            tsc, channels, "sync", interface=self.cri))

        for attr, _ in async_errors_layout:
            self.comb += getattr(self.async_errors,
                                 attr).eq(getattr(self.outputs, attr))
Example No. 2
File: core.py Project: JQIamo/artiq
    def __init__(self, interface, counter, fifo_depth):
        data_width = rtlink.get_data_width(interface)
        fine_ts_width = rtlink.get_fine_ts_width(interface)

        ev_layout = []
        if data_width:
            ev_layout.append(("data", data_width))
        if interface.timestamped:
            ev_layout.append(("timestamp", counter.width + fine_ts_width))
        self.ev = Record(ev_layout)

        self.readable = Signal()
        self.re = Signal()

        self.overflow = Signal()  # pulsed

        # # #

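        # Events are written in the rio (RTIO) clock domain and read back in
        # the rsys (system) clock domain through an asynchronous FIFO.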
        fifo = ClockDomainsRenamer({"read": "rsys", "write": "rio"})(
            AsyncFIFO(layout_len(ev_layout), fifo_depth))
        self.submodules += fifo
        fifo_in = Record(ev_layout)
        fifo_out = Record(ev_layout)
        self.comb += [
            fifo.din.eq(fifo_in.raw_bits()),
            fifo_out.raw_bits().eq(fifo.dout)
        ]

        # latency compensation
        if interface.delay:
            counter_rtio = Signal.like(counter.value_rtio, reset_less=True)
            self.sync.rtio += counter_rtio.eq(counter.value_rtio -
                                              (interface.delay + 1))
        else:
            counter_rtio = counter.value_rtio

        # FIFO write
        if data_width:
            self.comb += fifo_in.data.eq(interface.data)
        if interface.timestamped:
            if fine_ts_width:
                full_ts = Cat(interface.fine_ts, counter_rtio)
            else:
                full_ts = counter_rtio
            self.comb += fifo_in.timestamp.eq(full_ts)
        self.comb += fifo.we.eq(interface.stb)

        # FIFO read
        self.comb += [
            self.ev.eq(fifo_out),
            self.readable.eq(fifo.readable),
            fifo.re.eq(self.re)
        ]

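        # A write attempted while the FIFO is full raises a one-cycle overflow
        # pulse, carried across clock domains by the BlindTransfer below.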
        overflow_transfer = BlindTransfer()
        self.submodules += overflow_transfer
        self.comb += [
            overflow_transfer.i.eq(fifo.we & ~fifo.writable),
            self.overflow.eq(overflow_transfer.o),
        ]
Example No. 3
File: core.py Project: cr1901/artiq
    def __init__(self, interface, counter, fifo_depth):
        data_width = rtlink.get_data_width(interface)
        fine_ts_width = rtlink.get_fine_ts_width(interface)

        ev_layout = []
        if data_width:
            ev_layout.append(("data", data_width))
        if interface.timestamped:
            ev_layout.append(("timestamp", counter.width + fine_ts_width))
        self.ev = Record(ev_layout)

        self.readable = Signal()
        self.re = Signal()
        
        self.overflow = Signal()  # pulsed

        # # #

        fifo = ClockDomainsRenamer({"read": "rsys", "write": "rio"})(
            AsyncFIFO(layout_len(ev_layout), fifo_depth))
        self.submodules += fifo
        fifo_in = Record(ev_layout)
        fifo_out = Record(ev_layout)
        self.comb += [
            fifo.din.eq(fifo_in.raw_bits()),
            fifo_out.raw_bits().eq(fifo.dout)
        ]

        # FIFO write
        if data_width:
            self.comb += fifo_in.data.eq(interface.data)
        if interface.timestamped:
            if fine_ts_width:
                full_ts = Cat(interface.fine_ts, counter.value_rio)
            else:
                full_ts = counter.value_rio
            self.comb += fifo_in.timestamp.eq(full_ts)
        self.comb += fifo.we.eq(interface.stb)

        # FIFO read
        self.comb += [
            self.ev.eq(fifo_out),
            self.readable.eq(fifo.readable),
            fifo.re.eq(self.re)
        ]

        overflow_sync = PulseSynchronizer("rio", "rsys")
        overflow_ack_sync = PulseSynchronizer("rsys", "rio")
        self.submodules += overflow_sync, overflow_ack_sync
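        # Overflow reporting with a blind window: once an overflow has been
        # signalled, further overflows are ignored until the rsys-side pulse
        # is acknowledged back in the rio domain.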
        overflow_blind = Signal()
        self.comb += overflow_sync.i.eq(fifo.we & ~fifo.writable & ~overflow_blind)
        self.sync.rio += [
            If(fifo.we & ~fifo.writable, overflow_blind.eq(1)),
            If(overflow_ack_sync.o, overflow_blind.eq(0))
        ]
        self.comb += [
            overflow_ack_sync.i.eq(overflow_sync.o),
            self.overflow.eq(overflow_sync.o)
        ]
Example No. 4
    def __init__(self, interface, counter, fifo_depth):
        data_width = rtlink.get_data_width(interface)
        fine_ts_width = rtlink.get_fine_ts_width(interface)

        ev_layout = []
        if data_width:
            ev_layout.append(("data", data_width))
        if interface.timestamped:
            ev_layout.append(("timestamp", counter.width + fine_ts_width))
        self.ev = Record(ev_layout)

        self.readable = Signal()
        self.re = Signal()

        self.overflow = Signal()  # pulsed

        # # #

        fifo = RenameClockDomains(AsyncFIFO(ev_layout, fifo_depth), {
            "read": "rsys",
            "write": "rio"
        })
        self.submodules += fifo

        # FIFO write
        if data_width:
            self.comb += fifo.din.data.eq(interface.data)
        if interface.timestamped:
            if fine_ts_width:
                full_ts = Cat(interface.fine_ts, counter.value_rio)
            else:
                full_ts = counter.value_rio
            self.comb += fifo.din.timestamp.eq(full_ts)
        self.comb += fifo.we.eq(interface.stb)

        # FIFO read
        self.comb += [
            self.ev.eq(fifo.dout),
            self.readable.eq(fifo.readable),
            fifo.re.eq(self.re)
        ]

        overflow_sync = PulseSynchronizer("rio", "rsys")
        overflow_ack_sync = PulseSynchronizer("rsys", "rio")
        self.submodules += overflow_sync, overflow_ack_sync
        overflow_blind = Signal()
        self.comb += overflow_sync.i.eq(fifo.we & ~fifo.writable
                                        & ~overflow_blind)
        self.sync.rio += [
            If(fifo.we & ~fifo.writable, overflow_blind.eq(1)),
            If(overflow_ack_sync.o, overflow_blind.eq(0))
        ]
        self.comb += [
            overflow_ack_sync.i.eq(overflow_sync.o),
            self.overflow.eq(overflow_sync.o)
        ]
Example No. 5
def get_channel_layout(coarse_ts_width, interface):
    data_width = rtlink.get_data_width(interface)
    fine_ts_width = rtlink.get_fine_ts_width(interface)

    layout = []
    if data_width:
        layout.append(("data", data_width))
    if interface.timestamped:
        layout.append(("timestamp", coarse_ts_width + fine_ts_width))

    return layout
Example No. 6
def get_channel_layout(coarse_ts_width, interface):
    data_width = rtlink.get_data_width(interface)
    fine_ts_width = rtlink.get_fine_ts_width(interface)

    layout = []
    if data_width:
        layout.append(("data", data_width))
    if interface.timestamped:
        layout.append(("timestamp", coarse_ts_width + fine_ts_width))

    return layout
Example No. 7
    def __init__(self, interface, counter, fifo_depth):
        data_width = rtlink.get_data_width(interface)
        fine_ts_width = rtlink.get_fine_ts_width(interface)

        ev_layout = []
        if data_width:
            ev_layout.append(("data", data_width))
        if interface.timestamped:
            ev_layout.append(("timestamp", counter.width + fine_ts_width))
        self.ev = Record(ev_layout)

        self.readable = Signal()
        self.re = Signal()

        self.overflow = Signal()  # pulsed

        # # #

        fifo = ClockDomainsRenamer({
            "read": "rsys",
            "write": "rio"
        })(AsyncFIFO(layout_len(ev_layout), fifo_depth))
        self.submodules += fifo
        fifo_in = Record(ev_layout)
        fifo_out = Record(ev_layout)
        self.comb += [
            fifo.din.eq(fifo_in.raw_bits()),
            fifo_out.raw_bits().eq(fifo.dout)
        ]

        # FIFO write
        if data_width:
            self.comb += fifo_in.data.eq(interface.data)
        if interface.timestamped:
            if fine_ts_width:
                full_ts = Cat(interface.fine_ts, counter.value_rtio)
            else:
                full_ts = counter.value_rtio
            self.comb += fifo_in.timestamp.eq(full_ts)
        self.comb += fifo.we.eq(interface.stb)

        # FIFO read
        self.comb += [
            self.ev.eq(fifo_out),
            self.readable.eq(fifo.readable),
            fifo.re.eq(self.re)
        ]

        overflow_transfer = _BlindTransfer()
        self.submodules += overflow_transfer
        self.comb += [
            overflow_transfer.i.eq(fifo.we & ~fifo.writable),
            self.overflow.eq(overflow_transfer.o),
        ]
Example No. 8
File: core.py Project: m-labs/artiq
    def __init__(self, tsc, channels, lane_count=8, fifo_depth=128):
        self.cri = cri.Interface()
        self.async_errors = Record(async_errors_layout)

        chan_fine_ts_width = max(max(rtlink.get_fine_ts_width(channel.interface.o)
                                     for channel in channels),
                                 max(rtlink.get_fine_ts_width(channel.interface.i)
                                     for channel in channels))
        assert tsc.glbl_fine_ts_width >= chan_fine_ts_width

        self.submodules.outputs = ClockDomainsRenamer("rio")(
            SED(channels, tsc.glbl_fine_ts_width, "sync",
                lane_count=lane_count, fifo_depth=fifo_depth,
                enable_spread=False, report_buffer_space=True,
                interface=self.cri))
        self.comb += self.outputs.coarse_timestamp.eq(tsc.coarse_ts)
        self.sync.rtio += self.outputs.minimum_coarse_timestamp.eq(tsc.coarse_ts + 16)

        self.submodules.inputs = ClockDomainsRenamer("rio")(
            InputCollector(tsc, channels, "sync", interface=self.cri))

        for attr, _ in async_errors_layout:
            self.comb += getattr(self.async_errors, attr).eq(getattr(self.outputs, attr))
Example No. 9
    def __init__(self, channels, fine_ts_width=None, guard_io_cycles=20):
        if fine_ts_width is None:
            fine_ts_width = max(
                rtlink.get_fine_ts_width(c.interface) for c in channels)

        self.cri = cri.Interface()
        self.reset = CSR()
        self.reset_phy = CSR()
        self.comb += self.cri.arb_gnt.eq(1)

        # Clocking/Reset
        # Create rsys, rio and rio_phy domains based on sys and rtio
        # with reset controlled by CRI.
        cmd_reset = Signal(reset=1)
        cmd_reset_phy = Signal(reset=1)
        self.sync += [
            cmd_reset.eq(self.reset.re),
            cmd_reset_phy.eq(self.reset_phy.re)
        ]
        cmd_reset.attr.add("no_retiming")
        cmd_reset_phy.attr.add("no_retiming")

        self.clock_domains.cd_rsys = ClockDomain()
        self.clock_domains.cd_rio = ClockDomain()
        self.clock_domains.cd_rio_phy = ClockDomain()
        self.comb += [
            self.cd_rsys.clk.eq(ClockSignal()),
            self.cd_rsys.rst.eq(cmd_reset)
        ]
        self.comb += self.cd_rio.clk.eq(ClockSignal("rtio"))
        self.specials += AsyncResetSynchronizer(self.cd_rio, cmd_reset)
        self.comb += self.cd_rio_phy.clk.eq(ClockSignal("rtio"))
        self.specials += AsyncResetSynchronizer(self.cd_rio_phy, cmd_reset_phy)

        # Managers
        self.submodules.counter = RTIOCounter(
            len(self.cri.o_timestamp) - fine_ts_width)

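        # Instantiate one output (and optionally one input) manager per
        # channel; the low 16 bits of the CRI channel select multiplex their
        # data and status onto the CRI after the loop.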
        i_datas, i_timestamps = [], []
        o_statuses, i_statuses = [], []
        sel = self.cri.chan_sel[:16]
        for n, channel in enumerate(channels):
            if isinstance(channel, LogChannel):
                i_datas.append(0)
                i_timestamps.append(0)
                i_statuses.append(0)
                continue

            selected = Signal()
            self.comb += selected.eq(sel == n)

            o_manager = _OutputManager(channel.interface.o, self.counter,
                                       channel.ofifo_depth, guard_io_cycles)
            self.submodules += o_manager

            if hasattr(o_manager.ev, "data"):
                self.comb += o_manager.ev.data.eq(self.cri.o_data)
            if hasattr(o_manager.ev, "address"):
                self.comb += o_manager.ev.address.eq(self.cri.o_address)
            ts_shift = len(self.cri.o_timestamp) - len(o_manager.ev.timestamp)
            self.comb += o_manager.ev.timestamp.eq(
                self.cri.o_timestamp[ts_shift:])

            self.comb += o_manager.we.eq(
                selected & (self.cri.cmd == cri.commands["write"]))

            underflow = Signal()
            sequence_error = Signal()
            collision = Signal()
            busy = Signal()
            self.sync.rsys += [
                If(self.cri.cmd == cri.commands["o_underflow_reset"],
                   underflow.eq(0)),
                If(self.cri.cmd == cri.commands["o_sequence_error_reset"],
                   sequence_error.eq(0)),
                If(self.cri.cmd == cri.commands["o_collision_reset"],
                   collision.eq(0)),
                If(self.cri.cmd == cri.commands["o_busy_reset"], busy.eq(0)),
                If(o_manager.underflow, underflow.eq(1)),
                If(o_manager.sequence_error, sequence_error.eq(1)),
                If(o_manager.collision, collision.eq(1)),
                If(o_manager.busy, busy.eq(1))
            ]
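            # Output status: bit 0 = FIFO not writable, followed by the sticky
            # underflow, sequence error, collision and busy flags.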
            o_statuses.append(
                Cat(~o_manager.writable, underflow, sequence_error, collision,
                    busy))

            if channel.interface.i is not None:
                i_manager = _InputManager(channel.interface.i, self.counter,
                                          channel.ififo_depth)
                self.submodules += i_manager

                if hasattr(i_manager.ev, "data"):
                    i_datas.append(i_manager.ev.data)
                else:
                    i_datas.append(0)
                if channel.interface.i.timestamped:
                    ts_shift = (len(self.cri.i_timestamp) -
                                len(i_manager.ev.timestamp))
                    i_timestamps.append(i_manager.ev.timestamp << ts_shift)
                else:
                    i_timestamps.append(0)

                self.comb += i_manager.re.eq(
                    selected & (self.cri.cmd == cri.commands["read"]))

                overflow = Signal()
                self.sync.rsys += [
                    If(
                        selected &
                        (self.cri.cmd == cri.commands["i_overflow_reset"]),
                        overflow.eq(0)),
                    If(i_manager.overflow, overflow.eq(1))
                ]
                i_statuses.append(Cat(~i_manager.readable, overflow))

            else:
                i_datas.append(0)
                i_timestamps.append(0)
                i_statuses.append(0)
        self.comb += [
            self.cri.i_data.eq(Array(i_datas)[sel]),
            self.cri.i_timestamp.eq(Array(i_timestamps)[sel]),
            self.cri.o_status.eq(Array(o_statuses)[sel]),
            self.cri.i_status.eq(Array(i_statuses)[sel])
        ]

        self.comb += self.cri.counter.eq(
            self.counter.value_sys << fine_ts_width)
Example No. 10
    def __init__(self, tsc, channels, lane_count=8, fifo_depth=128):
        self.cri = cri.Interface()
        self.reset = CSR()
        self.reset_phy = CSR()
        self.async_error = CSR(3)
        self.collision_channel = CSRStatus(16)
        self.busy_channel = CSRStatus(16)
        self.sequence_error_channel = CSRStatus(16)

        # Clocking/Reset
        # Create rsys, rio and rio_phy domains based on sys and rtio
        # with reset controlled by CSR.
        #
        # The `rio` CD contains logic that is reset with `core.reset()`.
        # That's state that could unduly affect subsequent experiments,
        # i.e. input overflows caused by input gates left open, FIFO events far
        # in the future blocking the experiment, pending RTIO or
        # wishbone bus transactions, etc.
        # The `rio_phy` CD contains state that is maintained across
        # `core.reset()`, i.e. TTL output state, OE, DDS state.
        cmd_reset = Signal(reset=1)
        cmd_reset_phy = Signal(reset=1)
        self.sync += [
            cmd_reset.eq(self.reset.re),
            cmd_reset_phy.eq(self.reset_phy.re)
        ]
        cmd_reset.attr.add("no_retiming")
        cmd_reset_phy.attr.add("no_retiming")

        self.clock_domains.cd_rsys = ClockDomain()
        self.clock_domains.cd_rio = ClockDomain()
        self.clock_domains.cd_rio_phy = ClockDomain()
        self.comb += [
            self.cd_rsys.clk.eq(ClockSignal()),
            self.cd_rsys.rst.eq(cmd_reset),
            self.cd_rio.clk.eq(ClockSignal("rtio")),
            self.cd_rio_phy.clk.eq(ClockSignal("rtio"))
        ]
        self.specials += AsyncResetSynchronizer(self.cd_rio, cmd_reset)
        self.specials += AsyncResetSynchronizer(self.cd_rio_phy, cmd_reset_phy)

        # TSC
        chan_fine_ts_width = max(
            max(
                rtlink.get_fine_ts_width(channel.interface.o)
                for channel in channels),
            max(
                rtlink.get_fine_ts_width(channel.interface.i)
                for channel in channels))
        assert tsc.glbl_fine_ts_width >= chan_fine_ts_width

        # Outputs/Inputs
        quash_channels = [
            n for n, c in enumerate(channels) if isinstance(c, LogChannel)
        ]

        outputs = SED(channels,
                      tsc.glbl_fine_ts_width,
                      "async",
                      quash_channels=quash_channels,
                      lane_count=lane_count,
                      fifo_depth=fifo_depth,
                      interface=self.cri)
        self.submodules += outputs
        self.comb += outputs.coarse_timestamp.eq(tsc.coarse_ts)
        self.sync += outputs.minimum_coarse_timestamp.eq(tsc.coarse_ts_sys +
                                                         16)

        inputs = InputCollector(tsc,
                                channels,
                                "async",
                                quash_channels=quash_channels,
                                interface=self.cri)
        self.submodules += inputs

        # Asynchronous output errors
        o_collision_sync = BlindTransfer(data_width=16)
        o_busy_sync = BlindTransfer(data_width=16)
        self.submodules += o_collision_sync, o_busy_sync
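        # The BlindTransfers also carry the offending channel number (16 bits)
        # so it can be latched into the corresponding CSRStatus register below.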
        o_collision = Signal()
        o_busy = Signal()
        o_sequence_error = Signal()
        self.sync += [
            If(
                self.async_error.re,
                If(self.async_error.r[0], o_collision.eq(0)),
                If(self.async_error.r[1], o_busy.eq(0)),
                If(self.async_error.r[2], o_sequence_error.eq(0)),
            ),
            If(
                o_collision_sync.o, o_collision.eq(1),
                If(~o_collision,
                   self.collision_channel.status.eq(o_collision_sync.data_o))),
            If(o_busy_sync.o, o_busy.eq(1),
               If(~o_busy, self.busy_channel.status.eq(o_busy_sync.data_o))),
            If(
                outputs.sequence_error, o_sequence_error.eq(1),
                If(
                    ~o_sequence_error,
                    self.sequence_error_channel.status.eq(
                        outputs.sequence_error_channel)))
        ]
        self.comb += self.async_error.w.eq(
            Cat(o_collision, o_busy, o_sequence_error))

        self.comb += [
            o_collision_sync.i.eq(outputs.collision),
            o_collision_sync.data_i.eq(outputs.collision_channel),
            o_busy_sync.i.eq(outputs.busy),
            o_busy_sync.data_i.eq(outputs.busy_channel)
        ]
Example No. 11
File: core.py Project: JQIamo/artiq
    def __init__(self, channels, fine_ts_width=None, guard_io_cycles=20):
        if fine_ts_width is None:
            fine_ts_width = max(rtlink.get_fine_ts_width(c.interface)
                                for c in channels)

        self.cri = cri.Interface()
        self.reset = CSR()
        self.reset_phy = CSR()
        self.async_error = CSR(2)

        # Clocking/Reset
        # Create rsys, rio and rio_phy domains based on sys and rtio
        # with reset controlled by CRI.
        #
        # The `rio` CD contains logic that is reset with `core.reset()`.
        # That's state that could unduly affect subsequent experiments,
        # i.e. input overflows caused by input gates left open, FIFO events far
        # in the future blocking the experiment, pending RTIO or
        # wishbone bus transactions, etc.
        # The `rio_phy` CD contains state that is maintained across
        # `core.reset()`, i.e. TTL output state, OE, DDS state.
        cmd_reset = Signal(reset=1)
        cmd_reset_phy = Signal(reset=1)
        self.sync += [
            cmd_reset.eq(self.reset.re),
            cmd_reset_phy.eq(self.reset_phy.re)
        ]
        cmd_reset.attr.add("no_retiming")
        cmd_reset_phy.attr.add("no_retiming")

        self.clock_domains.cd_rsys = ClockDomain()
        self.clock_domains.cd_rio = ClockDomain()
        self.clock_domains.cd_rio_phy = ClockDomain()
        self.comb += [
            self.cd_rsys.clk.eq(ClockSignal()),
            self.cd_rsys.rst.eq(cmd_reset),
            self.cd_rio.clk.eq(ClockSignal("rtio")),
            self.cd_rio_phy.clk.eq(ClockSignal("rtio"))
        ]
        self.specials += AsyncResetSynchronizer(self.cd_rio, cmd_reset)
        self.specials += AsyncResetSynchronizer(self.cd_rio_phy, cmd_reset_phy)

        # Managers
        self.submodules.counter = RTIOCounter(len(self.cri.timestamp) - fine_ts_width)

        # Collision is not an asynchronous error with local RTIO, but
        # we treat it as such for consistency with DRTIO, where collisions
        # are reported by the satellites.
        o_underflow = Signal()
        o_sequence_error = Signal()
        o_collision = Signal()
        o_busy = Signal()
        self.sync.rsys += [
            If(self.cri.cmd == cri.commands["write"],
                o_underflow.eq(0),
                o_sequence_error.eq(0),
            )
        ]
        self.sync += [
            If(self.async_error.re,
                If(self.async_error.r[0], o_collision.eq(0)),
                If(self.async_error.r[1], o_busy.eq(0)),
            )
        ]

        o_statuses, i_statuses = [], []
        i_datas, i_timestamps = [], []
        i_ack = Signal()
        sel = self.cri.chan_sel[:16]
        for n, channel in enumerate(channels):
            if isinstance(channel, LogChannel):
                o_statuses.append(1)
                i_datas.append(0)
                i_timestamps.append(0)
                i_statuses.append(0)
                continue

            selected = Signal()
            self.comb += selected.eq(sel == n)

            o_manager = _OutputManager(channel.interface.o, self.counter,
                                       channel.ofifo_depth, guard_io_cycles)
            self.submodules += o_manager

            if hasattr(o_manager.ev, "data"):
                self.comb += o_manager.ev.data.eq(self.cri.o_data)
            if hasattr(o_manager.ev, "address"):
                self.comb += o_manager.ev.address.eq(self.cri.o_address)
            ts_shift = len(self.cri.timestamp) - len(o_manager.ev.timestamp)
            self.comb += o_manager.ev.timestamp.eq(self.cri.timestamp[ts_shift:])

            self.comb += o_manager.we.eq(selected & (self.cri.cmd == cri.commands["write"]))

            self.sync.rsys += [
                If(o_manager.underflow, o_underflow.eq(1)),
                If(o_manager.sequence_error, o_sequence_error.eq(1))
            ]
            self.sync += [
                If(o_manager.collision, o_collision.eq(1)),
                If(o_manager.busy, o_busy.eq(1))
            ]
            o_statuses.append(o_manager.writable)

            if channel.interface.i is not None:
                i_manager = _InputManager(channel.interface.i, self.counter,
                                          channel.ififo_depth)
                self.submodules += i_manager

                if hasattr(i_manager.ev, "data"):
                    i_datas.append(i_manager.ev.data)
                else:
                    i_datas.append(0)
                if channel.interface.i.timestamped:
                    ts_shift = (len(self.cri.i_timestamp) - len(i_manager.ev.timestamp))
                    i_timestamps.append(i_manager.ev.timestamp << ts_shift)
                else:
                    i_timestamps.append(0)

                overflow = Signal()
                self.sync.rsys += [
                    If(selected & i_ack,
                       overflow.eq(0)),
                    If(i_manager.overflow,
                       overflow.eq(1))
                ]
                self.comb += i_manager.re.eq(selected & i_ack & ~overflow)
                i_statuses.append(Cat(i_manager.readable & ~overflow, overflow))

            else:
                i_datas.append(0)
                i_timestamps.append(0)
                i_statuses.append(0)

        o_status_raw = Signal()
        self.comb += [
            o_status_raw.eq(Array(o_statuses)[sel]),
            self.cri.o_status.eq(Cat(
                ~o_status_raw, o_underflow, o_sequence_error)),
            self.async_error.w.eq(Cat(o_collision, o_busy))
        ]

        i_status_raw = Signal(2)
        self.comb += i_status_raw.eq(Array(i_statuses)[sel])
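        # Input reads complete asynchronously: a "read" command arms a timeout
        # equal to the requested timestamp, and i_ack fires once data or an
        # overflow is available, or once the counter passes the timeout.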
        input_timeout = Signal.like(self.cri.timestamp)
        input_pending = Signal()
        self.sync.rsys += [
            i_ack.eq(0),
            If(i_ack,
                self.cri.i_status.eq(Cat(~i_status_raw[0], i_status_raw[1], 0)),
                self.cri.i_data.eq(Array(i_datas)[sel]),
                self.cri.i_timestamp.eq(Array(i_timestamps)[sel]),
            ),
            If((self.cri.counter >= input_timeout) | (i_status_raw != 0),
                If(input_pending, i_ack.eq(1)),
                input_pending.eq(0)
            ),
            If(self.cri.cmd == cri.commands["read"],
                input_timeout.eq(self.cri.timestamp),
                input_pending.eq(1),
                self.cri.i_status.eq(0b100)
            )
        ]

        self.comb += self.cri.counter.eq(self.counter.value_sys << fine_ts_width)
Example No. 12
    def __init__(self, channels, fine_ts_width=None, guard_io_cycles=20):
        if fine_ts_width is None:
            fine_ts_width = max(
                rtlink.get_fine_ts_width(c.interface) for c in channels)

        self.cri = cri.Interface()
        self.reset = CSR()
        self.reset_phy = CSR()
        self.async_error = CSR(2)

        # Clocking/Reset
        # Create rsys, rio and rio_phy domains based on sys and rtio
        # with reset controlled by CRI.
        #
        # The `rio` CD contains logic that is reset with `core.reset()`.
        # That's state that could unduly affect subsequent experiments,
        # i.e. input overflows caused by input gates left open, FIFO events far
        # in the future blocking the experiment, pending RTIO or
        # wishbone bus transactions, etc.
        # The `rio_phy` CD contains state that is maintained across
        # `core.reset()`, i.e. TTL output state, OE, DDS state.
        cmd_reset = Signal(reset=1)
        cmd_reset_phy = Signal(reset=1)
        self.sync += [
            cmd_reset.eq(self.reset.re),
            cmd_reset_phy.eq(self.reset_phy.re)
        ]
        cmd_reset.attr.add("no_retiming")
        cmd_reset_phy.attr.add("no_retiming")

        self.clock_domains.cd_rsys = ClockDomain()
        self.clock_domains.cd_rio = ClockDomain()
        self.clock_domains.cd_rio_phy = ClockDomain()
        self.comb += [
            self.cd_rsys.clk.eq(ClockSignal()),
            self.cd_rsys.rst.eq(cmd_reset),
            self.cd_rio.clk.eq(ClockSignal("rtio")),
            self.cd_rio_phy.clk.eq(ClockSignal("rtio"))
        ]
        self.specials += AsyncResetSynchronizer(self.cd_rio, cmd_reset)
        self.specials += AsyncResetSynchronizer(self.cd_rio_phy, cmd_reset_phy)

        # Managers
        self.submodules.counter = RTIOCounter(
            len(self.cri.timestamp) - fine_ts_width)

        # Collision is not an asynchronous error with local RTIO, but
        # we treat it as such for consistency with DRTIO, where collisions
        # are reported by the satellites.
        o_underflow = Signal()
        o_sequence_error = Signal()
        o_collision = Signal()
        o_busy = Signal()
        self.sync.rsys += [
            If(
                self.cri.cmd == cri.commands["write"],
                o_underflow.eq(0),
                o_sequence_error.eq(0),
            )
        ]
        self.sync += [
            If(
                self.async_error.re,
                If(self.async_error.r[0], o_collision.eq(0)),
                If(self.async_error.r[1], o_busy.eq(0)),
            )
        ]

        o_statuses, i_statuses = [], []
        i_datas, i_timestamps = [], []
        i_ack = Signal()
        sel = self.cri.chan_sel[:16]
        for n, channel in enumerate(channels):
            if isinstance(channel, LogChannel):
                o_statuses.append(1)
                i_datas.append(0)
                i_timestamps.append(0)
                i_statuses.append(0)
                continue

            selected = Signal()
            self.comb += selected.eq(sel == n)

            o_manager = _OutputManager(channel.interface.o, self.counter,
                                       channel.ofifo_depth, guard_io_cycles)
            self.submodules += o_manager

            if hasattr(o_manager.ev, "data"):
                self.comb += o_manager.ev.data.eq(self.cri.o_data)
            if hasattr(o_manager.ev, "address"):
                self.comb += o_manager.ev.address.eq(self.cri.o_address)
            ts_shift = len(self.cri.timestamp) - len(o_manager.ev.timestamp)
            self.comb += o_manager.ev.timestamp.eq(
                self.cri.timestamp[ts_shift:])

            self.comb += o_manager.we.eq(
                selected & (self.cri.cmd == cri.commands["write"]))

            self.sync.rsys += [
                If(o_manager.underflow, o_underflow.eq(1)),
                If(o_manager.sequence_error, o_sequence_error.eq(1))
            ]
            self.sync += [
                If(o_manager.collision, o_collision.eq(1)),
                If(o_manager.busy, o_busy.eq(1))
            ]
            o_statuses.append(o_manager.writable)

            if channel.interface.i is not None:
                i_manager = _InputManager(channel.interface.i, self.counter,
                                          channel.ififo_depth)
                self.submodules += i_manager

                if hasattr(i_manager.ev, "data"):
                    i_datas.append(i_manager.ev.data)
                else:
                    i_datas.append(0)
                if channel.interface.i.timestamped:
                    ts_shift = (len(self.cri.i_timestamp) -
                                len(i_manager.ev.timestamp))
                    i_timestamps.append(i_manager.ev.timestamp << ts_shift)
                else:
                    i_timestamps.append(0)

                overflow = Signal()
                self.sync.rsys += [
                    If(selected & i_ack, overflow.eq(0)),
                    If(i_manager.overflow, overflow.eq(1))
                ]
                self.comb += i_manager.re.eq(selected & i_ack & ~overflow)
                i_statuses.append(Cat(i_manager.readable & ~overflow,
                                      overflow))

            else:
                i_datas.append(0)
                i_timestamps.append(0)
                i_statuses.append(0)

        o_status_raw = Signal()
        self.comb += [
            o_status_raw.eq(Array(o_statuses)[sel]),
            self.cri.o_status.eq(
                Cat(~o_status_raw, o_underflow, o_sequence_error)),
            self.async_error.w.eq(Cat(o_collision, o_busy))
        ]

        i_status_raw = Signal(2)
        self.comb += i_status_raw.eq(Array(i_statuses)[sel])
        input_timeout = Signal.like(self.cri.timestamp)
        input_pending = Signal()
        self.sync.rsys += [
            i_ack.eq(0),
            If(
                i_ack,
                self.cri.i_status.eq(Cat(~i_status_raw[0], i_status_raw[1],
                                         0)),
                self.cri.i_data.eq(Array(i_datas)[sel]),
                self.cri.i_timestamp.eq(Array(i_timestamps)[sel]),
            ),
            If((self.cri.counter >= input_timeout) | (i_status_raw != 0),
               If(input_pending, i_ack.eq(1)), input_pending.eq(0)),
            If(self.cri.cmd == cri.commands["read"],
               input_timeout.eq(self.cri.timestamp), input_pending.eq(1),
               self.cri.i_status.eq(0b100))
        ]

        self.comb += self.cri.counter.eq(
            self.counter.value_sys << fine_ts_width)
Example No. 13
    def __init__(self, channels, full_ts_width=63, guard_io_cycles=20):
        data_width = max(rtlink.get_data_width(c.interface)
                         for c in channels)
        address_width = max(rtlink.get_address_width(c.interface)
                            for c in channels)
        fine_ts_width = max(rtlink.get_fine_ts_width(c.interface)
                            for c in channels)

        self.data_width = data_width
        self.address_width = address_width
        self.fine_ts_width = fine_ts_width

        # CSRs
        self.kcsrs = _KernelCSRs(bits_for(len(channels)-1),
                                 data_width, address_width,
                                 full_ts_width)

        # Clocking/Reset
        # Create rsys, rio and rio_phy domains based on sys and rtio
        # with reset controlled by CSR.
        self.clock_domains.cd_rsys = ClockDomain()
        self.clock_domains.cd_rio = ClockDomain()
        self.clock_domains.cd_rio_phy = ClockDomain()
        self.comb += [
            self.cd_rsys.clk.eq(ClockSignal()),
            self.cd_rsys.rst.eq(self.kcsrs.reset.storage)
        ]
        self.comb += self.cd_rio.clk.eq(ClockSignal("rtio"))
        self.specials += AsyncResetSynchronizer(
            self.cd_rio,
            self.kcsrs.reset.storage | ResetSignal("rtio",
                                                   allow_reset_less=True))
        self.comb += self.cd_rio_phy.clk.eq(ClockSignal("rtio"))
        self.specials += AsyncResetSynchronizer(
            self.cd_rio_phy,
            self.kcsrs.reset_phy.storage | ResetSignal("rtio",
                                                       allow_reset_less=True))

        # Managers
        self.submodules.counter = _RTIOCounter(full_ts_width - fine_ts_width)

        i_datas, i_timestamps = [], []
        o_statuses, i_statuses = [], []
        sel = self.kcsrs.chan_sel.storage
        for n, channel in enumerate(channels):
            if isinstance(channel, LogChannel):
                i_datas.append(0)
                i_timestamps.append(0)
                i_statuses.append(0)
                continue

            selected = Signal()
            self.comb += selected.eq(sel == n)

            o_manager = _OutputManager(channel.interface.o, self.counter,
                                       channel.ofifo_depth, guard_io_cycles)
            self.submodules += o_manager

            if hasattr(o_manager.ev, "data"):
                self.comb += o_manager.ev.data.eq(
                    self.kcsrs.o_data.storage)
            if hasattr(o_manager.ev, "address"):
                self.comb += o_manager.ev.address.eq(
                    self.kcsrs.o_address.storage)
            ts_shift = (len(self.kcsrs.o_timestamp.storage)
                        - len(o_manager.ev.timestamp))
            self.comb += o_manager.ev.timestamp.eq(
                self.kcsrs.o_timestamp.storage[ts_shift:])

            self.comb += o_manager.we.eq(selected & self.kcsrs.o_we.re)

            underflow = Signal()
            sequence_error = Signal()
            collision = Signal()
            busy = Signal()
            self.sync.rsys += [
                If(selected & self.kcsrs.o_underflow_reset.re,
                   underflow.eq(0)),
                If(selected & self.kcsrs.o_sequence_error_reset.re,
                   sequence_error.eq(0)),
                If(selected & self.kcsrs.o_collision_reset.re,
                   collision.eq(0)),
                If(selected & self.kcsrs.o_busy_reset.re,
                   busy.eq(0)),
                If(o_manager.underflow, underflow.eq(1)),
                If(o_manager.sequence_error, sequence_error.eq(1)),
                If(o_manager.collision, collision.eq(1)),
                If(o_manager.busy, busy.eq(1))
            ]
            o_statuses.append(Cat(~o_manager.writable,
                                  underflow,
                                  sequence_error,
                                  collision,
                                  busy))

            if channel.interface.i is not None:
                i_manager = _InputManager(channel.interface.i, self.counter,
                                          channel.ififo_depth)
                self.submodules += i_manager

                if hasattr(i_manager.ev, "data"):
                    i_datas.append(i_manager.ev.data)
                else:
                    i_datas.append(0)
                if channel.interface.i.timestamped:
                    ts_shift = (len(self.kcsrs.i_timestamp.status)
                                - len(i_manager.ev.timestamp))
                    i_timestamps.append(i_manager.ev.timestamp << ts_shift)
                else:
                    i_timestamps.append(0)

                self.comb += i_manager.re.eq(selected & self.kcsrs.i_re.re)

                overflow = Signal()
                self.sync.rsys += [
                    If(selected & self.kcsrs.i_overflow_reset.re,
                       overflow.eq(0)),
                    If(i_manager.overflow,
                       overflow.eq(1))
                ]
                i_statuses.append(Cat(~i_manager.readable, overflow))

            else:
                i_datas.append(0)
                i_timestamps.append(0)
                i_statuses.append(0)
        if data_width:
            self.comb += self.kcsrs.i_data.status.eq(Array(i_datas)[sel])
        self.comb += [
            self.kcsrs.i_timestamp.status.eq(Array(i_timestamps)[sel]),
            self.kcsrs.o_status.status.eq(Array(o_statuses)[sel]),
            self.kcsrs.i_status.status.eq(Array(i_statuses)[sel])
        ]

        # Counter access
        self.sync += \
           If(self.kcsrs.counter_update.re,
               self.kcsrs.counter.status.eq(self.counter.value_sys
                                                << fine_ts_width)
           )
Example No. 14
    def __init__(self, interface, counter, fifo_depth, guard_io_cycles):
        data_width = rtlink.get_data_width(interface)
        address_width = rtlink.get_address_width(interface)
        fine_ts_width = rtlink.get_fine_ts_width(interface)

        ev_layout = []
        if data_width:
            ev_layout.append(("data", data_width))
        if address_width:
            ev_layout.append(("address", address_width))
        ev_layout.append(("timestamp", counter.width + fine_ts_width))
        # ev must be valid 1 cycle before we to account for the latency in
        # generating replace, sequence_error and collision
        self.ev = Record(ev_layout)

        self.writable = Signal()
        self.we = Signal()  # maximum throughput 1/2

        self.underflow = Signal()  # valid 1 cycle after we, pulsed
        self.sequence_error = Signal()
        self.collision = Signal()
        self.busy = Signal()  # pulsed

        # # #

        # FIFO
        fifo = ClockDomainsRenamer({"write": "rsys", "read": "rio"})(
            AsyncFIFO(layout_len(ev_layout), fifo_depth))
        self.submodules += fifo
        fifo_in = Record(ev_layout)
        fifo_out = Record(ev_layout)
        self.comb += [
            fifo.din.eq(fifo_in.raw_bits()),
            fifo_out.raw_bits().eq(fifo.dout)
        ]

        # Buffer
        buf_pending = Signal()
        buf = Record(ev_layout)
        buf_just_written = Signal()

        # Special cases
        replace = Signal()
        sequence_error = Signal()
        collision = Signal()
        any_error = Signal()
        if interface.enable_replace:
            # Note: replace may be asserted at the same time as collision
            # when addresses are different. In that case, it is a collision.
            self.sync.rsys += replace.eq(self.ev.timestamp == buf.timestamp)
        # Detect sequence errors on coarse timestamps only
        # so that they are mutually exclusive with collision errors.
        self.sync.rsys += sequence_error.eq(self.ev.timestamp[fine_ts_width:] <
                                            buf.timestamp[fine_ts_width:])
        if interface.enable_replace:
            if address_width:
                different_addresses = self.ev.address != buf.address
            else:
                different_addresses = 0
            if fine_ts_width:
                self.sync.rsys += collision.eq(
                    (self.ev.timestamp[fine_ts_width:] == buf.timestamp[fine_ts_width:])
                    & ((self.ev.timestamp[:fine_ts_width] != buf.timestamp[:fine_ts_width])
                       |different_addresses))
        else:
            self.sync.rsys += collision.eq(
                self.ev.timestamp[fine_ts_width:] == buf.timestamp[fine_ts_width:])
        self.comb += [
            any_error.eq(sequence_error | collision),
            self.sequence_error.eq(self.we & sequence_error),
            self.collision.eq(self.we & collision)
        ]

        # Buffer read and FIFO write
        self.comb += fifo_in.eq(buf)
        in_guard_time = Signal()
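        # in_guard_time: the buffered event is due within guard_io_cycles of
        # the current time, so it must be pushed to the FIFO now, or flagged
        # as an underflow if it was only just written.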
        self.comb += in_guard_time.eq(
            buf.timestamp[fine_ts_width:]
                < counter.value_sys + guard_io_cycles)
        self.sync.rsys += If(in_guard_time, buf_pending.eq(0))
        self.comb += \
            If(buf_pending,
                If(in_guard_time,
                    If(buf_just_written,
                        self.underflow.eq(1)
                    ).Else(
                        fifo.we.eq(1)
                    )
                ),
                If(self.we & ~replace & ~any_error,
                   fifo.we.eq(1)
                )
            )

        # Buffer write
        # Must come after read to handle concurrent read+write properly
        self.sync.rsys += [
            buf_just_written.eq(0),
            If(self.we & ~any_error,
                buf_just_written.eq(1),
                buf_pending.eq(1),
                buf.eq(self.ev)
            )
        ]
        self.comb += self.writable.eq(fifo.writable)

        # Buffer output of FIFO to improve timing
        dout_stb = Signal()
        dout_ack = Signal()
        dout = Record(ev_layout)
        self.sync.rio += \
            If(fifo.re,
                dout_stb.eq(1),
                dout.eq(fifo_out)
            ).Elif(dout_ack,
                dout_stb.eq(0)
            )
        self.comb += fifo.re.eq(fifo.readable & (~dout_stb | dout_ack))

        # FIFO read through buffer
        self.comb += [
            dout_ack.eq(
                dout.timestamp[fine_ts_width:] == counter.value_rtio),
            interface.stb.eq(dout_stb & dout_ack)
        ]

        busy_transfer = _BlindTransfer()
        self.submodules += busy_transfer
        self.comb += [
            busy_transfer.i.eq(interface.stb & interface.busy),
            self.busy.eq(busy_transfer.o),
        ]

        if data_width:
            self.comb += interface.data.eq(dout.data)
        if address_width:
            self.comb += interface.address.eq(dout.address)
        if fine_ts_width:
            self.comb += interface.fine_ts.eq(dout.timestamp[:fine_ts_width])
Example No. 15
    def add_input(self, n, channel):
        rt_packet = self.rt_packet

        interface = channel.interface.i
        if interface is None:
            return
        data_width = rtlink.get_data_width(interface)
        fine_ts_width = rtlink.get_fine_ts_width(interface)

        selected = Signal()
        self.comb += selected.eq(rt_packet.read_channel == n)

        # latency compensation
        if interface.delay:
            tsc_comp = Signal.like(self.tsc)
            self.sync.rtio += tsc_comp.eq(self.tsc - interface.delay + 1)
        else:
            tsc_comp = self.tsc

        # FIFO
        ev_layout = []
        if data_width:
            ev_layout.append(("data", data_width))
        if interface.timestamped:
            ev_layout.append(("timestamp", len(self.tsc) + fine_ts_width))

        fifo = ClockDomainsRenamer("rio")(SyncFIFOBuffered(
            layout_len(ev_layout), channel.ififo_depth))
        self.submodules += fifo
        fifo_in = Record(ev_layout)
        fifo_out = Record(ev_layout)
        self.comb += [
            fifo.din.eq(fifo_in.raw_bits()),
            fifo_out.raw_bits().eq(fifo.dout)
        ]

        # FIFO write
        if data_width:
            self.comb += fifo_in.data.eq(interface.data)
        if interface.timestamped:
            if fine_ts_width:
                full_ts = Cat(interface.fine_ts, tsc_comp)
            else:
                full_ts = tsc_comp
            self.comb += fifo_in.timestamp.eq(full_ts)
        self.comb += fifo.we.eq(interface.stb)

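        # The overflow flag is sticky: set when a write hits a full FIFO and
        # cleared only when rt_packet acknowledges it for this channel.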
        overflow = Signal()
        self.comb += If(selected, rt_packet.read_overflow.eq(overflow))
        self.sync.rio += [
            If(selected & rt_packet.read_overflow_ack, overflow.eq(0)),
            If(fifo.we & ~fifo.writable, overflow.eq(1))
        ]

        # FIFO read
        if data_width:
            self.comb += If(selected, rt_packet.read_data.eq(fifo_out.data))
        if interface.timestamped:
            self.comb += If(selected,
                            rt_packet.read_timestamp.eq(fifo_out.timestamp))
        self.comb += [
            If(selected, rt_packet.read_readable.eq(fifo.readable),
               fifo.re.eq(rt_packet.read_consume))
        ]
Example No. 16
File: core.py Project: m-labs/artiq
    def __init__(self, tsc, channels, lane_count=8, fifo_depth=128):
        self.cri = cri.Interface()
        self.reset = CSR()
        self.reset_phy = CSR()
        self.async_error = CSR(3)
        self.collision_channel = CSRStatus(16)
        self.busy_channel = CSRStatus(16)
        self.sequence_error_channel = CSRStatus(16)

        # Clocking/Reset
        # Create rsys, rio and rio_phy domains based on sys and rtio
        # with reset controlled by CSR.
        #
        # The `rio` CD contains logic that is reset with `core.reset()`.
        # That's state that could unduly affect subsequent experiments,
        # i.e. input overflows caused by input gates left open, FIFO events far
        # in the future blocking the experiment, pending RTIO or
        # wishbone bus transactions, etc.
        # The `rio_phy` CD contains state that is maintained across
        # `core.reset()`, i.e. TTL output state, OE, DDS state.
        cmd_reset = Signal(reset=1)
        cmd_reset_phy = Signal(reset=1)
        self.sync += [
            cmd_reset.eq(self.reset.re),
            cmd_reset_phy.eq(self.reset_phy.re)
        ]
        cmd_reset.attr.add("no_retiming")
        cmd_reset_phy.attr.add("no_retiming")

        self.clock_domains.cd_rsys = ClockDomain()
        self.clock_domains.cd_rio = ClockDomain()
        self.clock_domains.cd_rio_phy = ClockDomain()
        self.comb += [
            self.cd_rsys.clk.eq(ClockSignal()),
            self.cd_rsys.rst.eq(cmd_reset),
            self.cd_rio.clk.eq(ClockSignal("rtio")),
            self.cd_rio_phy.clk.eq(ClockSignal("rtio"))
        ]
        self.specials += AsyncResetSynchronizer(self.cd_rio, cmd_reset)
        self.specials += AsyncResetSynchronizer(self.cd_rio_phy, cmd_reset_phy)

        # TSC
        chan_fine_ts_width = max(max(rtlink.get_fine_ts_width(channel.interface.o)
                                     for channel in channels),
                                 max(rtlink.get_fine_ts_width(channel.interface.i)
                                     for channel in channels))
        assert tsc.glbl_fine_ts_width >= chan_fine_ts_width

        # Outputs/Inputs
        quash_channels = [n for n, c in enumerate(channels) if isinstance(c, LogChannel)]

        outputs = SED(channels, tsc.glbl_fine_ts_width, "async",
            quash_channels=quash_channels,
            lane_count=lane_count, fifo_depth=fifo_depth,
            interface=self.cri)
        self.submodules += outputs
        self.comb += outputs.coarse_timestamp.eq(tsc.coarse_ts)
        self.sync += outputs.minimum_coarse_timestamp.eq(tsc.coarse_ts_sys + 16)

        inputs = InputCollector(tsc, channels, "async",
            quash_channels=quash_channels,
            interface=self.cri)
        self.submodules += inputs

        # Asynchronous output errors
        o_collision_sync = BlindTransfer(data_width=16)
        o_busy_sync = BlindTransfer(data_width=16)
        self.submodules += o_collision_sync, o_busy_sync
        o_collision = Signal()
        o_busy = Signal()
        o_sequence_error = Signal()
        self.sync += [
            If(self.async_error.re,
                If(self.async_error.r[0], o_collision.eq(0)),
                If(self.async_error.r[1], o_busy.eq(0)),
                If(self.async_error.r[2], o_sequence_error.eq(0)),
            ),
            If(o_collision_sync.o, 
                o_collision.eq(1),
                If(~o_collision,
                    self.collision_channel.status.eq(o_collision_sync.data_o)
                )
            ),
            If(o_busy_sync.o, 
                o_busy.eq(1),
                If(~o_busy,
                    self.busy_channel.status.eq(o_busy_sync.data_o)
                )
            ),
            If(outputs.sequence_error, 
                o_sequence_error.eq(1),
                If(~o_sequence_error,
                    self.sequence_error_channel.status.eq(outputs.sequence_error_channel)
                )
            )
        ]
        self.comb += self.async_error.w.eq(Cat(o_collision, o_busy, o_sequence_error))

        self.comb += [
            o_collision_sync.i.eq(outputs.collision),
            o_collision_sync.data_i.eq(outputs.collision_channel),
            o_busy_sync.i.eq(outputs.busy),
            o_busy_sync.data_i.eq(outputs.busy_channel)
        ]
Example No. 17
    def add_input(self, n, channel):
        rt_packet = self.rt_packet

        interface = channel.interface.i
        if interface is None:
            return
        data_width = rtlink.get_data_width(interface)
        fine_ts_width = rtlink.get_fine_ts_width(interface)

        selected = Signal()
        self.comb += selected.eq(rt_packet.read_channel == n)

        # latency compensation
        if interface.delay:
            tsc_comp = Signal.like(self.tsc)
            self.sync.rtio += tsc_comp.eq(self.tsc - interface.delay + 1)
        else:
            tsc_comp = self.tsc

        # FIFO
        ev_layout = []
        if data_width:
            ev_layout.append(("data", data_width))
        if interface.timestamped:
            ev_layout.append(("timestamp", len(self.tsc) + fine_ts_width))

        fifo = ClockDomainsRenamer("rio")(
            SyncFIFOBuffered(layout_len(ev_layout), channel.ififo_depth))
        self.submodules += fifo
        fifo_in = Record(ev_layout)
        fifo_out = Record(ev_layout)
        self.comb += [
            fifo.din.eq(fifo_in.raw_bits()),
            fifo_out.raw_bits().eq(fifo.dout)
        ]

        # FIFO write
        if data_width:
            self.comb += fifo_in.data.eq(interface.data)
        if interface.timestamped:
            if fine_ts_width:
                full_ts = Cat(interface.fine_ts, tsc_comp)
            else:
                full_ts = tsc_comp
            self.comb += fifo_in.timestamp.eq(full_ts)
        self.comb += fifo.we.eq(interface.stb)

        overflow = Signal()
        self.comb += If(selected, rt_packet.read_overflow.eq(overflow))
        self.sync.rio += [
            If(selected & rt_packet.read_overflow_ack, overflow.eq(0)),
            If(fifo.we & ~fifo.writable, overflow.eq(1))
        ]

        # FIFO read
        if data_width:
            self.comb += If(selected, rt_packet.read_data.eq(fifo_out.data))
        if interface.timestamped:
            self.comb += If(selected, rt_packet.read_timestamp.eq(fifo_out.timestamp))
        self.comb += [
            If(selected,
                rt_packet.read_readable.eq(fifo.readable),
                fifo.re.eq(rt_packet.read_consume)
            )
        ]
Example No. 18
    def add_output(self, n, channel):
        rt_packet = self.rt_packet
        max_fine_ts_width = self.max_fine_ts_width

        interface = channel.interface.o
        data_width = rtlink.get_data_width(interface)
        address_width = rtlink.get_address_width(interface)
        fine_ts_width = rtlink.get_fine_ts_width(interface)
        assert fine_ts_width <= max_fine_ts_width

        we = Signal()
        self.comb += we.eq(rt_packet.write_stb
                           & (rt_packet.write_channel == n))
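        # Slice the packet timestamp: drop the fine-timestamp bits this
        # channel does not implement, then split the remainder into coarse
        # and fine parts.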
        write_timestamp = rt_packet.write_timestamp[max_fine_ts_width-fine_ts_width:]
        write_timestamp_coarse = rt_packet.write_timestamp[max_fine_ts_width:]
        write_timestamp_fine = rt_packet.write_timestamp[max_fine_ts_width-fine_ts_width:max_fine_ts_width]

        # latency compensation
        if interface.delay:
            tsc_comp = Signal.like(self.tsc)
            self.sync.rtio += tsc_comp.eq(self.tsc - interface.delay + 1)
        else:
            tsc_comp = self.tsc

        # FIFO
        ev_layout = []
        if data_width:
            ev_layout.append(("data", data_width))
        if address_width:
            ev_layout.append(("address", address_width))
        ev_layout.append(("timestamp", len(self.tsc) + fine_ts_width))

        fifo = ClockDomainsRenamer("rio")(
            SyncFIFOBuffered(layout_len(ev_layout), channel.ofifo_depth))
        self.submodules += fifo
        fifo_in = Record(ev_layout)
        fifo_out = Record(ev_layout)
        self.comb += [
            fifo.din.eq(fifo_in.raw_bits()),
            fifo_out.raw_bits().eq(fifo.dout)
        ]

        # Buffer
        buf_pending = Signal()
        buf = Record(ev_layout)
        buf_just_written = Signal()

        # Special cases
        replace = Signal()
        sequence_error = Signal()
        collision = Signal()
        any_error = Signal()
        if interface.enable_replace:
            # Note: replace may be asserted at the same time as collision
            # when addresses are different. In that case, it is a collision.
            self.sync.rio += replace.eq(write_timestamp == buf.timestamp)
        # Detect sequence errors on coarse timestamps only
        # so that they are mutually exclusive with collision errors.
        self.sync.rio += sequence_error.eq(write_timestamp_coarse < buf.timestamp[fine_ts_width:])
        if interface.enable_replace:
            if address_width:
                different_addresses = rt_packet.write_address != buf.address
            else:
                different_addresses = 0
            if fine_ts_width:
                self.sync.rio += collision.eq(
                    (write_timestamp_coarse == buf.timestamp[fine_ts_width:])
                    & ((write_timestamp_fine != buf.timestamp[:fine_ts_width])
                       | different_addresses))
            else:
                self.sync.rio += collision.eq(
                    (write_timestamp == buf.timestamp) & different_addresses)
        else:
            self.sync.rio += collision.eq(
                write_timestamp_coarse == buf.timestamp[fine_ts_width:])
        self.comb += any_error.eq(sequence_error | collision)
        self.sync.rio += [
            If(we & sequence_error, self.write_sequence_error.eq(1)),
            If(we & collision, self.collision.eq(1))
        ]

        # Buffer read and FIFO write
        self.comb += fifo_in.eq(buf)
        in_guard_time = Signal()
        self.comb += in_guard_time.eq(
            buf.timestamp[fine_ts_width:] < tsc_comp + 4)
        self.sync.rio += If(in_guard_time, buf_pending.eq(0))
        report_underflow = Signal()
        self.comb += \
            If(buf_pending,
                If(in_guard_time,
                    If(buf_just_written,
                        report_underflow.eq(1)
                    ).Else(
                        fifo.we.eq(1)
                    )
                ),
                If(we & ~replace & ~any_error,
                   fifo.we.eq(1)
                )
            )
        self.sync.rio += If(report_underflow, self.write_underflow.eq(1))

        # Buffer write
        # Must come after read to handle concurrent read+write properly
        self.sync.rio += [
            buf_just_written.eq(0),
            If(we & ~any_error,
                buf_just_written.eq(1),
                buf_pending.eq(1),
                buf.timestamp.eq(write_timestamp),
                buf.data.eq(rt_packet.write_data) if data_width else [],
                buf.address.eq(rt_packet.write_address) if address_width else [],
            ),
            If(we & ~fifo.writable, self.write_overflow.eq(1))
        ]

        # FIFO level
        self.sync.rio += \
            If(rt_packet.fifo_space_update &
               (rt_packet.fifo_space_channel == n),
                rt_packet.fifo_space.eq(channel.ofifo_depth - fifo.level))

        # FIFO read
        self.sync.rio += [
            fifo.re.eq(0),
            interface.stb.eq(0),
            If(fifo.readable &
               (fifo_out.timestamp[fine_ts_width:] == tsc_comp),
                fifo.re.eq(1),
                interface.stb.eq(1)
            )
        ]
        if data_width:
            self.sync.rio += interface.data.eq(fifo_out.data)
        if address_width:
            self.sync.rio += interface.address.eq(fifo_out.address)
        if fine_ts_width:
            self.sync.rio += interface.fine_ts.eq(fifo_out.timestamp[:fine_ts_width])

        self.sync.rio += If(interface.stb & interface.busy, self.busy.eq(1))
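
The slices above drop the fine-timestamp bits this channel cannot represent, then compare the coarse part alone for sequence errors and the (coarse, fine) pair for replacement and collisions. A plain-Python restatement of that slicing with made-up widths, for illustration only:

# Illustrative widths; real values come from the rtlink interface.
MAX_FINE_TS_WIDTH = 3
FINE_TS_WIDTH = 1

def split_write_timestamp(write_timestamp):
    # Discard the fine bits the channel cannot use, then split the rest
    # into (coarse, fine), as the Signal slices above do.
    ts = write_timestamp >> (MAX_FINE_TS_WIDTH - FINE_TS_WIDTH)
    return ts >> FINE_TS_WIDTH, ts & ((1 << FINE_TS_WIDTH) - 1)

# Same coarse cycle, different fine slot: a collision candidate.
assert split_write_timestamp(0b101010)[0] == split_write_timestamp(0b101110)[0]
assert split_write_timestamp(0b101010)[1] != split_write_timestamp(0b101110)[1]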
Example No. 19
File: iot.py Project: atcher0/artiq
    def __init__(self, rt_packets, channels, max_fine_ts_width, full_ts_width):
        tsc = Signal(full_ts_width - max_fine_ts_width)
        self.sync.rtio += \
            If(rt_packets.tsc_load,
                tsc.eq(rt_packets.tsc_value)
            ).Else(
                tsc.eq(tsc + 1)
            )

        for n, channel in enumerate(channels):
            interface = channel.interface.o
            data_width = rtlink.get_data_width(interface)
            address_width = rtlink.get_address_width(interface)
            fine_ts_width = rtlink.get_fine_ts_width(interface)
            assert fine_ts_width <= max_fine_ts_width

            # FIFO
            ev_layout = []
            if data_width:
                ev_layout.append(("data", data_width))
            if address_width:
                ev_layout.append(("address", address_width))
            ev_layout.append(("timestamp", len(tsc) + fine_ts_width))

            fifo = ClockDomainsRenamer("rio")(SyncFIFOBuffered(
                layout_len(ev_layout), channel.ofifo_depth))
            self.submodules += fifo
            fifo_in = Record(ev_layout)
            fifo_out = Record(ev_layout)
            self.comb += [
                fifo.din.eq(fifo_in.raw_bits()),
                fifo_out.raw_bits().eq(fifo.dout)
            ]

            # FIFO level
            self.sync.rio += \
                If(rt_packets.fifo_space_update &
                   (rt_packets.fifo_space_channel == n),
                    rt_packets.fifo_space.eq(channel.ofifo_depth - fifo.level))

            # FIFO write
            self.comb += fifo.we.eq(rt_packets.write_stb
                                    & (rt_packets.write_channel == n))
            self.sync.rio += [
                If(rt_packets.write_overflow_ack,
                   rt_packets.write_overflow.eq(0)),
                If(rt_packets.write_underflow_ack,
                   rt_packets.write_underflow.eq(0)),
                If(fifo.we,
                    If(~fifo.writable,
                        rt_packets.write_overflow.eq(1)),
                    If(rt_packets.write_timestamp[max_fine_ts_width:] < tsc + 4,
                        rt_packets.write_underflow.eq(1)))
            ]
            if data_width:
                self.comb += fifo_in.data.eq(rt_packets.write_data)
            if address_width:
                self.comb += fifo_in.address.eq(rt_packets.write_address)
            self.comb += fifo_in.timestamp.eq(
                rt_packets.write_timestamp[max_fine_ts_width - fine_ts_width:])

            # FIFO read
            self.sync.rio += [
                fifo.re.eq(0),
                interface.stb.eq(0),
                If(fifo.readable & (fifo_out.timestamp[fine_ts_width:] == tsc),
                   fifo.re.eq(1), interface.stb.eq(1))
            ]
            if data_width:
                self.sync.rio += interface.data.eq(fifo_out.data)
            if address_width:
                self.sync.rio += interface.address.eq(fifo_out.address)
            if fine_ts_width:
                self.sync.rio += interface.fine_ts.eq(
                    fifo_out.timestamp[:fine_ts_width])
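
Each channel above is backed by a SyncFIFOBuffered in the rio clock domain. For reference, a minimal standalone simulation sketch of that primitive's handshake; the width, depth and test value are arbitrary and this testbench is not part of the ARTIQ sources:

from migen import *
from migen.genlib.fifo import SyncFIFOBuffered
from migen.sim import run_simulation

def tb(dut):
    # Push one word, wait for it to reach the buffered output, consume it.
    yield dut.din.eq(0x1234)
    yield dut.we.eq(1)
    yield
    yield dut.we.eq(0)
    while not (yield dut.readable):
        yield
    assert (yield dut.dout) == 0x1234
    yield dut.re.eq(1)
    yield

dut = SyncFIFOBuffered(16, 4)
run_simulation(dut, tb(dut))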
Example No. 20
    def __init__(self, channels, full_ts_width=63, guard_io_cycles=20):
        data_width = max(rtlink.get_data_width(c.interface) for c in channels)
        address_width = max(
            rtlink.get_address_width(c.interface) for c in channels)
        fine_ts_width = max(
            rtlink.get_fine_ts_width(c.interface) for c in channels)

        self.data_width = data_width
        self.address_width = address_width
        self.fine_ts_width = fine_ts_width

        # CSRs
        self.kcsrs = _KernelCSRs(bits_for(len(channels) - 1), data_width,
                                 address_width, full_ts_width)

        # Clocking/Reset
        # Create rsys, rio and rio_phy domains based on sys and rtio
        # with reset controlled by CSR.
        self.clock_domains.cd_rsys = ClockDomain()
        self.clock_domains.cd_rio = ClockDomain()
        self.clock_domains.cd_rio_phy = ClockDomain()
        self.comb += [
            self.cd_rsys.clk.eq(ClockSignal()),
            self.cd_rsys.rst.eq(self.kcsrs.reset.storage)
        ]
        self.comb += self.cd_rio.clk.eq(ClockSignal("rtio"))
        self.specials += AsyncResetSynchronizer(
            self.cd_rio, self.kcsrs.reset.storage
            | ResetSignal("rtio", allow_reset_less=True))
        self.comb += self.cd_rio_phy.clk.eq(ClockSignal("rtio"))
        self.specials += AsyncResetSynchronizer(
            self.cd_rio_phy, self.kcsrs.reset_phy.storage
            | ResetSignal("rtio", allow_reset_less=True))

        # Managers
        self.submodules.counter = _RTIOCounter(full_ts_width - fine_ts_width)

        i_datas, i_timestamps = [], []
        o_statuses, i_statuses = [], []
        sel = self.kcsrs.chan_sel.storage
        for n, channel in enumerate(channels):
            if isinstance(channel, LogChannel):
                i_datas.append(0)
                i_timestamps.append(0)
                i_statuses.append(0)
                continue

            selected = Signal()
            self.comb += selected.eq(sel == n)

            o_manager = _OutputManager(channel.interface.o, self.counter,
                                       channel.ofifo_depth, guard_io_cycles)
            self.submodules += o_manager

            if hasattr(o_manager.ev, "data"):
                self.comb += o_manager.ev.data.eq(self.kcsrs.o_data.storage)
            if hasattr(o_manager.ev, "address"):
                self.comb += o_manager.ev.address.eq(
                    self.kcsrs.o_address.storage)
            ts_shift = (len(self.kcsrs.o_timestamp.storage) -
                        len(o_manager.ev.timestamp))
            self.comb += o_manager.ev.timestamp.eq(
                self.kcsrs.o_timestamp.storage[ts_shift:])

            self.comb += o_manager.we.eq(selected & self.kcsrs.o_we.re)

            underflow = Signal()
            sequence_error = Signal()
            collision = Signal()
            busy = Signal()
            self.sync.rsys += [
                If(selected & self.kcsrs.o_underflow_reset.re,
                   underflow.eq(0)),
                If(selected & self.kcsrs.o_sequence_error_reset.re,
                   sequence_error.eq(0)),
                If(selected & self.kcsrs.o_collision_reset.re,
                   collision.eq(0)),
                If(selected & self.kcsrs.o_busy_reset.re, busy.eq(0)),
                If(o_manager.underflow, underflow.eq(1)),
                If(o_manager.sequence_error, sequence_error.eq(1)),
                If(o_manager.collision, collision.eq(1)),
                If(o_manager.busy, busy.eq(1))
            ]
            o_statuses.append(
                Cat(~o_manager.writable, underflow, sequence_error, collision,
                    busy))

            if channel.interface.i is not None:
                i_manager = _InputManager(channel.interface.i, self.counter,
                                          channel.ififo_depth)
                self.submodules += i_manager

                if hasattr(i_manager.ev, "data"):
                    i_datas.append(i_manager.ev.data)
                else:
                    i_datas.append(0)
                if channel.interface.i.timestamped:
                    ts_shift = (len(self.kcsrs.i_timestamp.status) -
                                len(i_manager.ev.timestamp))
                    i_timestamps.append(i_manager.ev.timestamp << ts_shift)
                else:
                    i_timestamps.append(0)

                self.comb += i_manager.re.eq(selected & self.kcsrs.i_re.re)

                overflow = Signal()
                self.sync.rsys += [
                    If(selected & self.kcsrs.i_overflow_reset.re,
                       overflow.eq(0)),
                    If(i_manager.overflow, overflow.eq(1))
                ]
                i_statuses.append(Cat(~i_manager.readable, overflow))

            else:
                i_datas.append(0)
                i_timestamps.append(0)
                i_statuses.append(0)
        if data_width:
            self.comb += self.kcsrs.i_data.status.eq(Array(i_datas)[sel])
        self.comb += [
            self.kcsrs.i_timestamp.status.eq(Array(i_timestamps)[sel]),
            self.kcsrs.o_status.status.eq(Array(o_statuses)[sel]),
            self.kcsrs.i_status.status.eq(Array(i_statuses)[sel])
        ]

        # Counter access
        self.sync += \
           If(self.kcsrs.counter_update.re,
               self.kcsrs.counter.status.eq(self.counter.value_sys
                                                << fine_ts_width)
           )
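
The per-channel CSR readback above leans on Migen's Array construct: indexing an Array with a Signal, as in Array(i_datas)[sel], infers a combinational multiplexer. A minimal standalone sketch of that idiom with made-up names and widths:

from migen import *

class ChannelMux(Module):
    def __init__(self, n=4, width=8):
        self.sel = Signal(max=n)
        self.statuses = [Signal(width) for _ in range(n)]
        self.out = Signal(width)
        # Array()[Signal] becomes a mux in the generated logic.
        self.comb += self.out.eq(Array(self.statuses)[self.sel])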
Example No. 21
    def __init__(self, interface, counter, fifo_depth, guard_io_cycles):
        data_width = rtlink.get_data_width(interface)
        address_width = rtlink.get_address_width(interface)
        fine_ts_width = rtlink.get_fine_ts_width(interface)

        ev_layout = []
        if data_width:
            ev_layout.append(("data", data_width))
        if address_width:
            ev_layout.append(("address", address_width))
        ev_layout.append(("timestamp", counter.width + fine_ts_width))
        # ev must be valid 1 cycle before we to account for the latency in
        # generating replace, sequence_error and collision
        self.ev = Record(ev_layout)

        self.writable = Signal()
        self.we = Signal()  # maximum throughput 1/2

        self.underflow = Signal()  # valid 1 cycle after we, pulsed
        self.sequence_error = Signal()
        self.collision = Signal()
        self.busy = Signal()  # pulsed

        # # #

        # FIFO
        fifo = ClockDomainsRenamer({
            "write": "rsys",
            "read": "rio"
        })(AsyncFIFO(layout_len(ev_layout), fifo_depth))
        self.submodules += fifo
        fifo_in = Record(ev_layout)
        fifo_out = Record(ev_layout)
        self.comb += [
            fifo.din.eq(fifo_in.raw_bits()),
            fifo_out.raw_bits().eq(fifo.dout)
        ]

        # Buffer
        buf_pending = Signal()
        buf = Record(ev_layout)
        buf_just_written = Signal()

        # Special cases
        replace = Signal()
        sequence_error = Signal()
        collision = Signal()
        any_error = Signal()
        if interface.enable_replace:
            # Note: replace may be asserted at the same time as collision
            # when addresses are different. In that case, it is a collision.
            self.sync.rsys += replace.eq(self.ev.timestamp == buf.timestamp)
        # Detect sequence errors on coarse timestamps only
        # so that they are mutually exclusive with collision errors.
        self.sync.rsys += sequence_error.eq(
            self.ev.timestamp[fine_ts_width:] < buf.timestamp[fine_ts_width:])
        if interface.enable_replace:
            if address_width:
                different_addresses = self.ev.address != buf.address
            else:
                different_addresses = 0
            if fine_ts_width:
                self.sync.rsys += collision.eq(
                    (self.ev.timestamp[fine_ts_width:] ==
                     buf.timestamp[fine_ts_width:])
                    & ((self.ev.timestamp[:fine_ts_width] !=
                        buf.timestamp[:fine_ts_width])
                       | different_addresses))
            else:
                self.sync.rsys += collision.eq(
                    (self.ev.timestamp == buf.timestamp) & different_addresses)
        else:
            self.sync.rsys += collision.eq(self.ev.timestamp[fine_ts_width:] ==
                                           buf.timestamp[fine_ts_width:])
        self.comb += [
            any_error.eq(sequence_error | collision),
            self.sequence_error.eq(self.we & sequence_error),
            self.collision.eq(self.we & collision)
        ]

        # Buffer read and FIFO write
        self.comb += fifo_in.eq(buf)
        in_guard_time = Signal()
        self.comb += in_guard_time.eq(
            buf.timestamp[fine_ts_width:] < counter.value_sys +
            guard_io_cycles)
        self.sync.rsys += If(in_guard_time, buf_pending.eq(0))
        self.comb += \
            If(buf_pending,
                If(in_guard_time,
                    If(buf_just_written,
                        self.underflow.eq(1)
                    ).Else(
                        fifo.we.eq(1)
                    )
                ),
                If(self.we & ~replace & ~any_error,
                   fifo.we.eq(1)
                )
            )

        # Buffer write
        # Must come after read to handle concurrent read+write properly
        self.sync.rsys += [
            buf_just_written.eq(0),
            If(self.we & ~any_error, buf_just_written.eq(1), buf_pending.eq(1),
               buf.eq(self.ev))
        ]
        self.comb += self.writable.eq(fifo.writable)

        # Buffer output of FIFO to improve timing
        dout_stb = Signal()
        dout_ack = Signal()
        dout = Record(ev_layout)
        self.sync.rio += \
            If(fifo.re,
                dout_stb.eq(1),
                dout.eq(fifo_out)
            ).Elif(dout_ack,
                dout_stb.eq(0)
            )
        self.comb += fifo.re.eq(fifo.readable & (~dout_stb | dout_ack))

        # latency compensation
        if interface.delay:
            counter_rtio = Signal.like(counter.value_rtio)
            self.sync.rtio += counter_rtio.eq(counter.value_rtio -
                                              interface.delay + 1)
        else:
            counter_rtio = counter.value_rtio

        # FIFO read through buffer
        self.comb += [
            dout_ack.eq(dout.timestamp[fine_ts_width:] == counter_rtio),
            interface.stb.eq(dout_stb & dout_ack)
        ]

        busy_transfer = BlindTransfer()
        self.submodules += busy_transfer
        self.comb += [
            busy_transfer.i.eq(interface.stb & interface.busy),
            self.busy.eq(busy_transfer.o),
        ]

        if data_width:
            self.comb += interface.data.eq(dout.data)
        if address_width:
            self.comb += interface.address.eq(dout.address)
        if fine_ts_width:
            self.comb += interface.fine_ts.eq(dout.timestamp[:fine_ts_width])
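
The buffer and guard-time logic above carries the underflow decision: an event whose coarse timestamp is already within guard_io_cycles of the system-domain counter is either flushed to the FIFO immediately or, if it has only just been written, reported as an underflow. A plain-Python restatement of that decision with illustrative numbers (this is not the gateware, just the same arithmetic):

GUARD_IO_CYCLES = 20  # illustrative; passed as a constructor argument above

def classify(buf_coarse_ts, counter_value_sys, buf_just_written):
    # Mirrors the in_guard_time / underflow / fifo.we decision above.
    in_guard_time = buf_coarse_ts < counter_value_sys + GUARD_IO_CYCLES
    if in_guard_time:
        return "underflow" if buf_just_written else "flush to FIFO"
    return "hold in buffer"  # may still be replaced by a later event

assert classify(105, 100, buf_just_written=True) == "underflow"
assert classify(105, 100, buf_just_written=False) == "flush to FIFO"
assert classify(200, 100, buf_just_written=False) == "hold in buffer"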
Example No. 22
    def add_output(self, n, channel):
        rt_packet = self.rt_packet
        max_fine_ts_width = self.max_fine_ts_width

        interface = channel.interface.o
        data_width = rtlink.get_data_width(interface)
        address_width = rtlink.get_address_width(interface)
        fine_ts_width = rtlink.get_fine_ts_width(interface)
        assert fine_ts_width <= max_fine_ts_width

        we = Signal()
        self.comb += we.eq(rt_packet.write_stb
                           & (rt_packet.write_channel == n))
        write_timestamp = rt_packet.write_timestamp[max_fine_ts_width -
                                                    fine_ts_width:]
        write_timestamp_coarse = rt_packet.write_timestamp[max_fine_ts_width:]
        write_timestamp_fine = rt_packet.write_timestamp[
            max_fine_ts_width - fine_ts_width:max_fine_ts_width]

        # latency compensation
        if interface.delay:
            tsc_comp = Signal.like(self.tsc)
            self.sync.rtio += tsc_comp.eq(self.tsc - interface.delay + 1)
        else:
            tsc_comp = self.tsc

        # FIFO
        ev_layout = []
        if data_width:
            ev_layout.append(("data", data_width))
        if address_width:
            ev_layout.append(("address", address_width))
        ev_layout.append(("timestamp", len(self.tsc) + fine_ts_width))

        fifo = ClockDomainsRenamer("rio")(SyncFIFOBuffered(
            layout_len(ev_layout), channel.ofifo_depth))
        self.submodules += fifo
        fifo_in = Record(ev_layout)
        fifo_out = Record(ev_layout)
        self.comb += [
            fifo.din.eq(fifo_in.raw_bits()),
            fifo_out.raw_bits().eq(fifo.dout)
        ]

        # Buffer
        buf_pending = Signal()
        buf = Record(ev_layout)
        buf_just_written = Signal()

        # Special cases
        replace = Signal()
        sequence_error = Signal()
        collision = Signal()
        any_error = Signal()
        if interface.enable_replace:
            # Note: replace may be asserted at the same time as collision
            # when addresses are different. In that case, it is a collision.
            self.sync.rio += replace.eq(write_timestamp == buf.timestamp)
        # Detect sequence errors on coarse timestamps only
        # so that they are mutually exclusive with collision errors.
        self.sync.rio += sequence_error.eq(
            write_timestamp_coarse < buf.timestamp[fine_ts_width:])
        if interface.enable_replace:
            if address_width:
                different_addresses = rt_packet.write_address != buf.address
            else:
                different_addresses = 0
            if fine_ts_width:
                self.sync.rio += collision.eq(
                    (write_timestamp_coarse == buf.timestamp[fine_ts_width:])
                    & ((write_timestamp_fine != buf.timestamp[:fine_ts_width])
                       | different_addresses))
            else:
                self.sync.rio += collision.eq(
                    (write_timestamp == buf.timestamp) & different_addresses)
        else:
            self.sync.rio += collision.eq(
                write_timestamp_coarse == buf.timestamp[fine_ts_width:])
        self.comb += any_error.eq(sequence_error | collision)
        self.sync.rio += [
            If(we & sequence_error, self.write_sequence_error.eq(1)),
            If(we & collision, self.collision.eq(1))
        ]

        # Buffer read and FIFO write
        self.comb += fifo_in.eq(buf)
        in_guard_time = Signal()
        self.comb += in_guard_time.eq(
            buf.timestamp[fine_ts_width:] < tsc_comp + 4)
        self.sync.rio += If(in_guard_time, buf_pending.eq(0))
        report_underflow = Signal()
        self.comb += \
            If(buf_pending,
                If(in_guard_time,
                    If(buf_just_written,
                        report_underflow.eq(1)
                    ).Else(
                        fifo.we.eq(1)
                    )
                ),
                If(we & ~replace & ~any_error,
                   fifo.we.eq(1)
                )
            )
        self.sync.rio += If(report_underflow, self.write_underflow.eq(1))

        # Buffer write
        # Must come after read to handle concurrent read+write properly
        self.sync.rio += [
            buf_just_written.eq(0),
            If(we & ~any_error,
                buf_just_written.eq(1),
                buf_pending.eq(1),
                buf.timestamp.eq(write_timestamp),
                buf.data.eq(rt_packet.write_data) if data_width else [],
                buf.address.eq(rt_packet.write_address) if address_width else [],
            ),
            If(we & ~fifo.writable, self.write_overflow.eq(1))
        ]

        # FIFO level
        self.sync.rio += \
            If(rt_packet.fifo_space_update &
               (rt_packet.fifo_space_channel == n),
                rt_packet.fifo_space.eq(channel.ofifo_depth - fifo.level))

        # FIFO read
        self.sync.rio += [
            fifo.re.eq(0),
            interface.stb.eq(0),
            If(
                fifo.readable
                & (fifo_out.timestamp[fine_ts_width:] == tsc_comp),
                fifo.re.eq(1), interface.stb.eq(1))
        ]
        if data_width:
            self.sync.rio += interface.data.eq(fifo_out.data)
        if address_width:
            self.sync.rio += interface.address.eq(fifo_out.address)
        if fine_ts_width:
            self.sync.rio += interface.fine_ts.eq(
                fifo_out.timestamp[:fine_ts_width])

        self.sync.rio += If(interface.stb & interface.busy, self.busy.eq(1))
Example No. 23
File: core.py Project: cr1901/artiq
    def __init__(self, interface, counter, fifo_depth, guard_io_cycles):
        data_width = rtlink.get_data_width(interface)
        address_width = rtlink.get_address_width(interface)
        fine_ts_width = rtlink.get_fine_ts_width(interface)

        ev_layout = []
        if data_width:
            ev_layout.append(("data", data_width))
        if address_width:
            ev_layout.append(("address", address_width))
        ev_layout.append(("timestamp", counter.width + fine_ts_width))
        # ev must be valid 1 cycle before we to account for the latency in
        # generating replace, sequence_error and nop
        self.ev = Record(ev_layout)

        self.writable = Signal()
        self.we = Signal()  # maximum throughput 1/2

        self.underflow = Signal()  # valid 1 cycle after we, pulsed
        self.sequence_error = Signal()
        self.collision_error = Signal()

        # # #

        # FIFO
        fifo = ClockDomainsRenamer({"write": "rsys", "read": "rio"})(
            AsyncFIFO(layout_len(ev_layout), fifo_depth))
        self.submodules += fifo
        fifo_in = Record(ev_layout)
        fifo_out = Record(ev_layout)
        self.comb += [
            fifo.din.eq(fifo_in.raw_bits()),
            fifo_out.raw_bits().eq(fifo.dout)
        ]

        # Buffer
        buf_pending = Signal()
        buf = Record(ev_layout)
        buf_just_written = Signal()

        # Special cases
        replace = Signal()
        sequence_error = Signal()
        collision_error = Signal()
        any_error = Signal()
        nop = Signal()
        self.sync.rsys += [
            # Note: replace does not perform any RTLink address checks,
            # i.e. a write to a different address will be silently replaced
            # as well.
            replace.eq(self.ev.timestamp == buf.timestamp),
            # Detect sequence errors on coarse timestamps only
            # so that they are mutually exclusive with collision errors.
            sequence_error.eq(self.ev.timestamp[fine_ts_width:]
                              < buf.timestamp[fine_ts_width:])
        ]
        if fine_ts_width:
            self.sync.rsys += collision_error.eq(
                (self.ev.timestamp[fine_ts_width:] == buf.timestamp[fine_ts_width:])
                & (self.ev.timestamp[:fine_ts_width] != buf.timestamp[:fine_ts_width]))
        self.comb += any_error.eq(sequence_error | collision_error)
        if interface.suppress_nop:
            # disable NOP at reset: do not suppress a first write with all 0s
            nop_en = Signal(reset=0)
            addresses_equal = [getattr(self.ev, a) == getattr(buf, a)
                               for a in ("data", "address")
                               if hasattr(self.ev, a)]
            if addresses_equal:
                self.sync.rsys += nop.eq(
                    nop_en & reduce(and_, addresses_equal))
            else:
                self.comb += nop.eq(0)
            self.sync.rsys += [
                # buf now contains valid data. enable NOP.
                If(self.we & ~any_error, nop_en.eq(1)),
                # underflows cancel the write. allow it to be retried.
                If(self.underflow, nop_en.eq(0))
            ]
        self.comb += [
            self.sequence_error.eq(self.we & sequence_error),
            self.collision_error.eq(self.we & collision_error)
        ]

        # Buffer read and FIFO write
        self.comb += fifo_in.eq(buf)
        in_guard_time = Signal()
        self.comb += in_guard_time.eq(
            buf.timestamp[fine_ts_width:]
                < counter.value_sys + guard_io_cycles)
        self.sync.rsys += If(in_guard_time, buf_pending.eq(0))
        self.comb += \
            If(buf_pending,
                If(in_guard_time,
                    If(buf_just_written,
                        self.underflow.eq(1)
                    ).Else(
                        fifo.we.eq(1)
                    )
                ),
                If(self.we & ~replace & ~nop & ~any_error,
                   fifo.we.eq(1)
                )
            )

        # Buffer write
        # Must come after read to handle concurrent read+write properly
        self.sync.rsys += [
            buf_just_written.eq(0),
            If(self.we & ~nop & ~any_error,
                buf_just_written.eq(1),
                buf_pending.eq(1),
                buf.eq(self.ev)
            )
        ]
        self.comb += self.writable.eq(fifo.writable)

        # Buffer output of FIFO to improve timing
        dout_stb = Signal()
        dout_ack = Signal()
        dout = Record(ev_layout)
        self.sync.rio += \
            If(fifo.re,
                dout_stb.eq(1),
                dout.eq(fifo_out)
            ).Elif(dout_ack,
                dout_stb.eq(0)
            )
        self.comb += fifo.re.eq(fifo.readable & (~dout_stb | dout_ack))

        # FIFO read through buffer
        # TODO: report error on stb & busy
        self.comb += [
            dout_ack.eq(
                dout.timestamp[fine_ts_width:] == counter.value_rio),
            interface.stb.eq(dout_stb & dout_ack)
        ]
        if data_width:
            self.comb += interface.data.eq(dout.data)
        if address_width:
            self.comb += interface.address.eq(dout.address)
        if fine_ts_width:
            self.comb += interface.fine_ts.eq(dout.timestamp[:fine_ts_width])
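
The NOP test above ANDs together one equality comparison per field that the event actually carries (data and/or address) using functools.reduce with operator.and_. A plain-Python analogue of that reduction; the dictionary-based event representation is an assumption made for illustration:

from functools import reduce
from operator import and_

def is_nop(nop_en, new_event, buffered_event):
    # A write is a NOP when detection is enabled and every present field
    # matches what is already sitting in the buffer.
    comparisons = [new_event[k] == buffered_event[k] for k in new_event]
    return bool(nop_en and comparisons and reduce(and_, comparisons))

assert is_nop(True, {"data": 5, "address": 2}, {"data": 5, "address": 2})
assert not is_nop(True, {"data": 5, "address": 2}, {"data": 5, "address": 3})
assert not is_nop(False, {"data": 5}, {"data": 5})  # disabled at reset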
Example No. 24
    def __init__(self, interface, counter, fifo_depth, guard_io_cycles):
        data_width = rtlink.get_data_width(interface)
        address_width = rtlink.get_address_width(interface)
        fine_ts_width = rtlink.get_fine_ts_width(interface)

        ev_layout = []
        if data_width:
            ev_layout.append(("data", data_width))
        if address_width:
            ev_layout.append(("address", address_width))
        ev_layout.append(("timestamp", counter.width + fine_ts_width))
        # ev must be valid 1 cycle before we to account for the latency in
        # generating replace, sequence_error and nop
        self.ev = Record(ev_layout)

        self.writable = Signal()
        self.we = Signal()  # maximum throughput 1/2

        self.underflow = Signal()  # valid 1 cycle after we, pulsed
        self.sequence_error = Signal()
        self.collision_error = Signal()

        # # #

        # FIFO
        fifo = RenameClockDomains(AsyncFIFO(ev_layout, fifo_depth), {
            "write": "rsys",
            "read": "rio"
        })
        self.submodules += fifo

        # Buffer
        buf_pending = Signal()
        buf = Record(ev_layout)
        buf_just_written = Signal()

        # Special cases
        replace = Signal()
        sequence_error = Signal()
        collision_error = Signal()
        any_error = Signal()
        nop = Signal()
        self.sync.rsys += [
            # Note: replace does not perform any RTLink address checks,
            # i.e. a write to a different address will be silently replaced
            # as well.
            replace.eq(self.ev.timestamp == buf.timestamp),
            # Detect sequence errors on coarse timestamps only
            # so that they are mutually exclusive with collision errors.
            sequence_error.eq(self.ev.timestamp[fine_ts_width:] <
                              buf.timestamp[fine_ts_width:])
        ]
        if fine_ts_width:
            self.sync.rsys += collision_error.eq(
                (self.ev.timestamp[fine_ts_width:] ==
                 buf.timestamp[fine_ts_width:])
                & (self.ev.timestamp[:fine_ts_width] !=
                   buf.timestamp[:fine_ts_width]))
        self.comb += any_error.eq(sequence_error | collision_error)
        if interface.suppress_nop:
            # disable NOP at reset: do not suppress a first write with all 0s
            nop_en = Signal(reset=0)
            self.sync.rsys += [
                nop.eq(nop_en & optree(
                    "&",
                    [getattr(self.ev, a) == getattr(buf, a)
                     for a in ("data", "address") if hasattr(self.ev, a)],
                    default=0)),
                # buf now contains valid data. enable NOP.
                If(self.we & ~any_error, nop_en.eq(1)),
                # underflows cancel the write. allow it to be retried.
                If(self.underflow, nop_en.eq(0))
            ]
        self.comb += [
            self.sequence_error.eq(self.we & sequence_error),
            self.collision_error.eq(self.we & collision_error)
        ]

        # Buffer read and FIFO write
        self.comb += fifo.din.eq(buf)
        in_guard_time = Signal()
        self.comb += in_guard_time.eq(
            buf.timestamp[fine_ts_width:] < counter.value_sys +
            guard_io_cycles)
        self.sync.rsys += If(in_guard_time, buf_pending.eq(0))
        self.comb += \
            If(buf_pending,
                If(in_guard_time,
                    If(buf_just_written,
                        self.underflow.eq(1)
                    ).Else(
                        fifo.we.eq(1)
                    )
                ),
                If(self.we & ~replace & ~nop & ~any_error,
                   fifo.we.eq(1)
                )
            )

        # Buffer write
        # Must come after read to handle concurrent read+write properly
        self.sync.rsys += [
            buf_just_written.eq(0),
            If(self.we & ~nop & ~any_error, buf_just_written.eq(1),
               buf_pending.eq(1), buf.eq(self.ev))
        ]
        self.comb += self.writable.eq(fifo.writable)

        # Buffer output of FIFO to improve timing
        dout_stb = Signal()
        dout_ack = Signal()
        dout = Record(ev_layout)
        self.sync.rio += \
            If(fifo.re,
                dout_stb.eq(1),
                dout.eq(fifo.dout)
            ).Elif(dout_ack,
                dout_stb.eq(0)
            )
        self.comb += fifo.re.eq(fifo.readable & (~dout_stb | dout_ack))

        # FIFO read through buffer
        # TODO: report error on stb & busy
        self.comb += [
            dout_ack.eq(dout.timestamp[fine_ts_width:] == counter.value_rio),
            interface.stb.eq(dout_stb & dout_ack)
        ]
        if data_width:
            self.comb += interface.data.eq(dout.data)
        if address_width:
            self.comb += interface.address.eq(dout.address)
        if fine_ts_width:
            self.comb += interface.fine_ts.eq(dout.timestamp[:fine_ts_width])
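
This last, older variant predates two API changes visible in the other examples of this listing: RenameClockDomains gave way to the ClockDomainsRenamer wrapper, and the layout-accepting FIFOs and optree helper were replaced by plain bit widths with layout_len() plus functools.reduce. A minimal sketch of how the FIFO instantiation above would read in that newer style; the width and depth are placeholders:

from migen import *
from migen.genlib.fifo import AsyncFIFO

# Newer spelling of the clock-domain remap used above; a raw bit width
# (e.g. layout_len(ev_layout)) is passed instead of the layout itself.
fifo = ClockDomainsRenamer({"write": "rsys", "read": "rio"})(
    AsyncFIFO(32, 64))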