def __init__(self, ddr_wr_port, ddr_rd_port, udp_port):
    """ADC capture pipeline: ADC stream -> buffer FIFO -> DRAM-backed FIFO
    -> width converter -> UDP fragmenter -> UDP port.

    CSRs expose capture control (load/read), capture size, destination
    IP/port and full/error status.

    :param ddr_wr_port: LiteDRAM write port used by the DRAM FIFO.
    :param ddr_rd_port: LiteDRAM read port used by the DRAM FIFO.
    :param udp_port: UDP user port the captured data is streamed to.
    """
    # NOTE(review): a dead `SIZE = 1024 * 1024` assignment that was
    # immediately overwritten has been removed; 1024 is the effective value.
    SIZE = 1024
    self.fifo_full = CSRStatus(reset=0)
    self.fifo_error = CSRStatus(reset=0)
    # Start filling the DRAM FIFO with ADC samples.
    self.fifo_load = CSRStorage(reset=0)
    # Start draining the DRAM FIFO towards the UDP fragmenter.
    self.fifo_read = CSRStorage(reset=0)
    self.fifo_size = CSRStorage(32, reset=SIZE)
    self.dst_ip = CSRStorage(32, reset=convert_ip("192.168.1.114"))
    self.dst_port = CSRStorage(16, reset=7778)
    dw = 64
    print(
        f"Write port: A ({ddr_wr_port.address_width})/ D ({ddr_wr_port.data_width})"
    )
    print(
        f"Read port: A ({ddr_rd_port.address_width})/ D ({ddr_rd_port.data_width})"
    )
    self.submodules.dram_fifo = dram_fifo = LiteDRAMFIFO(
        data_width=dw,
        base=0,
        depth=SIZE,
        write_port=ddr_wr_port,
        read_port=ddr_rd_port,
        with_bypass=True,
    )
    self.submodules.adcs = adcs = ADCStream(1, dw)
    self.fifo_counter = fifo_counter = Signal(24)
    self.load_fifo = load_fifo = Signal()
    # adc --> buffer_fifo (small elastic buffer in front of the DRAM FIFO)
    self.submodules.buffer_fifo = buffer_fifo = stream.SyncFIFO(
        stream.EndpointDescription([("data", dw)]), 256, buffered=True)
    # buffer_fifo --> dram_fifo
    fifo_size = Signal(32)
    self.sync += [
        fifo_size.eq(self.fifo_size.storage),
        # Writing 1 to fifo_load (re)starts a capture.
        If(self.fifo_load.re & self.fifo_load.storage,
           fifo_counter.eq(0),
           load_fifo.eq(1)),
        # While capturing, count samples and mirror DRAM FIFO writability
        # into the error CSR.
        If(load_fifo & adcs.source.valid,
           self.fifo_full.status.eq(0),
           self.fifo_error.status.eq(~dram_fifo.dram_fifo.ctrl.writable),
           fifo_counter.eq(fifo_counter + 1)),
        # Stop after fifo_size samples and flag completion via fifo_full.
        If((fifo_counter == fifo_size - 1) & adcs.source.valid,
           load_fifo.eq(0),
           self.fifo_full.status.eq(1)),
    ]
    self.comb += [
        buffer_fifo.sink.data.eq(adcs.source.data),
        buffer_fifo.sink.valid.eq(adcs.source.valid & load_fifo),
        buffer_fifo.source.connect(dram_fifo.sink),
    ]
    # fifo --> stride converter (dw bits down to the UDP port width)
    self.submodules.stride_converter = sc = stream.Converter(
        dw, udp_port.dw)
    self.read_from_dram_fifo = read_from_dram_fifo = Signal()
    self.comb += [dram_fifo.source.connect(sc.sink)]
    self.receive_count = receive_count = Signal(24)
    self.sync += [
        If(dram_fifo.source.valid & dram_fifo.source.ready,
           receive_count.eq(receive_count + 1)).Elif(
               read_from_dram_fifo == 0, receive_count.eq(0))
    ]
    # --> udp fragmenter -->
    self.submodules.udp_fragmenter = udp_fragmenter = UDPFragmenter(
        udp_port.dw)
    self.sync += read_from_dram_fifo.eq(self.fifo_read.storage)
    self.comb += If(
        read_from_dram_fifo,
        # TODO: There is a bug somewhere in the converter,
        # its source.last somehow gets set, no idea why. That signal is of no real use
        # for the fragmenter anyways, so we live without it
        sc.source.connect(udp_fragmenter.sink, omit={'total_size', 'last'}))
    # Length is in bytes: fifo_size words of dw bits each.
    # TODO: 8 should be adcstream data width // 8
    self.comb += udp_fragmenter.sink.length.eq(fifo_size << log2_int(dw // 8))
    self.comb += udp_fragmenter.source.connect(udp_port.sink)
    self.comb += [
        # param
        udp_port.sink.src_port.eq(4321),
        udp_port.sink.dst_port.eq(self.dst_port.storage),
        udp_port.sink.ip_address.eq(self.dst_ip.storage),
        # payload
        udp_port.sink.error.eq(0)
    ]
    # debug: latch the first and (near-)last captured samples.
    # NOTE(review): last_sample compares against the compile-time SIZE while
    # the capture length is the run-time fifo_size CSR — these disagree when
    # fifo_size != SIZE; confirm intended behavior.
    self.first_sample, self.last_sample = Signal(16), Signal(16)
    self.sync += [
        If(fifo_counter == 1, self.first_sample.eq(adcs.source.data[:16])),
        If(fifo_counter == SIZE - 2, self.last_sample.eq(adcs.source.data[:16])),
    ]
def _get_uart_fifo(depth, sink_cd="sys", source_cd="sys"):
    """Return an 8-bit-wide FIFO of the requested depth.

    A plain SyncFIFO is returned when sink and source share a clock domain;
    otherwise an AsyncFIFO is built and its write/read domains are renamed
    to the sink/source clock domains.
    """
    if sink_cd == source_cd:
        return stream.SyncFIFO([("data", 8)], depth)
    renamer = ClockDomainsRenamer({"write": sink_cd, "read": source_cd})
    return renamer(stream.AsyncFIFO([("data", 8)], depth))
def __init__(self, pads, dw=32, timeout=1024):
    """FT601 (USB3 FIFO bridge) synchronous-mode PHY.

    Bridges the chip's shared data bus to two clock-crossing stream FIFOs:
    ``sink`` (sys -> usb, towards the FT601) and ``source`` (usb -> sys,
    from the FT601). All bus-facing logic runs in the "usb" clock domain;
    outputs are registered through ODDR primitives so they change on the
    bus clock edge.

    :param pads: FT601 pads (data, be, rd_n, wr_n, oe_n, rxf_n, txe_n, ...).
    :param dw: data bus width in bits.
    :param timeout: cycles to keep one direction before yielding to the
        other when both read and write are pending.
    """
    # usb -> sys FIFO for received data.
    read_fifo = ClockDomainsRenamer({
        "write": "usb",
        "read": "sys"
    })(stream.AsyncFIFO(phy_description(dw), 128))
    # sys -> usb FIFO for data to transmit.
    write_fifo = ClockDomainsRenamer({
        "write": "sys",
        "read": "usb"
    })(stream.AsyncFIFO(phy_description(dw), 128))
    # Small usb-domain staging buffer in front of read_fifo.
    read_buffer = ClockDomainsRenamer("usb")(stream.SyncFIFO(
        phy_description(dw), 4))
    self.comb += read_buffer.source.connect(read_fifo.sink)
    self.submodules += read_fifo
    self.submodules += read_buffer
    self.submodules += write_fifo
    self.read_buffer = read_buffer
    self.sink = write_fifo.sink
    self.source = read_fifo.source
    # Shared data bus: tristate with direction controlled by data_oe.
    self.tdata_w = tdata_w = Signal(dw)
    self.data_r = data_r = Signal(dw)
    self.data_oe = data_oe = Signal()
    self.specials += Tristate(pads.data, tdata_w, data_oe, data_r)
    data_w = Signal(dw)
    _data_w = Signal(dw)
    self.sync.usb += [_data_w.eq(data_w)]
    # Drive each output data bit through an ODDR so the pin updates
    # aligned with the usb clock (registered + combinatorial halves).
    for i in range(dw):
        self.specials += [
            Instance("ODDR",
                     p_DDR_CLK_EDGE="SAME_EDGE",
                     i_C=ClockSignal("usb"),
                     i_CE=1,
                     i_S=0,
                     i_R=0,
                     i_D1=_data_w[i],
                     i_D2=data_w[i],
                     o_Q=tdata_w[i])
        ]
    # Active-low bus strobes; registered copies feed the ODDR D1 inputs.
    self.rd_n = rd_n = Signal()
    _rd_n = Signal(reset=1)
    self.wr_n = wr_n = Signal()
    _wr_n = Signal(reset=1)
    self.oe_n = oe_n = Signal()
    _oe_n = Signal(reset=1)
    self.sync.usb += [
        _rd_n.eq(rd_n),
        _wr_n.eq(wr_n),
        _oe_n.eq(oe_n),
    ]
    self.specials += [
        Instance("ODDR",
                 p_DDR_CLK_EDGE="SAME_EDGE",
                 i_C=ClockSignal("usb"),
                 i_CE=1,
                 i_S=0,
                 i_R=0,
                 i_D1=_rd_n,
                 i_D2=rd_n,
                 o_Q=pads.rd_n),
        Instance("ODDR",
                 p_DDR_CLK_EDGE="SAME_EDGE",
                 i_C=ClockSignal("usb"),
                 i_CE=1,
                 i_S=0,
                 i_R=0,
                 i_D1=_wr_n,
                 i_D2=wr_n,
                 o_Q=pads.wr_n),
        Instance("ODDR",
                 p_DDR_CLK_EDGE="SAME_EDGE",
                 i_C=ClockSignal("usb"),
                 i_CE=1,
                 i_S=0,
                 i_R=0,
                 i_D1=_oe_n,
                 i_D2=oe_n,
                 o_Q=pads.oe_n)
    ]
    self.comb += [
        pads.rst.eq(~ResetSignal("usb")),
        pads.be.eq(0xf),      # all byte lanes enabled
        pads.siwua.eq(1),
        # We drive the bus whenever we are not asserting output-enable to
        # the FT601 (oe_n high = FPGA drives).
        data_oe.eq(oe_n),
    ]
    fsm = FSM()
    self.submodules.fsm = ClockDomainsRenamer("usb")(fsm)
    # Spill registers: a word accepted from/for the bus that could not be
    # forwarded in the same cycle.
    self.tempsendval = tempsendval = Signal(dw)
    self.temptosend = temptosend = Signal()
    self.tempreadval = tempreadval = Signal(dw)
    self.temptoread = temptoread = Signal()
    self.wants_read = wants_read = Signal()
    self.wants_write = wants_write = Signal()
    # Fairness counters: how long the other direction has been starved.
    self.cnt_write = cnt_write = Signal(max=timeout + 1)
    self.cnt_read = cnt_read = Signal(max=timeout + 1)
    first_write = Signal()
    self.comb += [
        # rxf_n low: FT601 has data for us; txe_n low: FT601 can accept data.
        wants_read.eq(~temptoread & ~pads.rxf_n),
        wants_write.eq((temptosend | write_fifo.source.valid)
                       & (pads.txe_n == 0)),
    ]
    # One-hot state snapshot for debug/observation.
    self.fsmstate = Signal(4)
    self.comb += [
        self.fsmstate.eq(
            Cat(fsm.ongoing("IDLE"), fsm.ongoing("WRITE"),
                fsm.ongoing("RDWAIT"), fsm.ongoing("READ")))
    ]
    # Drain the spilled read word into read_buffer outside of READ.
    self.sync.usb += [
        If(~fsm.ongoing("READ"),
           If(temptoread,
              If(read_buffer.sink.ready, temptoread.eq(0))))
    ]
    self.comb += [
        If(~fsm.ongoing("READ"),
           If(temptoread,
              read_buffer.sink.data.eq(tempreadval),
              read_buffer.sink.valid.eq(1),
              ))
    ]
    fsm.act(
        "IDLE",
        rd_n.eq(1),
        wr_n.eq(1),
        # Write takes priority over read when both are pending.
        If(
            wants_write,
            oe_n.eq(1),
            NextValue(cnt_write, 0),
            NextValue(first_write, 1),
            NextState("WRITE"),
        ).Elif(wants_read,
               oe_n.eq(0),
               NextState("RDWAIT")).Else(oe_n.eq(1), ))
    fsm.act(
        "WRITE",
        # Count cycles a pending read has been starved.
        If(
            wants_read,
            NextValue(cnt_write, cnt_write + 1),
        ),
        NextValue(first_write, 0),
        rd_n.eq(1),
        If(
            # FT601 went full: stop; spill the in-flight word if any.
            pads.txe_n,
            oe_n.eq(1),
            wr_n.eq(1),
            write_fifo.source.ready.eq(0),
            If(write_fifo.source.valid & ~first_write,
               NextValue(temptosend, 1)),
            NextState("IDLE")).Elif(
                # Resend the previously spilled word first.
                temptosend,
                oe_n.eq(1),
                data_w.eq(tempsendval),
                wr_n.eq(0),
                NextValue(temptosend, 0)).Elif(cnt_write > timeout,
                                               # Yield to the reader.
                                               oe_n.eq(0),
                                               NextState("RDWAIT")).Elif(
                    write_fifo.source.valid,
                    oe_n.eq(1),
                    data_w.eq(write_fifo.source.data),
                    write_fifo.source.ready.eq(1),
                    # Keep a copy in case txe_n rises next cycle.
                    NextValue(tempsendval, write_fifo.source.data),
                    NextValue(temptosend, 0),
                    wr_n.eq(0),
                ).Else(oe_n.eq(1),
                       wr_n.eq(1),
                       NextValue(temptosend, 0),
                       NextState("IDLE")))
    # One turnaround cycle before reading (bus direction change).
    fsm.act("RDWAIT",
            rd_n.eq(0),
            oe_n.eq(0),
            wr_n.eq(1),
            NextValue(cnt_read, 0),
            NextState("READ"))
    fsm.act(
        "READ",
        # Count cycles a pending write has been starved.
        If(
            wants_write,
            NextValue(cnt_read, cnt_read + 1),
        ),
        wr_n.eq(1),
        If(
            # No more data from the FT601.
            pads.rxf_n,
            oe_n.eq(0),
            rd_n.eq(1),
            NextState("IDLE"),
        ).Elif(
            # Yield to the writer after timeout.
            cnt_read > timeout,
            NextValue(cnt_write, 0),
            NextValue(first_write, 1),
            NextState("WRITE"),
            oe_n.eq(1),
        ).Else(
            oe_n.eq(0),
            read_buffer.sink.valid.eq(1),
            read_buffer.sink.data.eq(data_r),
            # Keep a copy in case the buffer stalls this cycle.
            NextValue(tempreadval, data_r),
            If(read_buffer.sink.ready,
               rd_n.eq(0)).Else(NextValue(temptoread, 1),
                                NextState("IDLE"),
                                rd_n.eq(1))))
def __init__(self, axi, port, buffer_depth, base_address):
    """AXI write channel -> LiteDRAM native port adapter.

    Converts AXI bursts on ``axi`` (aw/w/b channels) into single-beat
    commands and write data on the native DRAM ``port``. Arbitration with
    other channels is done externally through cmd_request/cmd_grant.

    :param axi: AXI slave interface (write side used here).
    :param port: LiteDRAM native port (cmd/wdata used).
    :param buffer_depth: depth of the write-data and id/response FIFOs.
    :param base_address: AXI base address subtracted before addressing DRAM.
    """
    assert axi.address_width >= log2_int(base_address)
    assert axi.data_width == port.data_width
    self.cmd_request = Signal()
    self.cmd_grant = Signal()

    # # #

    # Address shift: DRAM commands address whole data words, AXI uses bytes.
    ashift = log2_int(port.data_width // 8)

    # Burst to Beat ----------------------------------------------------------------------------
    # Buffer AW then expand each burst into one beat per transfer.
    aw_buffer = stream.Buffer(
        ax_description(axi.address_width, axi.id_width))
    self.submodules += aw_buffer
    self.comb += axi.aw.connect(aw_buffer.sink)
    aw = stream.Endpoint(ax_description(axi.address_width, axi.id_width))
    aw_burst2beat = AXIBurst2Beat(aw_buffer.source, aw)
    self.submodules.aw_burst2beat = aw_burst2beat

    # Write Buffer -----------------------------------------------------------------------------
    w_buffer = stream.SyncFIFO(w_description(axi.data_width, axi.id_width),
                               buffer_depth,
                               buffered=True)
    self.submodules.w_buffer = w_buffer

    # Write ID Buffer & Response ---------------------------------------------------------------
    # Remember the ID of each accepted burst; emit a B response when the
    # last data beat of that burst leaves the write buffer.
    id_buffer = stream.SyncFIFO([("id", axi.id_width)], buffer_depth)
    resp_buffer = stream.SyncFIFO([("id", axi.id_width), ("resp", 2)],
                                  buffer_depth)
    self.submodules += id_buffer, resp_buffer
    self.comb += [
        # Capture the ID once per burst (on the first beat handshake).
        id_buffer.sink.valid.eq(aw.valid & aw.first & aw.ready),
        id_buffer.sink.id.eq(aw.id),
        If(
            w_buffer.source.valid & w_buffer.source.last
            & w_buffer.source.ready,
            resp_buffer.sink.valid.eq(1),
            resp_buffer.sink.resp.eq(RESP_OKAY),
            resp_buffer.sink.id.eq(id_buffer.source.id),
            id_buffer.source.ready.eq(1)),
        resp_buffer.source.connect(axi.b)
    ]

    # Command ----------------------------------------------------------------------------------
    # Accept and send command to the controller only if:
    # - Address & Data request are *both* valid.
    # - Data buffer is not full.
    self.comb += [
        self.cmd_request.eq(aw.valid & axi.w.valid & w_buffer.sink.ready),
        If(
            self.cmd_request & self.cmd_grant,
            port.cmd.valid.eq(1),
            port.cmd.we.eq(1),
            port.cmd.addr.eq((aw.addr - base_address) >> ashift),
            aw.ready.eq(port.cmd.ready),
            axi.w.connect(w_buffer.sink, omit={"valid", "ready"}),
            # Data beat is consumed in lockstep with command acceptance.
            If(port.cmd.ready,
               w_buffer.sink.valid.eq(1),
               axi.w.ready.eq(1)))
    ]

    # Write Data -------------------------------------------------------------------------------
    self.comb += [
        w_buffer.source.connect(port.wdata, omit={"strb", "id"}),
        # AXI write strobes map onto the native port's write-enable mask.
        port.wdata.we.eq(w_buffer.source.strb)
    ]
def __init__(self, n, aw, address_align, settings):
    """Per-bank SDRAM command machine.

    Buffers requests for bank ``n``, tracks the open row, enforces timing
    constraints (tFAW, tCCD, write-to-precharge) and generates
    activate/precharge/read/write commands for the multiplexer.

    :param n: bank number (drives cmd.ba).
    :param aw: request address width.
    :param address_align: column/row address alignment for the slicer.
    :param settings: controller settings (geom + timing).
    """
    self.req = req = Record(cmd_layout(aw))
    self.refresh_req = Signal()
    self.refresh_gnt = Signal()
    a = settings.geom.addressbits
    ba = settings.geom.bankbits
    self.cmd = cmd = stream.Endpoint(cmd_request_rw_layout(a, ba))

    # # #

    # Command buffer
    cmd_buffer_layout = [("we", 1), ("adr", len(req.adr))]
    cmd_buffer = stream.SyncFIFO(cmd_buffer_layout,
                                 settings.cmd_buffer_depth)
    self.submodules += cmd_buffer
    self.comb += [
        req.connect(cmd_buffer.sink,
                    omit=["wdata_valid", "wdata_ready",
                          "rdata_valid", "rdata_ready",
                          "lock"]),
        # A request is retired when its data phase completes.
        cmd_buffer.source.ready.eq(req.wdata_ready | req.rdata_valid),
        # Lock reordering while a buffered command is pending.
        req.lock.eq(cmd_buffer.source.valid),
    ]

    slicer = _AddressSlicer(settings.geom.colbits, address_align)

    # Row tracking: remember which row (if any) is open in this bank.
    has_openrow = Signal()
    openrow = Signal(settings.geom.rowbits, reset_less=True)
    hit = Signal()
    self.comb += hit.eq(openrow == slicer.row(cmd_buffer.source.adr))
    track_open = Signal()
    track_close = Signal()
    self.sync += \
        If(track_close,
            has_openrow.eq(0)
        ).Elif(track_open,
            has_openrow.eq(1),
            openrow.eq(slicer.row(cmd_buffer.source.adr))
        )

    # Four Activate Window: allow at most 4 activates within tFAW cycles.
    activate = Signal()
    activate_allowed = Signal(reset=1)
    tfaw = settings.timing.tFAW
    if tfaw is not None:
        activate_count = Signal(max=tfaw)
        activate_window = Signal(tfaw)
        self.sync += activate_window.eq(Cat(activate, activate_window))
        # Popcount of the sliding window, built as an adder chain.
        for i in range(tfaw):
            next_activate_count = Signal(max=tfaw)
            self.comb += next_activate_count.eq(activate_count +
                                                activate_window[i])
            activate_count = next_activate_count
        self.comb += If(activate_count >= 4, activate_allowed.eq(0))

    # CAS to CAS: enforce tCCD between consecutive CAS commands.
    cas = Signal()
    cas_allowed = Signal(reset=1)
    tccd = settings.timing.tCCD
    if tccd is not None:
        cas_count = Signal(max=tccd + 1)
        self.sync += \
            If(cas,
                cas_count.eq(tccd - 1)
            ).Elif(~cas_allowed,
                cas_count.eq(cas_count - 1)
            )
        self.comb += cas_allowed.eq(cas_count == 0)

    # Address generation: row address for ACTIVATE, column otherwise.
    sel_row_adr = Signal()
    self.comb += [
        cmd.ba.eq(n),
        If(sel_row_adr,
            cmd.a.eq(slicer.row(cmd_buffer.source.adr))
        ).Else(
            cmd.a.eq(slicer.col(cmd_buffer.source.adr))
        )
    ]

    # Respect write-to-precharge specification
    precharge_time = 2 + settings.timing.tWR - 1 + 1
    self.submodules.precharge_timer = WaitTimer(precharge_time)
    self.comb += self.precharge_timer.wait.eq(~(cmd.valid & cmd.ready
                                                & cmd.is_write))

    # Control and command generation FSM
    self.submodules.fsm = fsm = FSM()
    fsm.act("REGULAR",
        If(self.refresh_req,
            NextState("REFRESH")
        ).Elif(cmd_buffer.source.valid,
            If(has_openrow,
                If(hit,
                    If(cas_allowed,
                        cas.eq(1),
                        # Note: write-to-read specification is enforced by
                        # multiplexer
                        cmd.valid.eq(1),
                        If(cmd_buffer.source.we,
                            req.wdata_ready.eq(cmd.ready),
                            cmd.is_write.eq(1),
                            cmd.we.eq(1),
                        ).Else(
                            req.rdata_valid.eq(cmd.ready),
                            cmd.is_read.eq(1)
                        ),
                        cmd.cas.eq(1)
                    )
                ).Else(
                    # Row miss: close the current row first.
                    NextState("PRECHARGE")
                )
            ).Else(
                If(activate_allowed,
                    NextState("ACTIVATE")
                )
            )
        )
    )
    fsm.act("PRECHARGE",
        # Note: we are presenting the column address, A10 is always low
        If(self.precharge_timer.done,
            cmd.valid.eq(1),
            If(cmd.ready,
                NextState("TRP")
            ),
            cmd.ras.eq(1),
            cmd.we.eq(1),
            cmd.is_cmd.eq(1)
        ),
        track_close.eq(1)
    )
    fsm.act("ACTIVATE",
        activate.eq(1),
        sel_row_adr.eq(1),
        track_open.eq(1),
        cmd.valid.eq(1),
        cmd.is_cmd.eq(1),
        If(cmd.ready,
            NextState("TRCD")
        ),
        cmd.ras.eq(1)
    )
    fsm.act("REFRESH",
        # Grant refresh only after write-to-precharge time has elapsed.
        If(self.precharge_timer.done,
            self.refresh_gnt.eq(1),
        ),
        track_close.eq(1),
        cmd.is_cmd.eq(1),
        If(~self.refresh_req,
            NextState("REGULAR")
        )
    )
    # Timing waits: tRP after precharge, tRCD after activate.
    fsm.delayed_enter("TRP", "ACTIVATE", settings.timing.tRP - 1)
    fsm.delayed_enter("TRCD", "REGULAR", settings.timing.tRCD - 1)
def __init__(self, platform, with_cpu=False, with_analyzer=True,
             with_loopback=False):
    """PCIe Injector example SoC.

    Integrates DDR3 SDRAM, a PCIe x1 endpoint, an FT601 USB3 bridge and
    (optionally) a CPU, a USB loopback path and a LiteScope analyzer.

    :param platform: target platform providing serial/ddram/pcie/usb pads.
    :param with_cpu: instantiate an lm32 CPU (otherwise a UART-Wishbone
        bridge is used for host access).
    :param with_analyzer: add a LiteScope analyzer on the PCIe datapath.
    :param with_loopback: loop USB RX back to USB TX instead of the full
        USB core / Etherbone / TLP stack.
    """
    clk_freq = int(100e6)
    SoCSDRAM.__init__(self, platform, clk_freq,
        cpu_type="lm32" if with_cpu else None,
        integrated_rom_size=0x8000 if with_cpu else 0,
        integrated_sram_size=0x8000,
        with_uart=with_cpu,
        ident="PCIe Injector example design",
        with_timer=with_cpu
    )
    self.submodules.crg = _CRG(platform)

    if not with_cpu:
        # use serial as wishbone bridge when no cpu
        self.add_cpu_or_bridge(UARTWishboneBridge(platform.request("serial"),
                                                  clk_freq,
                                                  baudrate=3000000))
        self.add_wb_master(self.cpu_or_bridge.wishbone)

    # sdram
    self.submodules.ddrphy = a7ddrphy.A7DDRPHY(platform.request("ddram"))
    self.add_constant("READ_LEVELING_BITSLIP", 2)
    self.add_constant("READ_LEVELING_DELAY", 8)
    sdram_module = MT41K256M16(self.clk_freq, "1:4")
    self.register_sdram(self.ddrphy,
                        sdram_module.geom_settings,
                        sdram_module.timing_settings)

    # pcie endpoint
    self.submodules.pciephy = S7PCIEPHY(platform,
                                        platform.request("pcie_x1"),
                                        cd="sys")

    # usb core
    usb_pads = platform.request("usb_fifo")
    # self.submodules.usb_phy = FT245PHYSynchronous(usb_pads, clk_freq, fifo_depth=16)
    self.submodules.usb_phy = FT601Sync(usb_pads, dw=32, timeout=1024)
    if with_loopback:
        # Test mode: echo USB data straight back through a FIFO.
        self.submodules.usb_loopback_fifo = stream.SyncFIFO(
            phy_description(32), 2048)
        self.comb += [
            self.usb_phy.source.connect(self.usb_loopback_fifo.sink),
            self.usb_loopback_fifo.source.connect(self.usb_phy.sink)
        ]
    else:
        self.submodules.usb_core = USBCore(self.usb_phy, clk_freq)
        # usb <--> wishbone
        self.submodules.etherbone = Etherbone(self.usb_core,
                                              self.usb_map["wishbone"])
        self.add_wb_master(self.etherbone.master.bus)
        # usb <--> tlp
        self.submodules.tlp = TLP(self.usb_core, self.usb_map["tlp"])
        self.comb += [
            self.pciephy.source.connect(self.tlp.sender.sink),
            self.tlp.receiver.source.connect(self.pciephy.sink)
        ]
        # wishbone --> msi
        self.submodules.msi = MSI()
        self.comb += self.msi.source.connect(self.pciephy.msi)

    # led blink (heartbeats on the usb and pcie clock domains)
    usb_counter = Signal(32)
    self.sync.usb += usb_counter.eq(usb_counter + 1)
    self.comb += platform.request("user_led", 0).eq(usb_counter[26])

    pcie_counter = Signal(32)
    self.sync.pcie += pcie_counter.eq(pcie_counter + 1)
    self.comb += platform.request("user_led", 1).eq(pcie_counter[26])

    # timing constraints
    self.crg.cd_sys.clk.attr.add("keep")
    self.crg.cd_usb.clk.attr.add("keep")
    self.platform.add_period_constraint(self.crg.cd_sys.clk, 10.0)
    self.platform.add_period_constraint(self.crg.cd_usb.clk, 10.0)
    self.platform.add_period_constraint(
        self.platform.lookup_request("pcie_x1").clk_p, 10.0)

    if with_analyzer:
        analyzer_signals = [
            self.pciephy.sink.valid,
            self.pciephy.sink.ready,
            self.pciephy.sink.last,
            self.pciephy.sink.dat,
            self.pciephy.sink.be,
            self.pciephy.source.valid,
            self.pciephy.source.ready,
            self.pciephy.source.last,
            self.pciephy.source.dat,
            self.pciephy.source.be
        ]
        self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals,
                                                     1024,
                                                     cd="sys")
def __init__(self, cs_width=1, tx_fifo_depth=1, rx_fifo_depth=1):
    """CSR front-end for an SPI PHY.

    Exposes chip-select, transfer configuration (length/width/mask), a
    combined RX/TX data register and FIFO status bits; buffers the
    phy-bound (TX) and phy-originated (RX) streams through SyncFIFOs.
    """
    # Stream endpoints towards/from the PHY.
    self.sink = stream.Endpoint(spi_phy2core_layout)
    self.source = stream.Endpoint(spi_core2phy_layout)
    self.cs = Signal(cs_width)
    assert self.sink.data.nbits == self.source.data.nbits

    # CSRs.
    self._cs = CSRStorage(cs_width)
    self._phyconfig = CSRStorage(fields=[
        CSRField("len", size=8, offset=0,
                 description="SPI Xfer length (in bits)."),
        CSRField("width", size=4, offset=8,
                 description="SPI Xfer width (1/2/4/8)."),
        CSRField(
            "mask", size=8, offset=16,
            description=
            "SPI DQ output enable mask (set bits to ``1`` to enable output drivers on DQ lines)."
        ),
    ], description="SPI PHY settings.")
    self._rxtx = CSR(self.source.data.nbits)
    self._status = CSRStatus(fields=[
        CSRField("tx_ready", size=1, offset=0,
                 description="TX FIFO is not full."),
        CSRField("rx_ready", size=1, offset=1,
                 description="RX FIFO is not empty."),
    ])

    # # #

    # FIFOs between the CSR interface and the PHY streams.
    fifo_to_phy = stream.SyncFIFO(spi_core2phy_layout, depth=tx_fifo_depth)
    fifo_from_phy = stream.SyncFIFO(spi_phy2core_layout, depth=rx_fifo_depth)
    self.submodules += fifo_to_phy, fifo_from_phy

    self.comb += [
        # Stream plumbing: phy -> RX FIFO, TX FIFO -> phy.
        self.sink.connect(fifo_from_phy.sink),
        fifo_to_phy.source.connect(self.source),
        # SPI CS driven straight from its CSR.
        self.cs.eq(self._cs.storage),
    ]

    # SPI TX (MOSI): a CSR write pushes one transfer descriptor.
    tx_sink = fifo_to_phy.sink
    self.comb += [
        tx_sink.valid.eq(self._rxtx.re),
        tx_sink.data.eq(self._rxtx.r),
        tx_sink.len.eq(self._phyconfig.fields.len),
        tx_sink.width.eq(self._phyconfig.fields.width),
        tx_sink.mask.eq(self._phyconfig.fields.mask),
        tx_sink.last.eq(1),
        self._status.fields.tx_ready.eq(tx_sink.ready),
    ]

    # SPI RX (MISO): a CSR read pops one received word.
    rx_source = fifo_from_phy.source
    self.comb += [
        rx_source.ready.eq(self._rxtx.we),
        self._rxtx.w.eq(rx_source.data),
        self._status.fields.rx_ready.eq(rx_source.valid),
    ]
def __init__(self, platform):
    """Wrapper around the external JpegEnc HDL core.

    Pipeline: 16-bit YCbCr 4:2:2 stream in -> chroma upsampler (4:4:4) ->
    JpegEnc FDCT input, JPEG bytes out through a FIFO to ``source``.
    Host access goes through a Wishbone clock-domain crossing into the
    encoder's OPB register bus.
    """
    self.sink = stream.Endpoint([("data", 16)])
    self.source = stream.Endpoint([("data", 8)])
    self.bus = wishbone.Interface()

    # # #

    # chroma upsampler: split the 16-bit word into Y / CbCr components.
    ycbcr422to444 = ClockDomainsRenamer("encoder")(YCbCr422to444())
    self.submodules += ycbcr422to444
    self.comb += [
        Record.connect(self.sink, ycbcr422to444.sink, omit=["data"]),
        ycbcr422to444.sink.y.eq(self.sink.data[:8]),
        ycbcr422to444.sink.cb_cr.eq(self.sink.data[8:])
    ]

    # Delay line aligning upsampler output with the encoder's FDCT FIFO
    # read timing (JpegEnc expects data a fixed number of cycles after
    # asserting fdct_fifo_rd — see fdct_fifo_q tap on d4 below).
    fdct_fifo_rd = Signal()
    fdct_fifo_q = Signal(24)
    fdct_fifo_hf_full = Signal()
    fdct_data_d1 = Signal(24)
    fdct_data_d2 = Signal(24)
    fdct_data_d3 = Signal(24)
    fdct_data_d4 = Signal(24)
    fdct_data_d5 = Signal(24)  # NOTE(review): d5 appears unused — confirm.
    self.sync.encoder += [
        If(
            fdct_fifo_rd,
            fdct_data_d1.eq(
                Cat(ycbcr422to444.source.y, ycbcr422to444.source.cb,
                    ycbcr422to444.source.cr)),
        ),
        fdct_data_d2.eq(fdct_data_d1),
        fdct_data_d3.eq(fdct_data_d2),
        fdct_data_d4.eq(fdct_data_d3),
        fdct_data_d5.eq(fdct_data_d4)
    ]
    self.comb += [
        fdct_fifo_q.eq(fdct_data_d4),
        fdct_fifo_hf_full.eq(ycbcr422to444.source.valid),
        ycbcr422to444.source.ready.eq(fdct_fifo_rd)
    ]

    # output fifo: buffers encoded bytes; almost-full backpressures JpegEnc.
    output_fifo_almost_full = Signal()
    output_fifo = stream.SyncFIFO([("data", 8)], 1024, buffered=True)
    output_fifo = ClockDomainsRenamer("encoder")(output_fifo)
    self.submodules += output_fifo
    self.comb += [
        output_fifo_almost_full.eq(output_fifo.fifo.level > 1024 - 128),
        Record.connect(output_fifo.source, self.source)
    ]

    # Wishbone cross domain crossing (sys <-> encoder clock domains).
    jpeg_bus = wishbone.Interface()
    self.specials += Instance(
        "wb_async_reg",
        i_wbm_clk=ClockSignal(),
        i_wbm_rst=ResetSignal(),
        i_wbm_adr_i=self.bus.adr,
        i_wbm_dat_i=self.bus.dat_w,
        o_wbm_dat_o=self.bus.dat_r,
        i_wbm_we_i=self.bus.we,
        i_wbm_sel_i=self.bus.sel,
        i_wbm_stb_i=self.bus.stb,
        o_wbm_ack_o=self.bus.ack,
        o_wbm_err_o=self.bus.err,
        #o_wbm_rty_o=,
        i_wbm_cyc_i=self.bus.cyc,
        i_wbs_clk=ClockSignal("encoder"),
        i_wbs_rst=ResetSignal("encoder"),
        o_wbs_adr_o=jpeg_bus.adr,
        i_wbs_dat_i=jpeg_bus.dat_r,
        o_wbs_dat_o=jpeg_bus.dat_w,
        o_wbs_we_o=jpeg_bus.we,
        o_wbs_sel_o=jpeg_bus.sel,
        o_wbs_stb_o=jpeg_bus.stb,
        i_wbs_ack_i=jpeg_bus.ack,
        i_wbs_err_i=jpeg_bus.err,
        i_wbs_rty_i=0,
        o_wbs_cyc_o=jpeg_bus.cyc)

    # encoder: word-aligned OPB address (2 LSBs zero), 0x3ff register window.
    self.specials += Instance(
        "JpegEnc",
        i_CLK=ClockSignal("encoder"),
        i_RST=ResetSignal("encoder"),
        i_OPB_ABus=Cat(Signal(2), jpeg_bus.adr) & 0x3ff,
        i_OPB_BE=jpeg_bus.sel,
        i_OPB_DBus_in=jpeg_bus.dat_w,
        i_OPB_RNW=~jpeg_bus.we,
        i_OPB_select=jpeg_bus.stb & jpeg_bus.cyc,
        o_OPB_DBus_out=jpeg_bus.dat_r,
        o_OPB_XferAck=jpeg_bus.ack,
        #o_OPB_retry=,
        #o_OPB_toutSup=,
        o_OPB_errAck=jpeg_bus.err,
        o_fdct_fifo_rd=fdct_fifo_rd,
        i_fdct_fifo_q=fdct_fifo_q,
        i_fdct_fifo_hf_full=fdct_fifo_hf_full,
        #o_fdct_fifo_dval_o=,
        o_ram_byte=output_fifo.sink.data,
        o_ram_wren=output_fifo.sink.valid,
        #o_ram_wraddr=,
        #o_frame_size=,
        i_outif_almost_full=output_fifo_almost_full)

    # add vhdl sources
    platform.add_source_dir(os.path.join("gateware", "encoder", "vhdl"))
    # add verilog sources
    platform.add_source_dir(os.path.join("gateware", "encoder", "verilog"))
def __init__(self, clock_domain, dram_port, base, length):
    """Circular-buffer DRAM recorder with pre/post-trigger capture.

    Packs incoming ``trigger_layout`` records into DRAM-word-wide blocks
    (via a stride converter), writes them to a circular region
    [base, base+length) and stops a configurable number of records after a
    trigger. Control/status CSRs live in "sys"; the datapath runs in
    ``clock_domain`` with MultiReg CDC between the two.

    :param clock_domain: name of the datapath clock domain.
    :param dram_port: LiteDRAM port (defines data/address widths).
    :param base: byte base address of the capture region.
    :param length: byte length of the capture region.
    """
    # *********************************************************
    # *                      Interface                        *
    # *********************************************************
    self.start = CSR()             # Start recorder
    self.stop = CSR()              # Stop recorder
    self.finished = CSRStatus()    # Capture finished
    self.size = CSRStorage(32)     # Post trigger size
    self.offset = CSRStorage(32)   # Trigger offset (Pre trigger size)
    self.trigAddr = CSRStatus(32)  # Trigger storage address
    self.state = CSRStatus(3)      # FSM state (for host observation)
    self.mode = CSRStorage()       # 0 = RAW, 1 = FRAME
    self.preCount = CSRStatus(32)  # Records counted before the trigger
    self.postCount= CSRStatus(32)  # Records counted after the trigger
    self.base = CSRConstant(base)
    self.length = CSRConstant(length)
    self.dw = CSRConstant(dram_port.data_width)
    self.enableTrigger = Signal()  # Signal to enable the trigger
    self.forced = Signal()         # Another recorder ask us to record datas
    self.record = Signal()         # Start the other recorder
    self.trigExt = Signal()        # Another recorder has trigged
    self.source = source = stream.Endpoint([("address",
                                             dram_port.address_width),
                                            ("data",
                                             dram_port.data_width)])
    self.sink = sink = stream.Endpoint(trigger_layout)

    # *********************************************************
    # *                        Signals                        *
    # *********************************************************
    addr = Signal(dram_port.address_width)
    first = Signal()       # set on the first block of a recording
    ext_trig = Signal()    # external-trigger marker for the current block
    count = Signal(32)
    sof_count = Signal(3)  # start-of-frame events folded into each block
    # Datapath-domain shadows of the sys-domain CSRs (see CDC below).
    _trigExt = Signal()
    _start = Signal()
    _stop = Signal()
    _finished = Signal()
    _size = Signal(32)
    _offset = Signal(32)
    _trigAddr = Signal(32)
    _state = Signal(3)
    _forced = Signal()
    _mode = Signal()
    _preCount = Signal(32)
    _postCount= Signal(32)

    # *********************************************************
    # *                       Constants                       *
    # *********************************************************
    ADDRINCR = dram_port.data_width//8
    # Count mode
    RAW_MODE = 0
    FRAME_MODE = 1
    # Reserve 5 bits to indicate the number of valid chunks in record
    VALID_TOKEN_BITS = 5
    # Reserve 3 bits to indicate the number of SOF in this block
    SOF_COUNT_BITS = 3
    # Meta data position in DDR bloc write (negative bit indices from MSB).
    RECORD_START = -1
    VALID_TOKEN_COUNT_START = RECORD_START
    VALID_TOKEN_COUNT_END = RECORD_START - VALID_TOKEN_BITS
    VALID_TOKEN_COUNT = slice(VALID_TOKEN_COUNT_END,VALID_TOKEN_COUNT_START)
    TRIG_EXT = VALID_TOKEN_COUNT_END - 1
    SOF_COUNT_START = TRIG_EXT
    SOF_COUNT_END = TRIG_EXT - SOF_COUNT_BITS
    SOF_COUNT = slice(SOF_COUNT_END,SOF_COUNT_START)

    print("Memory data width = {:d} bits".format(dram_port.data_width))
    trigger_nbits = len(stream.Endpoint(trigger_layout).payload.raw_bits())
    print("Trigger stream data size = {:d} bits".format(trigger_nbits))
    recorder_reserved_bits = (len(first) + VALID_TOKEN_BITS +
                              len(_trigExt) + SOF_COUNT_BITS)
    data_per_chunk = ((dram_port.data_width - recorder_reserved_bits)
                      // trigger_nbits)
    print("Chunks per block = {:d} ({:d} bits)".format(
        data_per_chunk, data_per_chunk * trigger_nbits))
    print("Bits unused = {:d}".format(dram_port.data_width -
                                      recorder_reserved_bits -
                                      (data_per_chunk * trigger_nbits)))
    self.nb = CSRConstant(data_per_chunk)

    # *********************************************************
    # *                          CDC                          *
    # *********************************************************
    self.specials += MultiReg(self.start.re, _start, clock_domain)
    self.specials += MultiReg(self.stop.re, _stop, clock_domain)
    self.specials += MultiReg(_finished, self.finished.status, "sys")
    self.specials += MultiReg(self.size.storage, _size, clock_domain)
    self.specials += MultiReg(self.offset.storage, _offset, clock_domain)
    self.specials += MultiReg(_trigAddr, self.trigAddr.status, "sys")
    self.specials += MultiReg(_state, self.state.status, "sys")
    self.specials += MultiReg(self.forced, _forced, clock_domain)
    self.specials += MultiReg(self.mode.storage, _mode, clock_domain)
    self.specials += MultiReg(self.trigExt, _trigExt, clock_domain)
    self.specials += MultiReg(_preCount, self.preCount.status, "sys")
    self.specials += MultiReg(_postCount, self.postCount.status, "sys")

    # *********************************************************
    # *                      Submodules                       *
    # *********************************************************
    # Remove sof and eof from trigger_layout
    #fifo_layout = trigger_layout[:-2]
    # Packs data_per_chunk trigger records into one DRAM word.
    stride = ResetInserter()(StrideConverter2(
        trigger_layout,
        recorder_layout(data_per_chunk),
        reverse=False,
        report_valid_token_count=True))
    self.submodules.stride = ClockDomainsRenamer(clock_domain)(stride)
    fifo = ResetInserter()(stream.SyncFIFO(trigger_layout, 1024,
                                           buffered=True))
    self.submodules.fifo = ClockDomainsRenamer(clock_domain)(fifo)

    # *********************************************************
    # *                     Combinatorial                     *
    # *********************************************************
    self.comb += [
        sink.connect(self.fifo.sink),
        self.fifo.source.connect(stride.sink),
        source.valid.eq(stride.source.valid),
        stride.source.ready.eq(source.ready),
        # Word address: drop the byte-offset bits.
        source.address.eq(addr[log2_int(ADDRINCR):32]),
        source.data.eq(stride.source.payload.raw_bits()),
        source.data[RECORD_START].eq(first),
        # The MSb indicates the start of the recording
        # In case we read data back, and this bit is set, we know
        # we didn't cycle over the circular buffer
        source.data[VALID_TOKEN_COUNT].eq(stride.valid_token_count),
        source.data[TRIG_EXT].eq(ext_trig),
        source.data[SOF_COUNT].eq(sof_count),
    ]

    # *********************************************************
    # *                      Synchronous                      *
    # *********************************************************
    sync = getattr(self.sync, clock_domain)
    sync += [
        # Count SOF
        If(stride.sink.valid & stride.sink.ready & stride.sink.sof,
           sof_count.eq(sof_count + 1)),
        # DRAM address increment
        If(stride.source.valid & stride.source.ready,
           first.eq(0),
           ext_trig.eq(0),
           #If at the same time we get a SOF entering the converter, count it
           If(stride.sink.valid & stride.sink.ready & stride.sink.sof,
              sof_count.eq(1),
           ).Else(
              sof_count.eq(0),
           ),
           addr.eq(addr + ADDRINCR),
        ),
        # DRAM address wrap
        If(addr == (base + length - ADDRINCR),
           addr.eq(base)),
        # Mark the block when an external trigger arrives in FORCED state.
        If(fifo.source.trig & fifo.source.valid & (_state == 6),
           ext_trig.eq(1)),
        # While IDLE: rewind to base and re-arm the first-block marker.
        If(_state == 0,
           addr.eq(base),
           first.eq(1),
        ),
    ]

    # *********************************************************
    # *                          FSM                          *
    # *********************************************************
    fsm = FSM(reset_state="IDLE")
    self.submodules.fsm = ClockDomainsRenamer(clock_domain)(fsm)
    fsm.act("IDLE",
        NextValue(_state, 0),
        NextValue(count, 0),
        NextValue(self.record, 0),
        NextValue(stride.flush, 0),
        NextValue(self.enableTrigger, 0),
        NextValue(_finished, 1),
        # Host-initiated capture.
        If(_start,
            NextValue(_finished, 0),
            NextState("FILL_PRE_TRIG"),
            NextValue(self.record, 1),
            stride.reset.eq(1),
            NextValue(_preCount, 0),
            NextValue(_postCount, 0),
        ),
        # Capture requested by the companion recorder.
        If(_forced,
            NextValue(_finished, 0),
            stride.reset.eq(1),
            NextState("FORCED"),
            NextValue(_preCount, 0),
            NextValue(_postCount, 0),
        ),
    )
    fsm.act("FILL_PRE_TRIG",
        NextValue(_state, 1),
        NextValue(self.fifo.reset, 0),
        # Collect `offset` records before arming the trigger.
        If(count == _offset,
            NextValue(count, 0),
            NextValue(self.enableTrigger, 1),
            NextState("WAIT_TRIGGER")
        ).Else(
            If(stride.sink.ready & stride.sink.valid,
                If(_mode == RAW_MODE,
                    NextValue(count, count + 1),
                    NextValue(_preCount, _preCount + 1),
                ).Else(
                    # FRAME mode: count end-of-frame records only.
                    If(fifo.source.eof,
                        NextValue(count, count + 1),
                        NextValue(_preCount, _preCount + 1),
                    )
                )
            )
        ),
        If(_stop,
            NextValue(stride.flush, 1),
            NextValue(self.fifo.reset, 1),
            NextState("ABORT")
        )
    )
    fsm.act("WAIT_TRIGGER",
        NextValue(_state, 2),
        If(stride.sink.ready & stride.sink.valid,
            If(fifo.source.trig & fifo.source.valid,
                NextValue(count, 0),
                # If stride is complete, this next data will be in the
                # next address block
                If(stride.valid_token_count == data_per_chunk,
                    NextValue(_trigAddr, addr + ADDRINCR),
                ).Else(
                    NextValue(_trigAddr, addr),
                ),
                NextState("FILL_POST_TRIG"),
                # Degenerate case: a 1-record post-trigger window that
                # ends on the same record.
                If(fifo.source.eof & (_size == 1),
                    NextValue(stride.flush, 1),
                    NextValue(self.fifo.reset, 1),
                    NextState("DONE")
                )
            )
        ),
        If(_stop,
            NextValue(stride.flush, 1),
            NextValue(self.fifo.reset, 1),
            NextState("ABORT")
        )
    )
    fsm.act("FILL_POST_TRIG",
        NextValue(_state, 3),
        # Collect `size` records after the trigger, then finish.
        If(count == _size,
            NextValue(stride.flush, 1),
            NextValue(self.fifo.reset, 1),
            NextState("DONE")
        ).Else(
            If(stride.sink.ready & stride.sink.valid,
                If(_mode == RAW_MODE,
                    NextValue(count, count + 1),
                    NextValue(_postCount, _postCount + 1),
                ).Else(
                    If(fifo.source.eof,
                        NextValue(count, count + 1),
                        NextValue(_postCount, _postCount + 1),
                    )
                )
            )
        ),
        If(_stop,
            NextValue(stride.flush, 1),
            NextValue(self.fifo.reset, 1),
            NextState("ABORT")
        )
    )
    fsm.act("DONE",
        NextValue(_state, 4),
        NextValue(self.enableTrigger, 0),
        NextValue(_finished, 1),
        NextValue(self.fifo.reset, 1),
        NextValue(self.record, 0),
        NextValue(stride.flush, 0),
        # Wait for the host to acknowledge via stop.
        If(_stop,
            NextState("IDLE")
        )
    )
    fsm.act("ABORT",
        NextValue(_state, 5),
        NextValue(self.enableTrigger, 0),
        NextValue(_finished, 1),
        NextValue(self.fifo.reset, 1),
        NextValue(self.record, 0),
        NextValue(_trigAddr, addr),
        NextValue(stride.flush, 0),
        NextState("IDLE")
    )
    fsm.act("FORCED",
        NextValue(_state, 6),
        NextValue(self.fifo.reset, 0),
        # Track where the external trigger landed.
        If(fifo.source.trig & fifo.source.valid,
            NextValue(_trigAddr, addr)),
        If(_forced == 0,
            NextValue(stride.flush, 1),
            NextState("IDLE"),
        )
    )
def __init__(self, clock_domain, fifo_size):
    """PCIe symbol-stream filter.

    Buffers incoming (descrambled) 16-bit symbol pairs into a FIFO
    (writer FSM), then selectively forwards frame types — SKIP/FTS/IDLE/
    TS1/TS2 ordered sets, TLPs and DLLPs — according to per-type enable
    bits (reader FSM), inserting 32-bit timestamps (two 16-bit beats)
    in front of forwarded frames.

    Parameters:
        clock_domain: name of the clock domain all logic runs in.
        fifo_size:    depth of the intermediate symbol FIFO.

    Fix vs. previous revision: in ``TIMESTAMP_LSB`` the
    ``state_after == state.IDLE`` branch now checks ``idleEnabled``
    (it previously checked ``ftsEnabled`` — copy-paste error).
    """
    # *********************************************************
    # *                       Interface                       *
    # *********************************************************
    self.filterEnable = CSRStorage()
    self.filterConfig = CSRStorage(32)       # Filter configuration (per-type enable bits)
    self.tlpDllpTimeoutCnt = CSRStorage(32)  # Payload size used for error detection
    self.ts = Signal(32)                     # Global time stamp
    self.trigExt = Signal()                  # Insert a trigger flag
    self.source = source = stream.Endpoint(trigger_layout)
    self.sink = sink = stream.Endpoint(descrambler_layout)

    # *********************************************************
    # *                        Signals                        *
    # *********************************************************
    _ts = Signal(32)
    _filterConfig = Signal(32)
    _tlpDllpTimeoutCnt = Signal(32)
    _filterEnable = Signal()
    _trigExt = Signal()
    trigPending = Signal()
    clearTrig = Signal()
    self.trigPending = trigPending
    self._trigExt = _trigExt
    self.clearTrig = clearTrig
    # Per-frame-type forwarding enables, decoded from filterConfig.
    skipEnabled = Signal()
    ftsEnabled = Signal()
    tlpEnabled = Signal()
    dllpEnabled = Signal()
    ts1Enabled = Signal()
    ts2Enabled = Signal()
    idleEnabled = Signal()
    count = Signal(8)         # Symbol counter within the current frame
    insert_ts = Signal()      # Combinatorial: a new timestamp must be emitted
    state_after = Signal(4)   # Reader state to resume after timestamp insertion
    last_ts = Signal(32)      # Timestamp of the last forwarded frame
    payload_cnt = Signal(32)  # Payload length counter for TLP/DLLP error detection
    from_error = Signal()     # Previous frame ended on an error marker
    ts_trig = Signal()        # Timestamp being inserted is a trigger timestamp

    # *********************************************************
    # *                          CDC                          *
    # *********************************************************
    self.specials += MultiReg(self.ts, _ts, clock_domain)
    self.specials += MultiReg(self.filterConfig.storage, _filterConfig, clock_domain)
    self.specials += MultiReg(self.filterEnable.storage, _filterEnable, clock_domain)
    self.specials += MultiReg(self.tlpDllpTimeoutCnt.storage, _tlpDllpTimeoutCnt, clock_domain)
    self.specials += MultiReg(self.trigExt, _trigExt, clock_domain)

    # *********************************************************
    # *                      Submodules                       *
    # *********************************************************
    fifo = ResetInserter()(stream.SyncFIFO(filter_fifo_layout, fifo_size))
    self.submodules.fifo = ClockDomainsRenamer(clock_domain)(fifo)
    buf_in = stream.Buffer(descrambler_layout)
    self.submodules += ClockDomainsRenamer(clock_domain)(buf_in)
    buf_out = stream.Buffer(filter_fifo_layout)
    self.submodules += ClockDomainsRenamer(clock_domain)(buf_out)

    # *********************************************************
    # *                     Combinatorial                     *
    # *********************************************************
    self.comb += [
        sink.connect(buf_in.sink),
        buf_in.source.connect(fifo.sink, omit={"valid", "ts", "error"}),
        self.sink.ready.eq(1),
        fifo.source.connect(buf_out.sink),
        skipEnabled.eq(_filterConfig[0]),
        ftsEnabled.eq(_filterConfig[1]),
        tlpEnabled.eq(_filterConfig[2]),
        dllpEnabled.eq(_filterConfig[3]),
        ts1Enabled.eq(_filterConfig[4]),
        ts2Enabled.eq(_filterConfig[5]),
        idleEnabled.eq(_filterConfig[6]),
    ]

    # Latch an external trigger request until the reader FSM consumes it.
    sync = getattr(self.sync, clock_domain)
    sync += [
        If(_trigExt, trigPending.eq(1)),
        If(clearTrig, trigPending.eq(0))
    ]

    # *********************************************************
    # *                          FSM                          *
    # *********************************************************
    fsmWriter = FSM(reset_state="NO_FILTER")
    self.submodules.fsmWriter = ClockDomainsRenamer(clock_domain)(fsmWriter)
    fsmReader = FSM(reset_state="NO_FILTER")
    self.submodules.fsmReader = ClockDomainsRenamer(clock_domain)(fsmReader)

    # *********************************************************
    # *                    Filter control                     *
    # *********************************************************
    fsmWriter.act("NO_FILTER",
        NextValue(fifo.sink.valid, 0),
        NextValue(buf_out.source.ready, 0),
        If(_filterEnable,
            self.fifo.reset.eq(1),
            NextState("FIND_DELIMITER"),
        )
    )

    # ********************************************************************
    # * Writing side of the FIFO.                                        *
    # * All frames are written to the FIFO, software IDLE are removed.   *
    # * A timestamp is added to each symbol to keep track of the timing. *
    # ********************************************************************

    # *********************************************************
    # *                Detect frames delimiter                *
    # *********************************************************
    fsmWriter.act("FIND_DELIMITER",
        NextValue(fifo.sink.valid, 0),
        NextValue(fifo.sink.error, 0),
        # Ordered sets
        If(sink.osets & (sink.data[8:16] == COM.value),
            NextValue(fifo.sink.valid, 1),
            NextValue(fifo.sink.ts, _ts),
            NextState("ORDERED_SETS"),
        ),
        # TLP
        If((sink.ctrl == 0b10) & (sink.data[8:16] == STP.value),
            NextValue(fifo.sink.valid, 1),
            NextValue(fifo.sink.ts, _ts),
            NextValue(payload_cnt, 0),
            NextState("TLP"),
        ),
        # DLLP
        If((sink.ctrl == 0b10) & (sink.data[8:16] == SDP.value),
            NextValue(fifo.sink.valid, 1),
            NextValue(fifo.sink.ts, _ts),
            NextValue(payload_cnt, 0),
            NextState("DLLP"),
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )

    # *********************************************************
    # *               An ordered set is detected              *
    # *********************************************************
    fsmWriter.act("ORDERED_SETS",
        NextValue(fifo.sink.ts, _ts),
        NextValue(fifo.sink.error, 0),
        # We are done
        If(sink.osets == 0,
            NextValue(fifo.sink.valid, 0),
            NextState("FIND_DELIMITER"),
        ),
        # It's a nested frame. Get directly to TLP
        If(sink.ctrl[1] & (sink.data[8:16] == STP.value),
            NextValue(fifo.sink.valid, 1),
            NextValue(payload_cnt, 0),
            NextState("TLP"),
        ),
        # It's a nested frame. Get directly to DLLP
        If(sink.ctrl[1] & (sink.data[8:16] == SDP.value),
            NextValue(fifo.sink.valid, 1),
            NextValue(payload_cnt, 0),
            NextState("DLLP"),
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )

    # *********************************************************
    # *                   A TLP is detected                   *
    # *********************************************************
    fsmWriter.act("TLP",
        NextValue(fifo.sink.ts, _ts),
        NextValue(fifo.sink.error, 0),
        NextValue(payload_cnt, payload_cnt + 1),
        # By default, if we have a K symbol before END (or the payload
        # exceeds the configured timeout), that's an error.
        If((sink.ctrl[0] & (sink.data[0:8] != END.value)) | sink.ctrl[1] | (payload_cnt > _tlpDllpTimeoutCnt),
            NextValue(fifo.sink.error, 1),
            NextValue(fifo.sink.valid, 1),
            NextState("FIND_DELIMITER"),
        ).Else(
            NextValue(fifo.sink.error, 0),
        ),
        If((buf_in.source.ctrl[0]) & (buf_in.source.data[0:8] == END.value),
            NextValue(fifo.sink.valid, 0),
            NextValue(fifo.sink.error, 0),
            NextState("FIND_DELIMITER"),
        ),
        If(sink.ctrl[1] & (sink.data[8:16] == STP.value),
            NextValue(fifo.sink.valid, 1),
            NextValue(payload_cnt, 0),
            NextState("TLP"),
        ),
        If(sink.ctrl[1] & (sink.data[8:16] == SDP.value),
            NextValue(fifo.sink.valid, 1),
            NextValue(payload_cnt, 0),
            NextState("DLLP"),
        ),
        If(sink.osets & (sink.data[8:16] == COM.value),
            NextValue(fifo.sink.valid, 1),
            NextState("ORDERED_SETS"),
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )

    # *********************************************************
    # *                   A DLLP is detected                  *
    # *********************************************************
    fsmWriter.act("DLLP",
        NextValue(fifo.sink.ts, _ts),
        NextValue(fifo.sink.error, 0),
        NextValue(payload_cnt, payload_cnt + 1),
        # By default, if we have a K symbol before END (or the payload
        # exceeds the configured timeout), that's an error.
        If((sink.ctrl[0] & (sink.data[0:8] != END.value)) | sink.ctrl[1] | (payload_cnt > _tlpDllpTimeoutCnt),
            NextValue(fifo.sink.error, 1),
            NextValue(fifo.sink.valid, 1),
            NextState("FIND_DELIMITER"),
        ).Else(
            NextValue(fifo.sink.error, 0),
        ),
        If((buf_in.source.ctrl[0]) & (buf_in.source.data[0:8] == END.value),
            NextValue(fifo.sink.valid, 0),
            NextValue(fifo.sink.error, 0),
            NextState("FIND_DELIMITER"),
        ),
        If(sink.ctrl[1] & (sink.data[8:16] == STP.value),
            NextValue(fifo.sink.valid, 1),
            NextValue(payload_cnt, 0),
            NextState("TLP"),
        ),
        If(sink.ctrl[1] & (sink.data[8:16] == SDP.value),
            NextValue(fifo.sink.valid, 1),
            NextValue(payload_cnt, 0),
            NextState("DLLP"),
        ),
        If(sink.osets & (sink.data[8:16] == COM.value),
            NextValue(fifo.sink.valid, 1),
            NextState("ORDERED_SETS"),
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )

    # ******************************************************************
    # * Reading side of the FIFO.                                      *
    # * Filtering takes place on the reading side of the FIFO.         *
    # * A timestamp is added to each frame (or frame group) to keep    *
    # * track of the timing.                                           *
    # ******************************************************************

    # *********************************************************
    # *                   Filtering control                   *
    # *********************************************************
    fsmReader.act("NO_FILTER",
        NextValue(source.data, sink.data),
        NextValue(source.ctrl, sink.ctrl),
        NextValue(source.valid, sink.valid),
        NextValue(source.time, 0),
        If(_filterEnable,
            NextState("FILTER"),
        )
    )

    # *********************************************************
    # *                    Frame filtering                    *
    # *********************************************************
    fsmReader.act("FILTER",
        NextValue(source.valid, 0),
        NextValue(source.time, 0),
        NextValue(buf_out.source.ready, 0),
        NextValue(from_error, 0),
        NextValue(source.eof, 0),
        NextValue(clearTrig, 0),
        NextValue(source.trig, 0),
        If(from_error,
            NextValue(buf_out.source.ready, 1),
        ),
        If(buf_out.source.valid,
            # Don't insert a new timestamp when it hasn't changed.
            If(last_ts == buf_out.source.ts,
                insert_ts.eq(0),
            ).Else(
                insert_ts.eq(1),
                NextValue(last_ts, buf_out.source.ts),
            ),
            # ---- SKIP ----
            If(buf_out.source.osets & (buf_out.source.type == osetsType.SKIP) & (buf_out.source.data[8:16] == COM.value),
                If(insert_ts,
                    NextValue(source.data, buf_out.source.ts[16:32]),
                    # When this frame is disabled, we need to change last_ts
                    # in order to force ts insertion on the next frame.
                    If(skipEnabled,
                        NextValue(source.valid, 1),
                        NextValue(source.time, 1),
                    ),
                    NextValue(buf_out.source.ready, 0),
                    NextValue(state_after, state.SKIP),
                    NextValue(ts_trig, 0),
                    NextState("TIMESTAMP_LSB"),
                ).Else(
                    NextValue(count, 0),
                    NextValue(buf_out.source.ready, 1),
                    NextState("SKIP"),
                ),
                If(~skipEnabled, NextValue(last_ts, 0)),
            ),
            # ---- IDLE ----
            If(buf_out.source.osets & (buf_out.source.type == osetsType.IDLE) & (buf_out.source.data[8:16] == COM.value),
                If(insert_ts,
                    NextValue(source.data, buf_out.source.ts[16:32]),
                    # When this frame is disabled, we need to change last_ts
                    # in order to force ts insertion on the next frame.
                    If(idleEnabled,
                        NextValue(source.valid, 1),
                        NextValue(source.time, 1),
                    ),
                    NextValue(buf_out.source.ready, 0),
                    NextValue(state_after, state.IDLE),
                    NextValue(ts_trig, 0),
                    NextState("TIMESTAMP_LSB"),
                ).Else(
                    NextValue(count, 0),
                    NextValue(buf_out.source.ready, 1),
                    NextState("IDLE"),
                ),
                If(~idleEnabled, NextValue(last_ts, 0)),
            ),
            # ---- FTS ----
            If(buf_out.source.osets & (buf_out.source.type == osetsType.FTS) & (buf_out.source.data[8:16] == COM.value),
                If(insert_ts,
                    NextValue(source.data, buf_out.source.ts[16:32]),
                    If(ftsEnabled,
                        NextValue(source.valid, 1),
                        NextValue(source.time, 1),
                    ),
                    NextValue(buf_out.source.ready, 0),
                    NextValue(state_after, state.FTS),
                    NextValue(ts_trig, 0),
                    NextState("TIMESTAMP_LSB"),
                ).Else(
                    NextValue(count, 0),
                    NextValue(buf_out.source.ready, 1),
                    NextState("FTS"),
                ),
                If(~ftsEnabled, NextValue(last_ts, 0)),
            ),
            # ---- TS1 ----
            If(buf_out.source.osets & (buf_out.source.type == osetsType.TS1) & (buf_out.source.data[8:16] == COM.value),
                If(insert_ts,
                    NextValue(source.data, buf_out.source.ts[16:32]),
                    If(ts1Enabled,
                        NextValue(source.valid, 1),
                        NextValue(source.time, 1),
                    ),
                    NextValue(buf_out.source.ready, 0),
                    NextValue(state_after, state.TS1),
                    NextValue(ts_trig, 0),
                    NextState("TIMESTAMP_LSB"),
                ).Else(
                    NextValue(count, 0),
                    NextValue(buf_out.source.ready, 1),
                    NextState("TS1"),
                ),
                If(~ts1Enabled, NextValue(last_ts, 0)),
            ),
            # ---- TS2 ----
            If(buf_out.source.osets & (buf_out.source.type == osetsType.TS2) & (buf_out.source.data[8:16] == COM.value),
                If(insert_ts,
                    NextValue(source.data, buf_out.source.ts[16:32]),
                    If(ts2Enabled,
                        NextValue(source.valid, 1),
                        NextValue(source.time, 1),
                    ),
                    NextValue(buf_out.source.ready, 0),
                    NextValue(state_after, state.TS2),
                    NextValue(ts_trig, 0),
                    NextState("TIMESTAMP_LSB"),
                ).Else(
                    NextValue(count, 0),
                    NextValue(buf_out.source.ready, 1),
                    NextState("TS2"),
                ),
                If(~ts2Enabled, NextValue(last_ts, 0)),
            ),
            # ---- TLP ----
            If(buf_out.source.ctrl[1] & (buf_out.source.data[8:16] == STP.value),
                If(insert_ts,
                    NextValue(source.data, buf_out.source.ts[16:32]),
                    If(tlpEnabled,
                        NextValue(source.valid, 1),
                        NextValue(source.time, 1),
                    ),
                    NextValue(buf_out.source.ready, 0),
                    NextValue(state_after, state.TLP),
                    NextValue(ts_trig, 0),
                    NextState("TIMESTAMP_LSB"),
                ).Else(
                    NextValue(count, 0),
                    NextValue(buf_out.source.ready, 1),
                    NextState("TLP"),
                ),
                If(~tlpEnabled, NextValue(last_ts, 0)),
            ),
            # ---- DLLP ----
            If(buf_out.source.ctrl[1] & (buf_out.source.data[8:16] == SDP.value),
                If(insert_ts,
                    NextValue(source.data, buf_out.source.ts[16:32]),
                    If(dllpEnabled,
                        NextValue(source.valid, 1),
                        NextValue(source.time, 1),
                    ),
                    NextValue(buf_out.source.ready, 0),
                    NextValue(state_after, state.DLLP),
                    NextValue(ts_trig, 0),
                    NextState("TIMESTAMP_LSB"),
                ).Else(
                    NextValue(count, 0),
                    NextValue(buf_out.source.ready, 1),
                    NextState("DLLP"),
                ),
                If(~dllpEnabled, NextValue(last_ts, 0)),
            ),
        # buf_out.source is not valid
        ).Else(
            If(trigPending,
                NextValue(buf_out.source.ready, 0),
                NextValue(state_after, state.FILTER),
                NextState("TIMESTAMP_MSB_TRIG"),
            )
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )

    # *********************************************************
    # *      Insert MSB part of the trigger timestamp         *
    # *********************************************************
    fsmReader.act("TIMESTAMP_MSB_TRIG",
        NextValue(source.time, 1),
        NextValue(source.valid, 1),
        NextValue(source.data, _ts[16:32]),
        NextState("TIMESTAMP_LSB"),
        NextValue(source.trig, 1),
        NextValue(clearTrig, 1),
        NextValue(ts_trig, 1),
    )

    # *********************************************************
    # *              Insert LSB part of timestamp             *
    # *********************************************************
    fsmReader.act("TIMESTAMP_LSB",
        NextValue(source.data, buf_out.source.ts[0:16]),
        If(ts_trig,
            NextValue(source.data, _ts[0:16]),
            NextValue(source.trig, 1),
        ).Else(
            NextValue(count, 0),
        ),
        NextValue(ts_trig, 0),
        NextValue(buf_out.source.ready, 1),
        If((state_after == state.FILTER),
            NextState("FILTER"),
        ),
        If((state_after == state.SKIP),
            If(skipEnabled,
                NextValue(source.valid, 1),
            ),
            NextState("SKIP"),
        ),
        If((state_after == state.IDLE),
            # Fixed: was gated on ftsEnabled (copy-paste error); the IDLE
            # resume path must honor its own enable bit.
            If(idleEnabled,
                NextValue(source.valid, 1),
            ),
            NextState("IDLE"),
        ),
        If((state_after == state.FTS),
            If(ftsEnabled,
                NextValue(source.valid, 1),
            ),
            NextState("FTS"),
        ),
        If((state_after == state.TS1),
            If(ts1Enabled,
                NextValue(source.valid, 1),
            ),
            NextState("TS1"),
        ),
        If((state_after == state.TS2),
            If(ts2Enabled,
                NextValue(source.valid, 1),
            ),
            NextState("TS2"),
        ),
        If((state_after == state.DLLP),
            If(dllpEnabled,
                NextValue(source.valid, 1),
            ),
            NextState("DLLP"),
        ),
        If((state_after == state.TLP),
            If(tlpEnabled,
                NextValue(source.valid, 1),
            ),
            NextState("TLP"),
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )

    # *********************************************************
    # *                Read a SKIP from the FIFO              *
    # *********************************************************
    fsmReader.act("SKIP",
        NextValue(source.data, buf_out.source.data),
        NextValue(source.ctrl, buf_out.source.ctrl),
        NextValue(count, count + 1),
        NextValue(source.valid, 1),
        NextValue(last_ts, last_ts + 1),
        NextValue(source.time, 0),
        NextValue(source.trig, 0),
        NextValue(source.sof, 0),
        NextValue(source.eof, 0),
        If(trigPending,
            NextValue(buf_out.source.ready, 0),
            NextValue(state_after, state.SKIP),
            NextState("TIMESTAMP_MSB_TRIG"),
        ),
        If(count == 1,
            NextValue(buf_out.source.ready, 0),
            NextValue(source.valid, 0),
            NextState("FILTER"),
        ),
        If(skipEnabled,
            If(count == 0, NextValue(source.sof, 1)),
            If(count == 1, NextValue(source.eof, 1)),
            NextValue(source.valid, 1),
        ).Else(
            NextValue(source.valid, 0),
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )

    # *********************************************************
    # *               Read a IDLE from the FIFO               *
    # *********************************************************
    fsmReader.act("IDLE",
        NextValue(source.data, buf_out.source.data),
        NextValue(source.ctrl, buf_out.source.ctrl),
        NextValue(count, count + 1),
        NextValue(source.valid, 1),
        NextValue(last_ts, last_ts + 1),
        NextValue(source.time, 0),
        NextValue(source.trig, 0),
        NextValue(source.sof, 0),
        NextValue(source.eof, 0),
        If(trigPending,
            NextValue(buf_out.source.ready, 0),
            NextValue(state_after, state.IDLE),
            NextState("TIMESTAMP_MSB_TRIG"),
        ),
        If(count == 1,
            NextValue(buf_out.source.ready, 0),
            NextValue(source.valid, 0),
            NextValue(source.sof, 0),
            NextValue(source.eof, 1),
            NextState("FILTER"),
        ),
        If(idleEnabled,
            If(count == 0, NextValue(source.sof, 1)),
            If(count == 1, NextValue(source.eof, 1)),
            NextValue(source.valid, 1),
        ).Else(
            NextValue(source.valid, 0),
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )

    # *********************************************************
    # *                Read a FTS from the FIFO               *
    # *********************************************************
    fsmReader.act("FTS",
        NextValue(source.data, buf_out.source.data),
        NextValue(source.ctrl, buf_out.source.ctrl),
        NextValue(count, count + 1),
        NextValue(source.valid, 1),
        NextValue(last_ts, last_ts + 1),
        NextValue(source.time, 0),
        NextValue(source.trig, 0),
        NextValue(source.sof, 0),
        NextValue(source.eof, 0),
        If(trigPending,
            NextValue(buf_out.source.ready, 0),
            NextValue(state_after, state.FTS),
            NextState("TIMESTAMP_MSB_TRIG"),
        ),
        If(count == 1,
            NextValue(buf_out.source.ready, 0),
            NextValue(source.valid, 0),
            NextValue(source.sof, 0),
            NextValue(source.eof, 1),
            NextState("FILTER"),
        ),
        If(ftsEnabled,
            If(count == 0, NextValue(source.sof, 1)),
            If(count == 1, NextValue(source.eof, 1)),
            NextValue(source.valid, 1),
        ).Else(
            NextValue(source.valid, 0),
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )

    # *********************************************************
    # *                Read a TS1 from the FIFO               *
    # *********************************************************
    fsmReader.act("TS1",
        NextValue(source.data, buf_out.source.data),
        NextValue(source.ctrl, buf_out.source.ctrl),
        NextValue(count, count + 1),
        NextValue(source.valid, 1),
        NextValue(last_ts, last_ts + 1),
        NextValue(source.time, 0),
        NextValue(source.trig, 0),
        NextValue(source.sof, 0),
        NextValue(source.eof, 0),
        If(trigPending,
            NextValue(buf_out.source.ready, 0),
            NextValue(state_after, state.TS1),
            NextState("TIMESTAMP_MSB_TRIG"),
        ),
        If(count == 7,
            NextValue(buf_out.source.ready, 0),
            NextValue(source.valid, 0),
            NextValue(source.sof, 0),
            NextValue(source.eof, 1),
            NextState("FILTER"),
        ),
        If(ts1Enabled,
            If(count == 0, NextValue(source.sof, 1)),
            If(count == 7, NextValue(source.eof, 1)),
            NextValue(source.valid, 1),
        ).Else(
            NextValue(source.valid, 0),
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )

    # *********************************************************
    # *                Read a TS2 from the FIFO               *
    # *********************************************************
    fsmReader.act("TS2",
        NextValue(source.data, buf_out.source.data),
        NextValue(source.ctrl, buf_out.source.ctrl),
        NextValue(count, count + 1),
        NextValue(source.valid, 1),
        NextValue(last_ts, last_ts + 1),
        NextValue(source.time, 0),
        NextValue(source.trig, 0),
        NextValue(source.sof, 0),
        NextValue(source.eof, 0),
        If(trigPending,
            NextValue(buf_out.source.ready, 0),
            NextValue(state_after, state.TS2),
            NextState("TIMESTAMP_MSB_TRIG"),
        ),
        If(count == 7,
            NextValue(buf_out.source.ready, 0),
            NextValue(source.valid, 0),
            NextValue(source.sof, 0),
            NextValue(source.eof, 1),
            NextState("FILTER"),
        ),
        If(ts2Enabled,
            If(count == 0, NextValue(source.sof, 1)),
            If(count == 7, NextValue(source.eof, 1)),
            NextValue(source.valid, 1),
        ).Else(
            NextValue(source.valid, 0),
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )

    # *********************************************************
    # *                Read a TLP from the FIFO               *
    # *********************************************************
    fsmReader.act("TLP",
        NextValue(source.data, buf_out.source.data),
        NextValue(source.ctrl, buf_out.source.ctrl),
        NextValue(count, count + 1),
        NextValue(source.valid, 1),
        NextValue(last_ts, last_ts + 1),
        NextValue(source.time, 0),
        NextValue(source.trig, 0),
        NextValue(source.sof, 0),
        NextValue(source.eof, 0),
        If(trigPending,
            NextValue(buf_out.source.ready, 0),
            NextValue(state_after, state.TLP),
            NextState("TIMESTAMP_MSB_TRIG"),
        ),
        If((buf_out.source.ctrl[0] & (buf_out.source.data[0:8] == END.value)) | fifo.source.error,
            If(tlpEnabled, NextValue(source.eof, 1)),
            NextValue(buf_out.source.ready, 0),
            NextValue(source.valid, 0),
            NextState("FILTER"),
            If(fifo.source.error,
                NextValue(from_error, 1),
            ),
        ),
        If(tlpEnabled,
            If(count == 0, NextValue(source.sof, 1)),
            NextValue(source.valid, 1),
        ).Else(
            NextValue(source.valid, 0),
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )

    # *********************************************************
    # *               Read a DLLP from the FIFO               *
    # *********************************************************
    fsmReader.act("DLLP",
        NextValue(source.data, buf_out.source.data),
        NextValue(source.ctrl, buf_out.source.ctrl),
        NextValue(count, count + 1),
        NextValue(source.valid, 1),
        NextValue(last_ts, last_ts + 1),
        NextValue(source.time, 0),
        NextValue(source.trig, 0),
        NextValue(source.sof, 0),
        NextValue(source.eof, 0),
        If(trigPending,
            NextValue(buf_out.source.ready, 0),
            NextValue(state_after, state.DLLP),
            NextState("TIMESTAMP_MSB_TRIG"),
        ),
        If((buf_out.source.ctrl[0] & (buf_out.source.data[0:8] == END.value)) | fifo.source.error,
            If(dllpEnabled, NextValue(source.eof, 1)),
            NextValue(buf_out.source.ready, 0),
            NextValue(source.valid, 0),
            NextState("FILTER"),
            If(fifo.source.error,
                NextValue(from_error, 1),
            ),
        ),
        If(dllpEnabled,
            If(count == 0, NextValue(source.sof, 1)),
            NextValue(source.valid, 1),
        ).Else(
            NextValue(source.valid, 0),
        ),
        If(~_filterEnable,
            NextState("NO_FILTER"),
        )
    )
def __init__(self, dram_port, pattern_mem, *, rowbits, row_shift):
    """DMA DRAM reader BIST module.

    Scans a configured memory area over DMA and compares each read value
    against the expected pattern (optionally row-inverted via
    ``RowDataInverter``). Errors are exposed through an error FIFO /
    CSR interface; see ``self.doc`` for the user-facing description.

    Parameters:
        dram_port:   LiteDRAM native port used for DMA reads.
        pattern_mem: pattern memory shared with the other BIST modules.
        rowbits:     number of row bits used by the inverter.
        row_shift:   bit position of the row address in the DMA address.
    """
    super().__init__(pattern_mem)

    # Fix vs. previous revision: "nubmer" typo corrected in the docs text.
    self.doc = ModuleDoc("""
DMA DRAM reader.

Allows to check DRAM contents against a predefined pattern using DMA.

Pattern
-------

{common}

Reading errors
--------------

This module allows to check the locations of errors in the memory.
It scans the configured memory area and compares the values read to
the predefined pattern. If `skip_fifo` is 0, this module will stop
after each error encountered, so that it can be examined. Wait until
the `error_ready` CSR is 1. Then use the CSRs `error_offset`,
`error_data` and `error_expected` to examine the errors in the current
transfer. To continue reading, write 1 to `error_continue` CSR.
Setting `skip_fifo` to 1 will disable this behaviour entirely.

The final number of errors can be read from `error_count`.
NOTE: This value represents the number of erroneous *DMA transfers*.

The current progress can be read from the `done` CSR.
""".format(common=BISTModule.__doc__))

    error_desc = [
        ('offset',   32),
        ('data',     dram_port.data_width),
        ('expected', dram_port.data_width),
    ]

    self.error_count = Signal(32)
    self.skip_fifo = Signal()
    self.error = stream.Endpoint(error_desc)

    dma = LiteDRAMDMAReader(dram_port, fifo_depth=4)
    self.submodules += dma

    # pass addresses from address FSM (command producer) to pattern FSM (data consumer)
    address_fifo = stream.SyncFIFO([('address', len(dma.sink.address))], depth=4)
    self.submodules += address_fifo

    # ----------------- Address FSM -----------------
    counter_addr = Signal(32)

    self.comb += [
        self.addr_port.adr.eq(counter_addr & self.data_mask),
        dma.sink.address.eq(self.addr_port.dat_r + (counter_addr & self.mem_mask)),
    ]

    # Using temporary state 'WAIT' to obtain address offset from memory
    self.submodules.fsm_addr = fsm_addr = FSM()
    fsm_addr.act("READY",
        If(self.start,
            NextValue(counter_addr, 0),
            NextState("WAIT"),
        ))
    fsm_addr.act("WAIT",
        # FIXME: should be possible to write the address in WR_ADDR
        # counter_addr == 0 on the first pass: nothing issued yet, so
        # don't assert valid / don't wait for ready.
        address_fifo.sink.valid.eq(counter_addr != 0),
        If(address_fifo.sink.ready | (counter_addr == 0),
            If(counter_addr >= self.count,
                NextState("READY")
            ).Else(
                NextState("WR_ADDR"))))
    fsm_addr.act("WR_ADDR",
        dma.sink.valid.eq(1),
        If(dma.sink.ready,
            # send the address in WAIT
            NextValue(address_fifo.sink.address, dma.sink.address),
            NextValue(counter_addr, counter_addr + 1),
            NextState("WAIT")))

    # ------------- Pattern FSM ----------------
    counter_gen = Signal(32)

    # Unmatched memory offsets
    error_fifo = stream.SyncFIFO(error_desc, depth=2, buffered=False)
    self.submodules += error_fifo

    # DMA data may be inverted using AddressSelector
    data_expected = Signal.like(dma.source.data)
    self.submodules.inverter = RowDataInverter(
        addr=address_fifo.source.address,
        data_in=self.data_port.dat_r,
        data_out=data_expected,
        rowbits=rowbits,
        row_shift=row_shift,
    )

    self.comb += [
        self.data_port.adr.eq(counter_gen & self.data_mask),
        self.error.offset.eq(error_fifo.source.offset),
        self.error.data.eq(error_fifo.source.data),
        self.error.expected.eq(error_fifo.source.expected),
        self.error.valid.eq(error_fifo.source.valid),
        # When skip_fifo is set, errors are drained unconditionally so
        # the reader never stalls waiting for software.
        error_fifo.source.ready.eq(self.error.ready | self.skip_fifo),
        self.done.eq(counter_gen),
    ]

    self.submodules.fsm_pattern = fsm_pattern = FSM()
    fsm_pattern.act("READY",
        self.ready.eq(1),
        If(self.start,
            NextValue(counter_gen, 0),
            NextValue(self.error_count, 0),
            NextState("WAIT"),
        ))
    fsm_pattern.act("WAIT",
        # TODO: we could pipeline the access
        If(counter_gen >= self.count,
            NextState("READY")
        ).Else(
            NextState("RD_DATA")))
    fsm_pattern.act("RD_DATA",
        If(dma.source.valid & address_fifo.source.valid,
            # we must now change FSM state in single cycle
            dma.source.ready.eq(1),
            address_fifo.source.ready.eq(1),
            # count the command
            NextValue(counter_gen, counter_gen + 1),
            # next state depends on if there was an error
            If(dma.source.data != data_expected,
                NextValue(self.error_count, self.error_count + 1),
                NextValue(error_fifo.sink.offset, address_fifo.source.address),
                NextValue(error_fifo.sink.data, dma.source.data),
                NextValue(error_fifo.sink.expected, data_expected),
                If(self.skip_fifo,
                    NextState("WAIT")
                ).Else(
                    NextState("WR_ERR"))
            ).Else(
                NextState("WAIT"))))
    fsm_pattern.act("WR_ERR",
        error_fifo.sink.valid.eq(1),
        If(error_fifo.sink.ready | self.skip_fifo,
            NextState("WAIT")))
def __init__(self, endpoint, data_width=32, id_width=1):
    """AXI <-> LitePCIe DMA bridge.

    Exposes an AXI slave interface (``self.axi``) and converts AXI
    write/read bursts into LitePCIe DMA descriptors on the given
    ``endpoint``, streaming the data through width converters.

    Parameters:
        endpoint:   LitePCIe endpoint providing the crossbar and PHY.
        data_width: AXI data width in bits (default 32).
        id_width:   AXI id width in bits (default 1).

    NOTE(review): only INCR-style length handling is visible here; burst
    type / size fields of the AXI channels are not inspected — confirm
    callers only issue full-width incrementing bursts.
    """
    self.axi = axi.AXIInterface(data_width=data_width, id_width=id_width)

    # # #

    aw_id = Signal(id_width)  # AXI id captured from AW, replayed on B.
    ar_id = Signal(id_width)  # AXI id captured from AR, replayed on R.
    r_len = Signal(8)         # Remaining read beats (AXI len counts beats-1).

    desc_rd = stream.Endpoint(descriptor_layout())
    desc_wr = stream.Endpoint(descriptor_layout())

    port_rd = endpoint.crossbar.get_master_port(read_only=True)
    port_wr = endpoint.crossbar.get_master_port(write_only=True)

    # AXI Write Path ---------------------------------------------------------------------------
    # DMA / FIFO / Converter
    self.submodules.dma_wr = dma_wr = LitePCIeDMAWriter(endpoint=endpoint, port=port_wr, with_table=False)
    self.submodules.fifo_wr = fifo_wr = stream.SyncFIFO(descriptor_layout(), 16)
    self.submodules.conv_wr = conv_wr = stream.Converter(nbits_from=data_width, nbits_to=endpoint.phy.data_width)

    # Flow
    self.comb += [
        desc_wr.connect(fifo_wr.sink),
        fifo_wr.source.connect(dma_wr.desc_sink),
        conv_wr.source.connect(dma_wr.sink),
    ]

    # FSM (Convert AXI Write Requests to LitePCIe's DMA Descriptors).
    self.comb += desc_wr.address.eq(self.axi.aw.addr)                         # Start address (byte addressed)
    self.comb += desc_wr.length.eq((self.axi.aw.len + 1) * (data_width // 8)) # Transfer length (in bytes)
    self.submodules.fsm_wr = fsm_wr = FSM(reset_state="WRITE-IDLE")
    fsm_wr.act("WRITE-IDLE",
        self.axi.aw.ready.eq(desc_wr.ready),
        desc_wr.valid.eq(self.axi.aw.valid),
        If(self.axi.aw.valid & self.axi.aw.ready,
            NextValue(aw_id, self.axi.aw.id),  # Save id to use it on b channel.
            NextState("WRITE-MONITOR"),
        ))
    self.comb += [
        conv_wr.sink.data.eq(self.axi.w.data),
        conv_wr.sink.last.eq(self.axi.w.last),
    ]
    fsm_wr.act("WRITE-MONITOR",
        conv_wr.sink.valid.eq(self.axi.w.valid),
        self.axi.w.ready.eq(conv_wr.sink.ready),
        If(self.axi.w.valid & self.axi.w.ready & self.axi.w.last,
            NextState("WRITE-RESP"),
        ))
    self.comb += [
        self.axi.b.id.eq(aw_id),
        self.axi.b.resp.eq(0),  # Always OKAY.
    ]
    fsm_wr.act("WRITE-RESP",
        self.axi.b.valid.eq(1),
        If(self.axi.b.ready,
            NextState("WRITE-IDLE"),  # Write done
        ))

    # AXI Read Path ----------------------------------------------------------------------------
    # DMA / FIFO / Converter
    self.submodules.dma_rd = dma_rd = LitePCIeDMAReader(endpoint=endpoint, port=port_rd, with_table=False)
    self.submodules.fifo_rd = fifo_rd = stream.SyncFIFO(descriptor_layout(), 16)
    self.submodules.conv_rd = conv_rd = stream.Converter(nbits_from=endpoint.phy.data_width, nbits_to=data_width)

    # Flow
    self.comb += [
        desc_rd.connect(fifo_rd.sink),
        fifo_rd.source.connect(dma_rd.desc_sink),
        dma_rd.source.connect(conv_rd.sink),
    ]

    # FSM (Convert AXI Read Requests to LitePCIe's DMA Descriptors).
    self.comb += desc_rd.address.eq(self.axi.ar.addr)                         # Starting address (byte addressed)
    self.comb += desc_rd.length.eq((self.axi.ar.len + 1) * (data_width // 8)) # Transfer length (in bytes)
    self.submodules.fsm_rd = fsm_rd = FSM(reset_state="READ-IDLE")
    fsm_rd.act("READ-IDLE",
        self.axi.ar.ready.eq(desc_rd.ready),
        desc_rd.valid.eq(self.axi.ar.valid),
        If(self.axi.ar.valid & self.axi.ar.ready,
            NextValue(ar_id, self.axi.ar.id),  # Save id to use it on r channel.
            NextValue(r_len, self.axi.ar.len),
            NextState("READ-MONITOR"),
        ))
    self.comb += [
        self.axi.r.data.eq(conv_rd.source.data),
        self.axi.r.last.eq(r_len == 0),
        # We need to provide the same id that was provided on the ar channel
        # for the duration of the transfer.
        self.axi.r.id.eq(ar_id),
        self.axi.r.resp.eq(0),  # Always OKAY.
    ]
    fsm_rd.act("READ-MONITOR",
        self.axi.r.valid.eq(conv_rd.source.valid),
        conv_rd.source.ready.eq(self.axi.r.ready),
        If(self.axi.r.ready & self.axi.r.valid,
            NextValue(r_len, r_len - 1),
            If(self.axi.r.last,  # Check if we finished the whole AXI transaction.
                NextState("READ-IDLE"),
            )))
def __init__(self, port, fifo_depth=16, fifo_buffered=False, with_csr=False):
    """LiteDRAM DMA read front-end.

    Accepts addresses on ``self.sink``, issues read commands on ``port``
    (native or AXI), and delivers the returned data on ``self.source``
    through a FIFO. A reservation-level counter ensures no more reads
    are issued than the FIFO can absorb, so read data is never dropped.

    Parameters:
        port:          LiteDRAMNativePort or LiteDRAMAXIPort.
        fifo_depth:    depth of the data FIFO (and max outstanding reads).
        fifo_buffered: passed through to stream.SyncFIFO.
        with_csr:      if True, also expose the CSR interface via add_csr().
    """
    assert isinstance(port, (LiteDRAMNativePort, LiteDRAMAXIPort))
    self.port = port
    self.sink = sink = stream.Endpoint([("address", port.address_width)])
    self.source = source = stream.Endpoint([("data", port.data_width)])

    # # #

    # Native / AXI selection: pick the command/read-data channel pair.
    is_native = isinstance(port, LiteDRAMNativePort)
    is_axi = isinstance(port, LiteDRAMAXIPort)
    if is_native:
        (cmd, rdata) = port.cmd, port.rdata
    elif is_axi:
        (cmd, rdata) = port.ar, port.r
    else:
        raise NotImplementedError

    # Request issuance -------------------------------------------------------------------------
    request_enable = Signal()  # High while FIFO has reserved space for another read.
    request_issued = Signal()  # Pulses when a read command is accepted.
    if is_native:
        self.comb += cmd.we.eq(0)  # Native port: reads only.
    if is_axi:
        self.comb += cmd.size.eq(int(log2(port.data_width // 8)))  # Full-width AXI beats.
    self.comb += [
        cmd.addr.eq(sink.address),
        cmd.valid.eq(sink.valid & request_enable),
        sink.ready.eq(cmd.ready & request_enable),
        request_issued.eq(cmd.valid & cmd.ready)
    ]

    # FIFO reservation level counter -----------------------------------------------------------
    # incremented when data is planned to be queued
    # decremented when data is dequeued
    data_dequeued = Signal()
    self.rsv_level = rsv_level = Signal(max=fifo_depth + 1)
    self.sync += [
        If(request_issued,
            # Simultaneous issue + dequeue cancel out: level unchanged.
            If(~data_dequeued, rsv_level.eq(self.rsv_level + 1))).Elif(
            data_dequeued, rsv_level.eq(rsv_level - 1))
    ]
    # Stop issuing when every FIFO slot is already spoken for.
    self.comb += request_enable.eq(rsv_level != fifo_depth)

    # FIFO -------------------------------------------------------------------------------------
    fifo = stream.SyncFIFO([("data", port.data_width)], fifo_depth, fifo_buffered)
    self.submodules += fifo

    self.comb += [
        # omit AXI-only fields when connecting the read-data channel.
        rdata.connect(fifo.sink, omit={"id", "resp"}),
        fifo.source.connect(source),
        data_dequeued.eq(source.valid & source.ready)
    ]

    if with_csr:
        self.add_csr()
def __init__(self, pads, clk_freq, fifo_depth = 64, read_time = 128, write_time = 128):
    """FT601-style synchronous FIFO PHY (usb clock domain) with CDC to sys.

    ``sink``/``source`` are in the sys domain; CDC FIFOs cross to the usb
    domain where a READ/WRITE FSM drives the shared data bus.  read_time /
    write_time bound how long one direction may hold the bus (anti-starvation).
    """
    self.dw = dw = len(pads.data)
    self.pads = pads
    self.sink = stream.Endpoint(phy_description(dw))
    self.source = stream.Endpoint(phy_description(dw))

    # # #

    # Pads Reset.
    # -----------
    # Control strobes are active-low; keep them deasserted out of reset.
    pads.oe_n.reset = 1
    pads.rd_n.reset = 1
    pads.wr_n.reset = 1

    # Read CDC/FIFO (FTDI --> SoC).
    # -----------------------------
    self.submodules.read_cdc = stream.ClockDomainCrossing(phy_description(dw),
        cd_from         = "usb",
        cd_to           = "sys",
        with_common_rst = True
    )
    self.submodules.read_fifo = stream.SyncFIFO(phy_description(dw), fifo_depth)
    self.comb += self.read_cdc.source.connect(self.read_fifo.sink)
    self.comb += self.read_fifo.source.connect(self.source)
    # Almost-full watermark (4 slots of margin), resynchronized to usb domain
    # to back-pressure reads before the sys-side FIFO overflows.
    read_fifo_almost_full = (self.read_fifo.level > (fifo_depth - 4))
    read_fifo_almost_full_usb = Signal()
    self.specials += MultiReg(read_fifo_almost_full, read_fifo_almost_full_usb)

    # Write FIFO/CDC (SoC --> FTDI).
    # ------------------------------
    self.submodules.write_fifo = stream.SyncFIFO(phy_description(dw), fifo_depth)
    self.submodules.write_cdc = stream.ClockDomainCrossing(phy_description(dw),
        cd_from         = "sys",
        cd_to           = "usb",
        with_common_rst = True
    )
    self.comb += self.sink.connect(self.write_fifo.sink)
    self.comb += self.write_fifo.source.connect(self.write_cdc.sink)

    # Read / Write Anti-Starvation.
    # -----------------------------
    read_time_en, max_read_time = anti_starvation(self, read_time)
    write_time_en, max_write_time = anti_starvation(self, write_time)

    # Read / Write Detection.
    # -----------------------
    # A direction "wants" the bus when the FTDI side signals room/data
    # (txe_n/rxf_n are active-low) and the local FIFO side can follow.
    self.wants_write = wants_write = Signal()
    self.wants_read = wants_read = Signal()
    self.comb += [
        wants_write.eq(~pads.txe_n & self.write_cdc.source.valid),
        wants_read.eq(
            ~pads.rxf_n & (self.read_cdc.sink.ready & ~read_fifo_almost_full_usb)),
    ]

    # Data Bus Tristate.
    # ------------------
    self.data_w = data_w = Signal(dw)
    self.data_r = data_r = Signal(dw)
    self.data_oe = data_oe = Signal()
    for i in range(dw):
        self.specials += SDRTristate(
            io  = pads.data[i],
            o   = data_w[i],
            oe  = data_oe,
            i   = data_r[i],
            clk = ClockSignal("usb")
        )
    if hasattr(pads, "be"):
        # Byte-enables (when present) are driven all-ones during writes;
        # their input side is ignored.
        for i in range(dw//8):
            self.specials += SDRTristate(
                io  = pads.be[i],
                o   = Signal(reset=0b1),
                oe  = data_oe,
                i   = Signal(),
                clk = ClockSignal("usb")
            )

    # Read / Write FSM.
    # -----------------
    fsm = FSM(reset_state="READ")
    fsm = ClockDomainsRenamer("usb")(fsm)
    self.submodules.fsm = fsm
    fsm.act("READ",
        # Arbitration.
        read_time_en.eq(1),
        If(wants_write,
            If(~wants_read | max_read_time,
                NextState("READ-TO-WRITE")
            )
        ),
        # Control/Data-Path.
        data_oe.eq(0),
        NextValue(pads.oe_n, ~wants_read),
        # rd_n asserts one cycle after oe_n (bus turnaround before read strobe).
        NextValue(pads.rd_n, pads.oe_n | ~wants_read),
        NextValue(pads.wr_n, 1),
    )
    self.comb += self.read_cdc.sink.data.eq(data_r)
    self.sync.usb += self.read_cdc.sink.valid.eq(~pads.rd_n & ~pads.rxf_n)
    fsm.act("READ-TO-WRITE",
        NextState("WRITE")
    )
    fsm.act("WRITE",
        # Arbitration.
        write_time_en.eq(1),
        If(wants_read,
            If(~wants_write | max_write_time,
                NextState("WRITE-TO-READ")
            )
        ),
        # Control/Data-Path.
        data_oe.eq(1),
        NextValue(pads.oe_n, 1),
        NextValue(pads.rd_n, 1),
        NextValue(pads.wr_n, ~wants_write),
        NextValue(data_w, self.write_cdc.source.data), # FIXME: Add 1 cycle delay.
        self.write_cdc.source.ready.eq(wants_write),
    )
    fsm.act("WRITE-TO-READ",
        NextState("READ")
    )
def __init__(self, bus, port, endianness="little"):
    """Sector-write DMA: reads one logical sector from memory over Wishbone
    DMA, buffers it, converts it to the port's data width and sends it as a
    write command/data burst on ``port``; monitors ``port.source.failed`` for
    errors.

    CSRs: sector (48-bit target sector), base (64-bit memory base address),
    start (trigger), done / error (status).
    """
    self.bus = bus
    self.port = port
    self.sector = CSRStorage(48)
    self.base = CSRStorage(64)
    self.start = CSR()
    self.done = CSRStatus()
    self.error = CSRStatus()

    # # #

    dma_bytes = bus.data_width // 8
    port_bytes = port.dw // 8
    # count is reused for both the DMA-word loop and the port-word loop,
    # so it is sized for the larger of the two counts.
    count = Signal(max=logical_sector_size // min(dma_bytes, port_bytes))

    # DMA
    dma = WishboneDMAReader(bus, with_csr=False, endianness=endianness)
    self.submodules.dma = dma

    # Sector buffer
    # NOTE(review): the buffer data width is port.dw but it sits on the
    # bus-width side of the converter (DMA source -> buf -> conv); verify
    # this matches bus.data_width in practice, otherwise connect() truncates.
    buf = stream.SyncFIFO([("data", port.dw)], logical_sector_size // dma_bytes)
    self.submodules.buf = buf

    # Converter
    conv = stream.Converter(nbits_from=bus.data_width, nbits_to=port.dw)
    self.submodules.conv = conv

    # Connect DMA to Sector Buffer
    self.comb += dma.source.connect(buf.sink)

    # Connect Sector Buffer to Converter
    self.comb += buf.source.connect(conv.sink)

    # Control FSM
    self.submodules.fsm = fsm = FSM(reset_state="IDLE")
    fsm.act("IDLE",
        If(self.start.re,
            NextValue(count, 0),
            NextValue(self.error.status, 0),
            NextState("READ-DATA-DMA")
        ).Else(self.done.status.eq(1)),
        # Drain any leftover converter data while idle.
        conv.source.ready.eq(1))
    fsm.act("READ-DATA-DMA",
        # Read Sector data over DMA.
        dma.sink.valid.eq(1),
        # base is word-addressed for the DMA: drop the byte-offset bits.
        dma.sink.address.eq(self.base.storage[int(log2(dma_bytes)):] + count),
        If(dma.sink.valid & dma.sink.ready,
            NextValue(count, count + 1),
            If(count == (logical_sector_size // dma_bytes - 1),
                NextValue(count, 0),
                NextState("SEND-CMD-AND-DATA"))))
    fsm.act("SEND-CMD-AND-DATA",
        # Send write command/data for 1 Sector.
        port.sink.valid.eq(1),
        port.sink.last.eq(count == (logical_sector_size // port_bytes - 1)),
        port.sink.write.eq(1),
        port.sink.sector.eq(self.sector.storage),
        port.sink.count.eq(1),
        port.sink.data.eq(reverse_bytes(conv.source.data)),
        If(port.sink.ready,
            conv.source.ready.eq(1),
            NextValue(count, count + 1),
            If(port.sink.last, NextState("WAIT-ACK"))),
        # Monitor errors
        port.source.ready.eq(1),
        If(port.source.valid & port.source.ready,
            If(port.source.failed,
                NextValue(self.error.status, 1),
                NextState("IDLE"),
            )))
    fsm.act("WAIT-ACK",
        # Wait for the port's response; latch error on failure.
        port.source.ready.eq(1),
        If(port.source.valid,
            If(port.source.failed,
                NextValue(self.error.status, 1),
            ),
            NextState("IDLE")))
def __init__(self, platform, with_analyzer=True, with_loopback=False):
    """PCIe Screamer SoC: bridges a PCIe x1 endpoint to an FT601 USB3 FIFO.

    with_loopback : wire the USB PHY back on itself (PHY bring-up test)
                    instead of instantiating the USB core / Etherbone / TLP.
    with_analyzer : add a LiteScope analyzer on the PCIe PHY streams.
    """
    sys_clk_freq = int(100e6)

    # SoCMini ----------------------------------------------------------------------------------
    SoCMini.__init__(self, platform, sys_clk_freq,
        ident="PCIe Screamer", ident_version=True)

    # CRG --------------------------------------------------------------------------------------
    self.submodules.crg = _CRG(platform, sys_clk_freq)

    # Serial Wishbone Bridge -------------------------------------------------------------------
    self.submodules.bridge = UARTWishboneBridge(platform.request("serial"),
        sys_clk_freq, baudrate=3e6)
    self.add_wb_master(self.bridge.wishbone)

    # PCIe PHY ---------------------------------------------------------------------------------
    self.submodules.pcie_phy = S7PCIEPHY(platform, platform.request("pcie_x1"))
    self.add_csr("pcie_phy")

    # USB FT601 PHY ----------------------------------------------------------------------------
    self.submodules.usb_phy = FT601Sync(platform.request("usb_fifo"), dw=32, timeout=1024)

    # USB Loopback -----------------------------------------------------------------------------
    if with_loopback:
        self.submodules.usb_loopback_fifo = stream.SyncFIFO(phy_description(32), 2048)
        self.comb += [
            self.usb_phy.source.connect(self.usb_loopback_fifo.sink),
            self.usb_loopback_fifo.source.connect(self.usb_phy.sink)
        ]
    # USB Core ---------------------------------------------------------------------------------
    else:
        self.submodules.usb_core = USBCore(self.usb_phy, sys_clk_freq)

        # USB <--> Wishbone --------------------------------------------------------------------
        self.submodules.etherbone = Etherbone(self.usb_core, self.usb_map["wishbone"])
        self.add_wb_master(self.etherbone.master.bus)

        # USB <--> TLP -------------------------------------------------------------------------
        self.submodules.tlp = TLP(self.usb_core, self.usb_map["tlp"])
        self.comb += [
            self.pcie_phy.source.connect(self.tlp.sender.sink),
            self.tlp.receiver.source.connect(self.pcie_phy.sink)
        ]

    # Wishbone --> MSI -------------------------------------------------------------------------
    self.submodules.msi = MSI()
    self.comb += self.msi.source.connect(self.pcie_phy.msi)
    self.add_csr("msi")

    # Led blink --------------------------------------------------------------------------------
    # Free-running counters; bit 26 gives a visible blink per clock domain.
    usb_counter = Signal(32)
    self.sync.usb += usb_counter.eq(usb_counter + 1)
    self.comb += platform.request("user_led", 0).eq(usb_counter[26])

    pcie_counter = Signal(32)
    self.sync.pcie += pcie_counter.eq(pcie_counter + 1)
    self.comb += platform.request("user_led", 1).eq(pcie_counter[26])

    # Analyzer ---------------------------------------------------------------------------------
    if with_analyzer:
        analyzer_signals = [
            self.pcie_phy.sink,
            self.pcie_phy.source,
        ]
        self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals, 1024,
            csr_csv="test/analyzer.csv")
        self.add_csr("analyzer")
def __init__(self, port, bus, endianness="little"):
    """Sector-read DMA: issues a 1-sector read command on ``port``, buffers
    the returned data, converts it to the bus width and writes it to memory
    over Wishbone DMA; monitors ``port.source.failed`` for errors.

    CSRs: sector (48-bit source sector), base (64-bit memory base address),
    start (trigger), done / error (status).
    """
    self.port = port
    self.bus = bus
    self.sector = CSRStorage(48)
    self.base = CSRStorage(64)
    self.start = CSR()
    self.done = CSRStatus()
    self.error = CSRStatus()

    # # #

    port_bytes = port.dw // 8
    dma_bytes = bus.data_width // 8
    count = Signal(max=logical_sector_size // dma_bytes)

    # Sector buffer
    buf = stream.SyncFIFO([("data", port.dw)], logical_sector_size // port_bytes)
    self.submodules.buf = buf

    # Converter
    conv = stream.Converter(nbits_from=port.dw, nbits_to=bus.data_width)
    self.submodules.conv = conv

    # Connect Port to Sector Buffer
    # keep= limits the connection to the handshake/data fields; status
    # fields (e.g. failed) are handled separately by the FSM below.
    self.comb += port.source.connect(
        buf.sink, keep={"valid", "ready", "last", "data"})

    # Connect Sector Buffer to Converter
    self.comb += buf.source.connect(conv.sink)

    # DMA
    dma = WishboneDMAWriter(bus, with_csr=False, endianness=endianness)
    self.submodules.dma = dma

    # Control FSM
    self.submodules.fsm = fsm = FSM(reset_state="IDLE")
    fsm.act("IDLE",
        If(self.start.re,
            NextValue(count, 0),
            NextValue(self.error.status, 0),
            NextState("SEND-CMD")
        ).Else(self.done.status.eq(1)),
        # Drain any leftover converter data while idle.
        conv.source.ready.eq(1))
    fsm.act("SEND-CMD",
        # Send read command for 1 Sector.
        port.sink.valid.eq(1),
        port.sink.last.eq(1),
        port.sink.read.eq(1),
        port.sink.sector.eq(self.sector.storage),
        port.sink.count.eq(1),
        If(port.sink.ready, NextState("RECEIVE-DATA-DMA")))
    fsm.act("RECEIVE-DATA-DMA",
        # Connect Converter to DMA.
        dma.sink.valid.eq(conv.source.valid),
        dma.sink.last.eq(conv.source.last),
        # base is word-addressed for the DMA: drop the byte-offset bits.
        dma.sink.address.eq(self.base.storage[int(log2(dma_bytes)):] + count),
        dma.sink.data.eq(reverse_bytes(conv.source.data)),
        conv.source.ready.eq(dma.sink.ready),
        If(dma.sink.valid & dma.sink.ready,
            NextValue(count, count + 1),
            If(dma.sink.last, NextState("IDLE"))),
        # Monitor errors
        If(port.source.valid & port.source.ready,
            If(port.source.failed,
                NextValue(self.error.status, 1),
                NextState("IDLE"),
            )))
def __init__(self, pads, clk_freq, fifo_depth=8, read_time=128, write_time=128):
    """FTDI asynchronous-style FIFO PHY with explicit RD#/WR# pulse timing.

    A top-level READ/WRITE FSM arbitrates bus direction (with anti-starvation
    timers); two sub-FSMs generate the RD# and WR# pulses with datasheet
    timings converted from ns to clock cycles via self.ns().
    """
    dw = len(pads.data)
    self.clk_freq = clk_freq

    # timings (ns -> cycles); labels refer to the FTDI datasheet timing names.
    tRD = self.ns(30)  # RD# active pulse width (t4)
    tRDDataSetup = self.ns(14)  # RD# to DATA (t3)
    tWRDataSetup = self.ns(5)  # DATA to WR# active setup time (t8)
    tWR = self.ns(30)  # WR# active pulse width (t10)
    tMultiReg = 2  # MultiReg synchronizer latency in cycles

    # read fifo (FTDI --> SoC)
    read_fifo = stream.SyncFIFO(phy_description(dw), fifo_depth)

    # write fifo (SoC --> FTDI)
    write_fifo = stream.SyncFIFO(phy_description(dw), fifo_depth)

    self.submodules += read_fifo, write_fifo

    # sink / source interfaces
    self.sink = write_fifo.sink
    self.source = read_fifo.source

    # read / write arbitration
    wants_write = Signal()
    wants_read = Signal()

    # Synchronize the (asynchronous) FTDI status flags.
    txe_n = Signal()
    rxf_n = Signal()
    self.specials += [
        MultiReg(pads.txe_n, txe_n),
        MultiReg(pads.rxf_n, rxf_n)
    ]

    self.comb += [
        wants_write.eq(~txe_n & write_fifo.source.valid),
        wants_read.eq(~rxf_n & read_fifo.sink.ready),
    ]

    read_time_en, max_read_time = anti_starvation(self, read_time)
    write_time_en, max_write_time = anti_starvation(self, write_time)

    fsm = FSM(reset_state="READ")
    self.submodules += fsm

    # read_done/write_done: the corresponding sub-FSM is idle, so direction
    # may be switched; commuting: a switch is in progress this cycle.
    read_done = Signal()
    write_done = Signal()
    commuting = Signal()

    fsm.act("READ",
        read_time_en.eq(1),
        If(wants_write & read_done,
            If(~wants_read | max_read_time,
                commuting.eq(1),
                NextState("RTW"))))
    fsm.act("RTW", NextState("WRITE"))
    fsm.act("WRITE",
        write_time_en.eq(1),
        If(wants_read & write_done,
            If(~wants_write | max_write_time,
                commuting.eq(1),
                NextState("WTR"))))
    fsm.act("WTR", NextState("READ"))

    # databus tristate
    data_w = Signal(dw)
    data_r_async = Signal(dw)
    data_r = Signal(dw)
    data_oe = Signal()
    self.specials += [
        Tristate(pads.data, data_w, data_oe, data_r_async),
        MultiReg(data_r_async, data_r)
    ]

    # read actions
    pads.rd_n.reset = 1  # RD# is active-low; deasserted out of reset.

    read_fsm = FSM(reset_state="IDLE")
    self.submodules += read_fsm
    read_counter = Signal(8)
    read_fsm.act("IDLE",
        read_done.eq(1),
        NextValue(read_counter, 0),
        If(fsm.ongoing("READ") & wants_read,
            If(~commuting,
                NextState("PULSE_RD_N"))))
    read_fsm.act("PULSE_RD_N",
        pads.rd_n.eq(0),
        NextValue(read_counter, read_counter + 1),
        # Hold RD# for the longer of the pulse width and the data-setup
        # (+ synchronizer) delay before sampling.
        If(read_counter == max(tRD - 1, tRDDataSetup + tMultiReg - 1),
            NextState("ACQUIRE_DATA")))
    read_fsm.act("ACQUIRE_DATA",
        read_fifo.sink.valid.eq(1),
        read_fifo.sink.data.eq(data_r),
        NextState("WAIT_RXF_N"))
    # Wait for RXF# to deassert before starting the next read cycle.
    read_fsm.act("WAIT_RXF_N", If(rxf_n, NextState("IDLE")))

    # write actions
    pads.wr_n.reset = 1  # WR# is active-low; deasserted out of reset.

    write_fsm = FSM(reset_state="IDLE")
    self.submodules += write_fsm
    write_counter = Signal(8)
    write_fsm.act("IDLE",
        write_done.eq(1),
        NextValue(write_counter, 0),
        If(fsm.ongoing("WRITE") & wants_write,
            If(~commuting,
                NextState("SET_DATA"))))
    write_fsm.act("SET_DATA",
        # Drive data for tWRDataSetup before asserting WR#.
        data_oe.eq(1),
        data_w.eq(write_fifo.source.data),
        NextValue(write_counter, write_counter + 1),
        If(write_counter == (tWRDataSetup - 1),
            NextValue(write_counter, 0),
            NextState("PULSE_WR_N")))
    write_fsm.act("PULSE_WR_N",
        data_oe.eq(1),
        data_w.eq(write_fifo.source.data),
        pads.wr_n.eq(0),
        NextValue(write_counter, write_counter + 1),
        If(write_counter == (tWR - 1),
            NextState("WAIT_TXE_N")))
    write_fsm.act("WAIT_TXE_N",
        # Wait for TXE# to deassert, then ack the FIFO word just written.
        If(txe_n,
            write_fifo.source.ready.eq(1),
            NextState("IDLE")))
def __init__(self, platform, with_endpoint=False):
    """Wrapper around the Daisho usb3_top_usb3_pipe Verilog core.

    Bridges the core's raw data/active signals to LiteX streams (``sink`` /
    ``source``) and registers the Daisho Verilog sources with the platform.
    with_endpoint additionally wires the core's endpoint buffer interface to
    a USB3CoreControl module.
    """
    self.reset = Signal()
    self.sink = sink = stream.Endpoint([("data", 32), ("ctrl", 4)])
    self.source = source = stream.Endpoint([("data", 32), ("ctrl", 4)])

    # # #

    # Artificial after reset delay from LT_POLLING_IDLE to LT_POLLING_U0
    u0_timer = WaitTimer(32)
    u0_timer = ResetInserter()(u0_timer)
    self.submodules += u0_timer
    self.comb += u0_timer.wait.eq(1)
    self.comb += u0_timer.reset.eq(self.reset)
    # LTSSM state codes fed to the core (values match the core's encoding —
    # presumably; confirm against the Daisho sources).
    LT_POLLING_IDLE = 15
    LT_POLLING_U0 = 16
    ltssm_state = Signal(5)
    self.comb += [
        If(~u0_timer.done,
            ltssm_state.eq(LT_POLLING_IDLE)
        ).Else(
            ltssm_state.eq(LT_POLLING_U0))
    ]

    # RX (Sink) --------------------------------------------------------------------------------
    aligner = RXWordAligner(
        check_ctrl_only=True)  # FIXME: can we avoid alignment here?
    self.submodules.aligner = aligner
    self.comb += sink.connect(aligner.sink)
    in_data = Signal(32)
    in_datak = Signal(4)
    in_active = Signal()
    self.comb += [
        aligner.source.ready.eq(1),  # Always ready
        in_data.eq(aligner.source.data),
        in_datak.eq(aligner.source.ctrl),
        in_active.eq(aligner.source.valid),
    ]

    # TX (Source) ------------------------------------------------------------------------------
    # Daisho core does not support back-pressure (ready signal of LiteX's streams). To accomodate
    # that, we use a FIFO that absorbs the data bursts from the core and re-transmits datas to
    # the USB3 Pipe at the maximum allowed rate with back-pressure. This is a hack for our tests
    # and should be fixed correctly in the core.

    # FIFO
    out_fifo = stream.SyncFIFO([("data", 32), ("ctrl", 4)], 128)
    self.submodules += out_fifo

    # Map core signals to stream, re-generate first/last delimiters from active signal.
    out_data = Signal(32)
    out_datak = Signal(4)
    out_stall = Signal()
    out_active = Signal()
    out_active_d = Signal()  # out_active delayed by 1 cycle (edge detection)
    self.comb += out_fifo.sink.valid.eq(out_active_d)
    self.sync += [
        out_fifo.sink.data.eq(out_data),
        out_fifo.sink.ctrl.eq(out_datak),
        out_active_d.eq(out_active),
        out_fifo.sink.first.eq(out_active & ~out_active_d),  # rising edge
    ]
    self.comb += out_fifo.sink.last.eq(~out_active & out_active_d)  # falling edge

    # Connect FIFO to source.
    # When the FIFO is empty, keep source.valid asserted with zero data.
    self.comb += [
        If(out_fifo.source.valid,
            out_fifo.source.connect(source)
        ).Else(
            source.valid.eq(1),
            source.first.eq(0),
            source.last.eq(0),
            source.data.eq(0),
            source.ctrl.eq(0),
        )
    ]

    # Daisho USB3 core -------------------------------------------------------------------------
    usb3_top_params = dict(
        i_clk=ClockSignal(),
        i_reset_n=~self.reset,
        i_ltssm_state=ltssm_state,
        i_in_data=aligner.source.data,
        i_in_datak=aligner.source.ctrl,
        i_in_active=aligner.source.valid,
        o_out_data=out_data,
        o_out_datak=out_datak,
        o_out_active=out_active,
        i_out_stall=0,  # FIXME
    )

    # Daisho USB3 core endpoinst ---------------------------------------------------------------
    if with_endpoint:
        self.submodules.usb3_control = usb3_control = USB3CoreControl()
        usb3_top_params.update(
            i_buf_in_addr=usb3_control.buf_in_addr,
            i_buf_in_data=usb3_control.buf_in_data,
            i_buf_in_wren=usb3_control.buf_in_wren,
            o_buf_in_request=usb3_control.buf_in_request,
            o_buf_in_ready=usb3_control.buf_in_ready,
            i_buf_in_commit=usb3_control.buf_in_commit,
            i_buf_in_commit_len=usb3_control.buf_in_commit_len,
            o_buf_in_commit_ack=usb3_control.buf_in_commit_ack,
            i_buf_out_addr=usb3_control.buf_out_addr,
            o_buf_out_q=usb3_control.buf_out_q,
            o_buf_out_len=usb3_control.buf_out_len,
            o_buf_out_hasdata=usb3_control.buf_out_hasdata,
            i_buf_out_arm=usb3_control.buf_out_arm,
            o_buf_out_arm_ack=usb3_control.buf_out_arm_ack,
        )

    # Daisho USB3 instance ---------------------------------------------------------------------
    self.specials += Instance("usb3_top_usb3_pipe", **usb3_top_params)
    # Register the Daisho Verilog sources shipped next to this file.
    daisho_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "daisho")
    platform.add_verilog_include_path(os.path.join(daisho_path))
    platform.add_verilog_include_path(os.path.join(daisho_path, "usb3"))
    platform.add_source_dir(os.path.join(daisho_path, "usb3"))
def __init__(self, pads, clk_freq, fifo_depth=8, read_time=128, write_time=128):
    """FT601 synchronous FIFO PHY with async FIFOs between sys and usb domains.

    The bus-direction FSM and all pad registers run in the usb clock domain;
    ``sink``/``source`` are exposed in the sys domain through AsyncFIFOs.
    """
    dw = len(pads.data)

    # read fifo (FTDI --> SoC)
    read_fifo = stream.AsyncFIFO(phy_description(dw), fifo_depth)
    read_fifo = ClockDomainsRenamer({
        "write": "usb",
        "read": "sys"
    })(read_fifo)
    # Small usb-domain elastic buffer ahead of the CDC FIFO.
    read_buffer = stream.SyncFIFO(phy_description(dw), 4)
    read_buffer = ClockDomainsRenamer("usb")(read_buffer)
    self.comb += read_buffer.source.connect(read_fifo.sink)
    self.submodules += read_fifo, read_buffer

    # write fifo (SoC --> FTDI)
    write_fifo = stream.AsyncFIFO(phy_description(dw), fifo_depth)
    write_fifo = ClockDomainsRenamer({
        "write": "sys",
        "read": "usb"
    })(write_fifo)
    self.submodules += write_fifo

    # sink / source interfaces
    self.sink = write_fifo.sink
    self.source = read_fifo.source

    # read / write arbitration
    wants_write = Signal()
    wants_read = Signal()

    # txe_n/rxf_n are synchronous to the usb clock here: plain aliases,
    # no synchronizer needed.
    txe_n = Signal()
    rxf_n = Signal()

    self.comb += [
        txe_n.eq(pads.txe_n),
        rxf_n.eq(pads.rxf_n),
        wants_write.eq(~txe_n & write_fifo.source.valid),
        wants_read.eq(~rxf_n & read_fifo.sink.ready),
    ]

    read_time_en, max_read_time = anti_starvation(self, read_time)
    write_time_en, max_write_time = anti_starvation(self, write_time)

    # Tracks whether the word currently presented on data_w was accepted
    # by the FTDI chip (TXE# low) on the previous cycle.
    data_w_accepted = Signal(reset=1)

    fsm = FSM(reset_state="READ")
    self.submodules += ClockDomainsRenamer("usb")(fsm)
    fsm.act("READ",
        read_time_en.eq(1),
        If(wants_write,
            If(~wants_read | max_read_time, NextState("RTW"))))
    fsm.act("RTW", NextState("WRITE"))
    fsm.act("WRITE",
        write_time_en.eq(1),
        If(wants_read,
            If(~wants_write | max_write_time, NextState("WTR"))),
        write_fifo.source.ready.eq(wants_write & data_w_accepted))
    fsm.act("WTR", NextState("READ"))

    # databus tristate
    data_w = Signal(dw)
    data_r = Signal(dw)
    data_oe = Signal()
    self.specials += Tristate(pads.data, data_w, data_oe, data_r)

    # read / write actions
    # Control strobes are active-low; deasserted out of reset.
    pads.oe_n.reset = 1
    pads.rd_n.reset = 1
    pads.wr_n.reset = 1

    self.sync.usb += [
        If(fsm.ongoing("READ"),
            data_oe.eq(0),
            pads.oe_n.eq(0),
            pads.rd_n.eq(~wants_read),
            pads.wr_n.eq(1)
        ).Elif(fsm.ongoing("WRITE"),
            data_oe.eq(1),
            pads.oe_n.eq(1),
            pads.rd_n.eq(1),
            pads.wr_n.eq(~wants_write),
            data_w_accepted.eq(~txe_n)
        ).Else(
            # Turnaround states: take the bus back, all strobes idle.
            data_oe.eq(1),
            pads.oe_n.eq(~fsm.ongoing("WTR")),
            pads.rd_n.eq(1),
            pads.wr_n.eq(1)),
        # Incoming data is valid one cycle after RD# while RXF# is low.
        read_buffer.sink.valid.eq(~pads.rd_n & ~rxf_n),
        read_buffer.sink.data.eq(data_r),
        If(~txe_n & data_w_accepted,
            data_w.eq(write_fifo.source.data))
    ]
def __init__(self, axi, port, buffer_depth, base_address):
    """AXI read-channel to LiteDRAM native port adapter.

    Converts AXI AR bursts into per-beat native read commands (via
    AXIBurst2Beat), buffers read data, and restores id/last on the R channel
    from a parallel ID FIFO.  cmd_request/cmd_grant interface with an
    external command arbiter.
    """
    assert axi.address_width >= log2_int(base_address)
    assert axi.data_width == port.data_width
    self.cmd_request = Signal()
    self.cmd_grant = Signal()

    # # #

    can_read = Signal()

    # Byte-address to word-address shift.
    ashift = log2_int(port.data_width // 8)

    # Burst to Beat ----------------------------------------------------------------------------
    ar_buffer = stream.Buffer(
        ax_description(axi.address_width, axi.id_width))
    self.submodules += ar_buffer
    self.comb += axi.ar.connect(ar_buffer.sink)
    ar = stream.Endpoint(ax_description(axi.address_width, axi.id_width))
    ar_burst2beat = AXIBurst2Beat(ar_buffer.source, ar)
    self.submodules.ar_burst2beat = ar_burst2beat

    # Read buffer ------------------------------------------------------------------------------
    r_buffer = stream.SyncFIFO(r_description(axi.data_width, axi.id_width),
        buffer_depth, buffered=True)
    self.submodules.r_buffer = r_buffer

    # Read Buffer reservation ------------------------------------------------------------------
    # - Incremented when data is planned to be queued
    # - Decremented when data is dequeued
    r_buffer_queue = Signal()
    r_buffer_dequeue = Signal()
    r_buffer_level = Signal(max=buffer_depth + 1)
    self.comb += [
        r_buffer_queue.eq(port.cmd.valid & port.cmd.ready & ~port.cmd.we),
        r_buffer_dequeue.eq(r_buffer.source.valid & r_buffer.source.ready)
    ]
    self.sync += [
        If(r_buffer_queue,
            If(~r_buffer_dequeue,
                r_buffer_level.eq(r_buffer_level + 1))
        ).Elif(r_buffer_dequeue,
            r_buffer_level.eq(r_buffer_level - 1))
    ]
    # Only issue commands while buffer space is still unreserved.
    self.comb += can_read.eq(r_buffer_level != buffer_depth)

    # Read ID Buffer ---------------------------------------------------------------------------
    # id/last are recorded per issued beat and re-attached to read data,
    # since the native port does not carry them.
    id_buffer = stream.SyncFIFO([("id", axi.id_width)], buffer_depth)
    self.submodules += id_buffer
    self.comb += [
        id_buffer.sink.valid.eq(ar.valid & ar.ready),
        id_buffer.sink.last.eq(ar.last),
        id_buffer.sink.id.eq(ar.id),
        axi.r.last.eq(id_buffer.source.last),
        axi.r.id.eq(id_buffer.source.id),
        id_buffer.source.ready.eq(axi.r.valid & axi.r.ready)
    ]

    # Command ----------------------------------------------------------------------------------
    self.comb += [
        self.cmd_request.eq(ar.valid & can_read),
        If(self.cmd_grant,
            port.cmd.valid.eq(ar.valid & can_read),
            ar.ready.eq(port.cmd.ready & can_read),
            port.cmd.we.eq(0),
            port.cmd.addr.eq((ar.addr - base_address) >> ashift))
    ]

    # Read data --------------------------------------------------------------------------------
    self.comb += [
        port.rdata.connect(r_buffer.sink, omit={"bank"}),
        r_buffer.source.connect(axi.r, omit={"id", "last"}),
        axi.r.resp.eq(RESP_OKAY)
    ]
def __init__(self, n, address_width, address_align, nranks, settings):
    """Bank machine for DRAM bank ``n``: tracks the open row, generates
    activate/precharge/read/write commands for requests on ``req`` and
    enforces per-bank timings (tWTP, tRC, tRAS, tRP, tRCD).

    refresh_req / refresh_gnt handshake with the refresher; the resulting
    commands are emitted on the ``cmd`` endpoint for the multiplexer.
    """
    self.req = req = Record(cmd_layout(address_width))
    self.refresh_req = refresh_req = Signal()
    self.refresh_gnt = refresh_gnt = Signal()
    a = settings.geom.addressbits
    ba = settings.geom.bankbits + log2_int(nranks)
    self.cmd = cmd = stream.Endpoint(cmd_request_rw_layout(a, ba))

    # # #

    auto_precharge = Signal()

    # Command buffer ---------------------------------------------------------------------------
    cmd_buffer_layout = [("we", 1), ("addr", len(req.addr))]
    cmd_buffer_lookahead = stream.SyncFIFO(
        cmd_buffer_layout, settings.cmd_buffer_depth,
        buffered=settings.cmd_buffer_buffered)
    cmd_buffer = stream.Buffer(
        cmd_buffer_layout)  # 1 depth buffer to detect row change
    self.submodules += cmd_buffer_lookahead, cmd_buffer
    self.comb += [
        req.connect(cmd_buffer_lookahead.sink,
            keep={"valid", "ready", "we", "addr"}),
        cmd_buffer_lookahead.source.connect(cmd_buffer.sink),
        # The buffered command is consumed when its data phase completes.
        cmd_buffer.source.ready.eq(req.wdata_ready | req.rdata_valid),
        req.lock.eq(cmd_buffer_lookahead.source.valid |
            cmd_buffer.source.valid),
    ]

    slicer = _AddressSlicer(settings.geom.colbits, address_align)

    # Row tracking -----------------------------------------------------------------------------
    row = Signal(settings.geom.rowbits)
    row_opened = Signal()
    row_hit = Signal()
    row_open = Signal()
    row_close = Signal()
    self.comb += row_hit.eq(row == slicer.row(cmd_buffer.source.addr))
    self.sync += \
        If(row_close,
            row_opened.eq(0)
        ).Elif(row_open,
            row_opened.eq(1),
            row.eq(slicer.row(cmd_buffer.source.addr))
        )

    # Address generation -----------------------------------------------------------------------
    # Row address for ACTIVATE, column address (with A10 = auto-precharge)
    # for READ/WRITE.
    row_col_n_addr_sel = Signal()
    self.comb += [
        cmd.ba.eq(n),
        If(row_col_n_addr_sel,
            cmd.a.eq(slicer.row(cmd_buffer.source.addr))
        ).Else(
            cmd.a.eq((auto_precharge << 10) |
                slicer.col(cmd_buffer.source.addr)))
    ]

    # tWTP (write-to-precharge) controller -----------------------------------------------------
    write_latency = math.ceil(settings.phy.cwl / settings.phy.nphases)
    precharge_time = write_latency + settings.timing.tWR + settings.timing.tCCD  # AL=0
    self.submodules.twtpcon = twtpcon = tXXDController(precharge_time)
    self.comb += twtpcon.valid.eq(cmd.valid & cmd.ready & cmd.is_write)

    # tRC (activate-activate) controller -------------------------------------------------------
    self.submodules.trccon = trccon = tXXDController(settings.timing.tRC)
    self.comb += trccon.valid.eq(cmd.valid & cmd.ready & row_open)

    # tRAS (activate-precharge) controller -----------------------------------------------------
    self.submodules.trascon = trascon = tXXDController(
        settings.timing.tRAS)
    self.comb += trascon.valid.eq(cmd.valid & cmd.ready & row_open)

    # Auto Precharge generation ----------------------------------------------------------------
    # generate auto precharge when current and next cmds are to different rows
    if settings.with_auto_precharge:
        self.comb += \
            If(cmd_buffer_lookahead.source.valid & cmd_buffer.source.valid,
                If(slicer.row(cmd_buffer_lookahead.source.addr) !=
                   slicer.row(cmd_buffer.source.addr),
                    auto_precharge.eq(row_close == 0)
                )
            )

    # Control and command generation FSM -------------------------------------------------------
    # Note: tRRD, tFAW, tCCD, tWTR timings are enforced by the multiplexer
    self.submodules.fsm = fsm = FSM()
    fsm.act("REGULAR",
        If(refresh_req,
            NextState("REFRESH")
        ).Elif(cmd_buffer.source.valid,
            If(row_opened,
                If(row_hit,
                    # Open-row hit: issue the column (CAS) command directly.
                    cmd.valid.eq(1),
                    If(cmd_buffer.source.we,
                        req.wdata_ready.eq(cmd.ready),
                        cmd.is_write.eq(1),
                        cmd.we.eq(1),
                    ).Else(
                        req.rdata_valid.eq(cmd.ready),
                        cmd.is_read.eq(1)),
                    cmd.cas.eq(1),
                    If(cmd.ready & auto_precharge,
                        NextState("AUTOPRECHARGE"))
                ).Else(  # row_opened & ~row_hit
                    NextState("PRECHARGE"))
            ).Else(  # ~row_opened
                NextState("ACTIVATE"))))
    fsm.act("PRECHARGE",
        # Note: we are presenting the column address, A10 is always low
        If(twtpcon.ready & trascon.ready,
            cmd.valid.eq(1),
            If(cmd.ready, NextState("TRP")),
            cmd.ras.eq(1),
            cmd.we.eq(1),
            cmd.is_cmd.eq(1)),
        row_close.eq(1))
    fsm.act("AUTOPRECHARGE",
        # Precharge already issued via A10; only wait out the timings.
        If(twtpcon.ready & trascon.ready,
            NextState("TRP")),
        row_close.eq(1))
    fsm.act("ACTIVATE",
        If(trccon.ready,
            row_col_n_addr_sel.eq(1),
            row_open.eq(1),
            cmd.valid.eq(1),
            cmd.is_cmd.eq(1),
            If(cmd.ready, NextState("TRCD")),
            cmd.ras.eq(1)))
    fsm.act("REFRESH",
        # Grant the refresh only once the write-to-precharge timing allows it.
        If(twtpcon.ready,
            refresh_gnt.eq(1),
        ),
        row_close.eq(1),
        cmd.is_cmd.eq(1),
        If(~refresh_req, NextState("REGULAR")))
    fsm.delayed_enter("TRP", "ACTIVATE", settings.timing.tRP - 1)
    fsm.delayed_enter("TRCD", "REGULAR", settings.timing.tRCD - 1)
def __init__(self, buffer_depth=4):
    """Etherbone record receiver: decodes a buffered record stream into MMAP
    write/read transactions on ``source``.

    Record layout (per the FSM below): a base address word, then ``wcount``
    write data words, then (optionally) a base return address word followed
    by ``rcount`` read address words.
    """
    self.sink = sink = stream.Endpoint(etherbone_record_description(32))
    self.source = source = stream.Endpoint(etherbone_mmap_description(32))

    # # #

    fifo = stream.SyncFIFO(etherbone_record_description(32), buffer_depth,
        buffered=True)
    self.submodules += fifo
    self.comb += sink.connect(fifo.sink)

    # Latched base address (first word of a write burst / return address).
    base_addr = Signal(32)
    base_addr_update = Signal()
    self.sync += If(base_addr_update, base_addr.eq(fifo.source.data))

    # Word counter within the current burst.
    counter = Signal(max=512)
    counter_reset = Signal()
    counter_ce = Signal()
    self.sync += \
        If(counter_reset,
            counter.eq(0)
        ).Elif(counter_ce,
            counter.eq(counter + 1)
        )

    self.submodules.fsm = fsm = FSM(reset_state="IDLE")
    fsm.act("IDLE",
        fifo.source.ready.eq(1),
        counter_reset.eq(1),
        If(fifo.source.valid,
            base_addr_update.eq(1),
            If(fifo.source.wcount,
                NextState("RECEIVE_WRITES")
            ).Elif(fifo.source.rcount,
                NextState("RECEIVE_READS"))))
    fsm.act("RECEIVE_WRITES",
        source.valid.eq(fifo.source.valid),
        source.last.eq(counter == fifo.source.wcount - 1),
        source.count.eq(fifo.source.wcount),
        source.be.eq(fifo.source.byte_enable),
        # Writes are sequential from base_addr (word-addressed: drop 2 LSBs).
        source.addr.eq(base_addr[2:] + counter),
        source.we.eq(1),
        source.data.eq(fifo.source.data),
        fifo.source.ready.eq(source.ready),
        If(source.valid & source.ready,
            counter_ce.eq(1),
            If(source.last,
                If(fifo.source.rcount,
                    NextState("RECEIVE_BASE_RET_ADDR")
                ).Else(
                    NextState("IDLE")))))
    fsm.act("RECEIVE_BASE_RET_ADDR",
        counter_reset.eq(1),
        If(fifo.source.valid,
            base_addr_update.eq(1),
            NextState("RECEIVE_READS")))
    fsm.act("RECEIVE_READS",
        source.valid.eq(fifo.source.valid),
        source.last.eq(counter == fifo.source.rcount - 1),
        source.count.eq(fifo.source.rcount),
        source.base_addr.eq(base_addr),
        # Each read word carries its own address (word-addressed).
        source.addr.eq(fifo.source.data[2:]),
        fifo.source.ready.eq(source.ready),
        If(source.valid & source.ready,
            counter_ce.eq(1),
            If(source.last, NextState("IDLE"))))
def __init__(self, pads):
    """ULPI PHY interface: TX stream to the PHY data bus, RX stream (data +
    cmd flag) from it, with DDR input capture and rx/tx word counters in CSRs.

    The bus direction is owned by the PHY via pads.dir; odir is dir delayed
    by one cycle and is used to account for the turnaround cycle.
    """
    self.sink = sink = stream.Endpoint([('data', 8)])
    self.source = source = stream.Endpoint([('data', 8), ('cmd', 1)])
    self.reset = CSRStorage(reset=1)  # PHY held in reset until cleared
    self.rx_count_reset = CSR()
    self.rx_count = CSRStatus(32)
    self.tx_count_reset = CSR()
    self.tx_count = CSRStatus(32)

    # # #

    self.submodules.tx_fifo = tx_fifo = stream.SyncFIFO(
        self.sink.description, 4)
    self.submodules.rx_fifo = rx_fifo = stream.SyncFIFO(
        self.source.description, 4)
    self.comb += [
        sink.connect(tx_fifo.sink),
        rx_fifo.source.connect(source),
    ]

    self.sync += [
        # rx count
        If(self.rx_count_reset.re,
            self.rx_count.status.eq(0)
        ).Elif(source.valid,  # & source.ready not needed
            self.rx_count.status.eq(self.rx_count.status + 1)),
        # tx count
        If(self.tx_count_reset.re,
            self.tx_count.status.eq(0)
        ).Elif(sink.valid & sink.ready,
            self.tx_count.status.eq(self.tx_count.status + 1))
    ]

    self.data_t = TSTriple(8)
    self.specials += self.data_t.get_tristate(pads.data)

    last = Signal()
    odir = Signal()  # pads.dir delayed by one cycle
    data_i = Signal(8)
    # DDR input capture on each data bit (Q2 = second-edge sample used).
    for i in range(8):
        self.specials += Instance("IDDR",
            p_DDR_CLK_EDGE="SAME_EDGE",
            p_INIT_Q1=0,
            p_INIT_Q2=0,
            p_SRTYPE="ASYNC",
            i_C=ClockSignal("sys"),
            i_CE=1,
            i_S=0,
            i_R=0,
            i_D=self.data_t.i[i],
            o_Q1=Signal(),
            o_Q2=data_i[i])

    # PHY reset pad, whichever polarity the platform provides.
    if hasattr(pads, "rst"):
        self.comb += pads.rst.eq(self.reset.storage)
    if hasattr(pads, "rst_n"):
        self.comb += pads.rst_n.eq(~self.reset.storage)

    self.comb += [
        # Drive the bus only when the PHY is not driving it (dir low).
        self.data_t.oe.eq(~odir),
        If(tx_fifo.source.valid,
            self.data_t.o.eq(tx_fifo.source.data),
        ).Else(self.data_t.o.eq(0)),
        # PHY accepts TX data when it owns neither the bus nor stalls (nxt).
        tx_fifo.source.ready.eq(~pads.dir & pads.nxt),
        If(~pads.dir,
            pads.stp.eq(last),
        ),
        # Turnaround cycle (dir falling edge) marks end of an RX packet.
        rx_fifo.sink.last.eq(odir & ~pads.dir),
        rx_fifo.sink.data.eq(data_i),
    ]
    self.sync += [
        If(pads.nxt,
            last.eq(tx_fifo.source.last),
        ).Else(last.eq(0)),
        odir.eq(pads.dir),
        # nxt low during RX flags an RXCMD (status) byte.
        rx_fifo.sink.cmd.eq(~pads.nxt),
        # Valid once dir has been high for two consecutive cycles
        # (skips the turnaround cycle).
        rx_fifo.sink.valid.eq(odir & pads.dir)
    ]
def __init__(self, hres=640, vres=480, with_csi_interpreter=True):
    """Video terminal: renders characters received on `uart_sink` into the
    outgoing video stream, using an 8x16 bitmap font.

    vtg_sink:  video timing stream (hcount/vcount + sync/de signals).
    uart_sink: characters to display (optionally filtered through a CSI
               escape-sequence interpreter).
    source:    video data stream (timings + RGB pixel values).
    """
    self.enable = Signal(reset=1)
    self.vtg_sink = vtg_sink = stream.Endpoint(video_timing_layout)
    self.uart_sink = uart_sink = stream.Endpoint([("data", 8)])
    self.source = source = stream.Endpoint(video_data_layout)

    # # #

    # Font Mem.
    # ---------
    # NOTE(review): fetches the font over the network at build time.
    os.system("wget https://github.com/enjoy-digital/litex/files/6076336/ter-u16b.txt") # FIXME: Store Font in LiteX?
    os.system("mv ter-u16b.txt ter-u16b.bdf")
    font = import_bdf_font("ter-u16b.bdf")
    font_width = 8
    font_heigth = 16  # NOTE(review): "heigth" typo kept; renaming would touch code.
    font_mem = Memory(width=8, depth=4096, init=font)
    font_rdport = font_mem.get_port(write_capable=False, has_re=True)
    self.specials += font_mem, font_rdport

    # Terminal Mem.
    # -------------
    # Character buffer: one byte per character cell, blank-initialized.
    term_colums = 128 # 80 rounded to next power of two.
    term_lines = math.floor(vres/font_heigth)
    term_depth = term_colums * term_lines
    term_init = [ord(c) for c in [" "]*term_colums*term_lines]
    term_mem = Memory(width=font_width, depth=term_depth, init=term_init)
    term_wrport = term_mem.get_port(write_capable=True)
    term_rdport = term_mem.get_port(write_capable=False, has_re=True)
    self.specials += term_mem, term_wrport, term_rdport

    # UART Terminal Fill.
    # -------------------

    # Optional CSI Interpreter.
    if with_csi_interpreter:
        self.submodules.csi_interpreter = CSIInterpreter()
        self.comb += uart_sink.connect(self.csi_interpreter.sink)
        uart_sink = self.csi_interpreter.source

    # Small elastic buffer between UART and the fill FSM.
    self.submodules.uart_fifo = stream.SyncFIFO([("data", 8)], 8)
    self.comb += uart_sink.connect(self.uart_fifo.sink)
    uart_sink = self.uart_fifo.source

    # UART Reception and Terminal Fill.
    # Cursor position is the write-port address: low 7 bits = column,
    # remaining bits = line.
    x_term = term_wrport.adr[:7]
    y_term = term_wrport.adr[7:]
    y_term_rollover = Signal()  # set once the terminal has scrolled past the last line
    self.submodules.uart_fsm = uart_fsm = FSM(reset_state="IDLE")
    uart_fsm.act("IDLE",
        If(uart_sink.valid,
            If(uart_sink.data == ord("\n"),
                uart_sink.ready.eq(1), # Ack sink.
                NextState("INCR-Y")
            ).Elif(uart_sink.data == ord("\r"),
                uart_sink.ready.eq(1), # Ack sink.
                NextState("RST-X")
            ).Else(
                NextState("WRITE")
            )
        )
    )
    uart_fsm.act("WRITE",
        uart_sink.ready.eq(1),
        term_wrport.we.eq(1),
        term_wrport.dat_w.eq(uart_sink.data),
        NextState("INCR-X")
    )
    uart_fsm.act("RST-X",
        NextValue(x_term, 0),
        NextState("IDLE")
    )
    uart_fsm.act("INCR-X",
        NextValue(x_term, x_term + 1),
        NextState("IDLE"),
        # Wrap at the 80-column display width.
        If(x_term == (80 - 1),
            NextValue(x_term, 0),
            NextState("INCR-Y")
        )
    )
    uart_fsm.act("RST-Y",
        NextValue(y_term, 0),
        NextState("CLEAR-X")
    )
    uart_fsm.act("INCR-Y",
        NextValue(y_term, y_term + 1),
        NextState("CLEAR-X"),
        If(y_term == (term_lines - 1),
            NextValue(y_term_rollover, 1),
            NextState("RST-Y")
        )
    )
    uart_fsm.act("CLEAR-X",
        # Blank the new current line, one character per cycle.
        NextValue(x_term, x_term + 1),
        term_wrport.we.eq(1),
        term_wrport.dat_w.eq(ord(" ")),
        If(x_term == (80 - 1),
            NextValue(x_term, 0),
            NextState("IDLE")
        )
    )

    # Video Generation.
    # -----------------
    # Pipeline advances only when the timing stream moves.
    ce = (vtg_sink.valid & vtg_sink.ready)

    # Timing delay line.
    # Two buffer stages; presumably matches the terminal-mem + font-mem
    # read latency -- TODO confirm.
    latency = 2
    timing_bufs = [stream.Buffer(video_timing_layout) for i in range(latency)]
    self.comb += vtg_sink.connect(timing_bufs[0].sink)
    for i in range(len(timing_bufs) - 1):
        self.comb += timing_bufs[i].source.connect(timing_bufs[i+1].sink)
    self.comb += timing_bufs[-1].source.connect(source, keep={"valid", "ready", "last", "de", "hsync", "vsync"})
    self.submodules += timing_bufs

    # Compute X/Y position.
    # Character cell coordinates: pixel counters divided by the font size.
    x = vtg_sink.hcount[int(math.log2(font_width)):]
    y = vtg_sink.vcount[int(math.log2(font_heigth)):]
    y_rollover = Signal(8)
    self.comb += [
        If(~y_term_rollover,
            y_rollover.eq(y)
        ).Else(
            # FIXME: Use Modulo.
            If((y + y_term + 1) >= term_lines,
                y_rollover.eq(y + y_term + 1 - term_lines)
            ).Else(
                y_rollover.eq(y + y_term + 1)
            ),
        )
    ]

    # Get character from Terminal Mem.
    term_dat_r = Signal(font_width)
    self.comb += term_rdport.re.eq(ce)
    self.comb += term_rdport.adr.eq(x + y_rollover*term_colums)
    self.comb += [
        term_dat_r.eq(term_rdport.dat_r),
        If((x >= 80) | (y >= term_lines),
            term_dat_r.eq(ord(" ")), # Out of range, generate space.
        )
    ]

    # Translate character to video data through Font Mem.
    self.comb += font_rdport.re.eq(ce)
    # Font rows are 16 bytes per glyph; vcount[:4] selects the row.
    self.comb += font_rdport.adr.eq(term_dat_r*font_heigth + timing_bufs[0].source.vcount[:4])
    bit = Signal()
    cases = {}
    # Select the current pixel bit (MSB first) from the font row.
    for i in range(font_width):
        cases[i] = [bit.eq(font_rdport.dat_r[font_width-1-i])]
    self.comb += Case(timing_bufs[1].source.hcount[:int(math.log2(font_width))], cases)
    # FIXME: Allow static/dynamic Font color.
    self.comb += If(bit,
        source.r.eq(0xff),
        source.g.eq(0xff),
        source.b.eq(0xff),
    ).Else(
        source.r.eq(0x00),
        source.g.eq(0x00),
        source.b.eq(0x00)
    )
def __init__(self, dram_port, pattern_mem):
    """DMA DRAM reader BIST module.

    Reads the configured memory area through a DMA reader and compares
    each word against the pattern memory; mismatches are counted and,
    unless `skip_fifo` is set, queued for software inspection.

    Fix: corrected "nubmer" -> "number" in the generated ModuleDoc text
    (this string is emitted into user-facing documentation).
    """
    super().__init__(pattern_mem)

    self.doc = ModuleDoc("""
DMA DRAM reader.

Allows to check DRAM contents against a predefined pattern using DMA.

Pattern
-------

{common}

Reading errors
--------------

This module allows to check the locations of errors in the memory.
It scans the configured memory area and compares the values read to
the predefined pattern. If `skip_fifo` is 0, this module will stop
after each error encountered, so that it can be examined. Wait until
the `error_ready` CSR is 1. Then use the CSRs `error_offset`,
`error_data` and `error_expected` to examine the errors in the current
transfer. To continue reading, write 1 to `error_continue` CSR.
Setting `skip_fifo` to 1 will disable this behaviour entirely.

The final number of errors can be read from `error_count`.
NOTE: This value represents the number of erroneous *DMA transfers*.

The current progress can be read from the `done` CSR.
""".format(common=BISTModule.__doc__))

    # One record per detected mismatch.
    error_desc = [
        ('offset', 32),
        ('data', dram_port.data_width),
        ('expected', dram_port.data_width),
    ]

    self.error_count = Signal(32)  # number of erroneous DMA transfers
    self.skip_fifo = Signal()      # when set, errors are counted but not queued
    self.error = stream.Endpoint(error_desc)

    # FIXME: Increase fifo depth
    dma = LiteDRAMDMAReader(dram_port)
    self.submodules += dma

    # ----------------- Address FSM -----------------
    counter_addr = Signal(32)

    self.comb += [
        self.addr_port.adr.eq(counter_addr & self.data_mask),
        # DMA address = offset from the address memory + masked counter.
        dma.sink.address.eq(self.addr_port.dat_r +
                            (counter_addr & self.mem_mask)),
    ]

    # Using temporary state 'WAIT' to obtain address offset from memory
    self.submodules.fsm_addr = fsm_addr = FSM()
    fsm_addr.act(
        "READY",
        If(
            self.start,
            NextValue(counter_addr, 0),
            NextState("WAIT"),
        ))
    fsm_addr.act(
        "WAIT",
        # TODO: we could pipeline the access
        If(counter_addr >= self.count,
           NextState("READY")).Else(NextState("WR_ADDR")))
    fsm_addr.act(
        "WR_ADDR",
        dma.sink.valid.eq(1),
        If(dma.sink.ready,
           NextValue(counter_addr, counter_addr + 1),
           NextState("WAIT")))

    # ------------- Pattern FSM ----------------
    counter_gen = Signal(32)

    # Unmatched memory offsets
    error_fifo = stream.SyncFIFO(error_desc, depth=2, buffered=False)
    self.submodules += error_fifo

    self.comb += [
        self.data_port.adr.eq(counter_gen & self.data_mask),
        self.error.offset.eq(error_fifo.source.offset),
        self.error.data.eq(error_fifo.source.data),
        self.error.expected.eq(error_fifo.source.expected),
        self.error.valid.eq(error_fifo.source.valid),
        # With skip_fifo set, errors are drained immediately (no stall).
        error_fifo.source.ready.eq(self.error.ready | self.skip_fifo),
        self.done.eq(counter_gen),
    ]

    self.submodules.fsm_pattern = fsm_pattern = FSM()
    fsm_pattern.act(
        "READY",
        self.ready.eq(1),
        If(
            self.start,
            NextValue(counter_gen, 0),
            NextValue(self.error_count, 0),
            NextState("WAIT"),
        ))
    fsm_pattern.act(
        "WAIT",
        # TODO: we could pipeline the access
        If(counter_gen >= self.count,
           NextState("READY")).Else(NextState("RD_DATA")))
    fsm_pattern.act(
        "RD_DATA",
        dma.source.ready.eq(1),
        If(
            dma.source.valid,
            NextValue(counter_gen, counter_gen + 1),
            If(
                # Compare the DMA word to the expected pattern word.
                dma.source.data != self.data_port.dat_r,
                NextValue(self.error_count, self.error_count + 1),
                NextValue(error_fifo.sink.offset, counter_gen),
                NextValue(error_fifo.sink.data, dma.source.data),
                NextValue(error_fifo.sink.expected, self.data_port.dat_r),
                If(self.skip_fifo, NextState("WAIT")).Else(
                    NextState("WR_ERR"))).Else(NextState("WAIT"))))
    fsm_pattern.act(
        "WR_ERR",
        error_fifo.sink.valid.eq(1),
        If(error_fifo.sink.ready | self.skip_fifo, NextState("WAIT")))
def __init__(self, port_from, port_to, reverse=False):
    """Up-convert a narrow DRAM native port (port_from) to a wider one
    (port_to), gathering up to `ratio` narrow commands into one wide
    command and slicing the wide data into chunks selected by `sel`.
    """
    assert port_from.clock_domain == port_to.clock_domain
    assert port_from.data_width < port_to.data_width
    assert port_from.mode == port_to.mode
    if port_to.data_width % port_from.data_width:
        raise ValueError("Ratio must be an int")

    # # #

    ratio = port_to.data_width // port_from.data_width
    mode = port_from.mode

    # Command ----------------------------------------------------------------------------------

    # Defines cmd type and the chunks that have been requested for the current port_to command.
    sel = Signal(ratio)
    # NOTE(review): SyncFIFO depth is 0 here -- confirm this is intended.
    cmd_buffer = stream.SyncFIFO([("sel", ratio), ("we", 1)], 0)
    self.submodules += cmd_buffer

    # Store last received command.
    cmd_addr = Signal.like(port_from.cmd.addr)
    cmd_we = Signal()
    cmd_last = Signal()
    # Indicates that we need to proceed to the next port_to command.
    next_cmd = Signal()
    addr_changed = Signal()
    # Signals that indicate that write/read conversion has finished.
    wdata_finished = Signal()
    rdata_finished = Signal()
    # Used to prevent reading old memory value if previous command has written the same address.
    read_lock = Signal()
    read_unlocked = Signal()
    rw_collision = Signal()

    # Different order depending on read/write:
    # - read:  new -> cmd -> fill -> commit -> new
    # - write: new -> fill -> commit -> cmd -> new
    # For writes we have to send the command at the end to prevent situations when, during
    # a burst, LiteDRAM expects data (wdata_ready=1) but write converter is still converting.
    self.submodules.fsm = fsm = FSM()
    fsm.act(
        "NEW",
        port_from.cmd.ready.eq(port_from.cmd.valid & ~read_lock),
        If(
            port_from.cmd.ready,
            NextValue(cmd_addr, port_from.cmd.addr),
            NextValue(cmd_we, port_from.cmd.we),
            NextValue(cmd_last, port_from.cmd.last),
            # Mark the chunk addressed by the low address bits.
            NextValue(sel, 1 << port_from.cmd.addr[:log2_int(ratio)]),
            If(
                port_from.cmd.we,
                NextState("FILL"),
            ).Else(NextState("CMD"), )))
    fsm.act(
        "CMD",
        port_to.cmd.valid.eq(1),
        port_to.cmd.we.eq(cmd_we),
        port_to.cmd.addr.eq(cmd_addr[log2_int(ratio):]),
        If(port_to.cmd.ready,
           If(cmd_we, NextState("NEW")).Else(NextState("FILL"))))
    fsm.act(
        "FILL",
        If(next_cmd, NextState("COMMIT")).
        Else(
            # Acknowledge incoming commands, while filling `sel`.
            port_from.cmd.ready.eq(port_from.cmd.valid),
            NextValue(cmd_last, port_from.cmd.last),
            If(
                port_from.cmd.valid,
                NextValue(sel,
                          sel | 1 << port_from.cmd.addr[:log2_int(ratio)]))))
    fsm.act(
        "COMMIT",
        cmd_buffer.sink.valid.eq(1),
        cmd_buffer.sink.sel.eq(sel),
        cmd_buffer.sink.we.eq(cmd_we),
        If(cmd_buffer.sink.ready,
           If(cmd_we, NextState("CMD")).Else(NextState("NEW"))))

    self.comb += [
        # Pop the committed command once its datapath transfer completes.
        cmd_buffer.source.ready.eq(wdata_finished | rdata_finished),
        addr_changed.eq(cmd_addr[log2_int(ratio):] !=
                        port_from.cmd.addr[log2_int(ratio):]),
        # Collision happens on write to read transition when address does not change.
        rw_collision.eq(cmd_we & (port_from.cmd.valid & ~port_from.cmd.we)
                        & ~addr_changed),
        # Go to the next command if one of the following happens:
        # - port_to address changes.
        # - cmd type changes.
        # - we received all the `ratio` commands.
        # - this is the last command in a sequence.
        # - master requests a flush (even after the command has been sent).
        next_cmd.eq(addr_changed | (cmd_we != port_from.cmd.we)
                    | (sel == 2**ratio - 1) | cmd_last | port_from.flush),
    ]

    self.sync += [
        # Block sending read command if we have just written to that address
        If(
            wdata_finished,
            read_lock.eq(0),
            read_unlocked.eq(1),
        ).Elif(rw_collision & ~port_to.cmd.valid & ~read_unlocked,
               read_lock.eq(1)),
        If(port_from.cmd.valid & port_from.cmd.ready, read_unlocked.eq(0))
    ]

    # Read Datapath ----------------------------------------------------------------------------
    if mode in ["read", "both"]:
        # Queue received data not to lose it when it comes too fast.
        rdata_fifo = stream.SyncFIFO(port_to.rdata.description, ratio - 1)
        rdata_converter = stream.StrideConverter(
            description_from=port_to.rdata.description,
            description_to=port_from.rdata.description,
            reverse=reverse)
        self.submodules += rdata_fifo, rdata_converter

        # Shift register with a bitmask of current chunk.
        rdata_chunk = Signal(ratio, reset=1)
        rdata_chunk_valid = Signal()
        self.sync += \
            If(rdata_converter.source.valid & rdata_converter.source.ready,
               rdata_chunk.eq(Cat(rdata_chunk[ratio-1], rdata_chunk[:ratio-1]))
            )

        self.comb += [
            # port_to -> rdata_fifo -> rdata_converter -> port_from
            port_to.rdata.connect(rdata_fifo.sink),
            rdata_fifo.source.connect(rdata_converter.sink),
            rdata_chunk_valid.eq(
                (cmd_buffer.source.sel & rdata_chunk) != 0),
            If(
                cmd_buffer.source.valid & ~cmd_buffer.source.we,
                # If that chunk is valid we send it to the user port and wait for ready.
                If(rdata_chunk_valid,
                   port_from.rdata.valid.eq(rdata_converter.source.valid),
                   port_from.rdata.data.eq(rdata_converter.source.data),
                   rdata_converter.source.ready.eq(port_from.rdata.ready)).
                Else(
                    # If this chunk was not requested in `sel`, ignore it.
                    rdata_converter.source.ready.eq(1)),
                # Finished when the last chunk of the wide word is consumed.
                rdata_finished.eq(rdata_converter.source.valid
                                  & rdata_converter.source.ready
                                  & rdata_chunk[ratio - 1])),
        ]

    # Write Datapath ---------------------------------------------------------------------------
    if mode in ["write", "both"]:
        # Queue write data not to miss it when the lower chunks haven't been requested.
        wdata_fifo = stream.SyncFIFO(port_from.wdata.description, ratio - 1)
        wdata_buffer = stream.SyncFIFO(port_to.wdata.description, 1)
        wdata_converter = stream.StrideConverter(
            description_from=port_from.wdata.description,
            description_to=port_to.wdata.description,
            reverse=reverse)
        self.submodules += wdata_converter, wdata_fifo, wdata_buffer

        # Shift register with a bitmask of current chunk.
        wdata_chunk = Signal(ratio, reset=1)
        wdata_chunk_valid = Signal()
        self.sync += \
            If(wdata_converter.sink.valid & wdata_converter.sink.ready,
               wdata_chunk.eq(Cat(wdata_chunk[ratio-1], wdata_chunk[:ratio-1]))
            )

        # Replicate `sel` bits to match the width of port_to.wdata.we.
        wdata_sel = Signal.like(port_to.wdata.we)
        if reverse:
            wdata_sel_parts = [
                Replicate(cmd_buffer.source.sel[i],
                          port_to.wdata.we.nbits // sel.nbits)
                for i in reversed(range(ratio))
            ]
        else:
            wdata_sel_parts = [
                Replicate(cmd_buffer.source.sel[i],
                          port_to.wdata.we.nbits // sel.nbits)
                for i in range(ratio)
            ]
        self.sync += \
            If(cmd_buffer.source.valid & cmd_buffer.source.we
               & wdata_chunk[ratio - 1],
               wdata_sel.eq(Cat(wdata_sel_parts))
            )

        self.comb += [
            # port_from -> wdata_fifo -> wdata_converter
            port_from.wdata.connect(wdata_fifo.sink),
            wdata_buffer.source.connect(port_to.wdata),
            wdata_chunk_valid.eq(
                (cmd_buffer.source.sel & wdata_chunk) != 0),
            If(
                cmd_buffer.source.valid & cmd_buffer.source.we,
                # When the current chunk is valid, read it from wdata_fifo.
                If(
                    wdata_chunk_valid,
                    wdata_converter.sink.valid.eq(wdata_fifo.source.valid),
                    wdata_converter.sink.data.eq(wdata_fifo.source.data),
                    wdata_converter.sink.we.eq(wdata_fifo.source.we),
                    wdata_fifo.source.ready.eq(wdata_converter.sink.ready),
                ).
                Else(
                    # If chunk is not valid, send any data and do not advance fifo.
                    wdata_converter.sink.valid.eq(1),
                ),
            ),
            wdata_buffer.sink.valid.eq(wdata_converter.source.valid),
            wdata_buffer.sink.data.eq(wdata_converter.source.data),
            # Mask out byte enables of chunks that were never requested.
            wdata_buffer.sink.we.eq(wdata_converter.source.we & wdata_sel),
            wdata_converter.source.ready.eq(wdata_buffer.sink.ready),
            wdata_finished.eq(wdata_converter.sink.valid
                              & wdata_converter.sink.ready
                              & wdata_chunk[ratio - 1]),
        ]
def __init__(self, port_from, port_to, reverse=False):
    """Read-only up-converter: maps `ratio` consecutive narrow read
    commands on port_from to a single wide command on port_to, then
    slices the wide read data back into narrow words.
    """
    assert port_from.clock_domain == port_to.clock_domain
    assert port_from.data_width < port_to.data_width
    assert port_from.mode == port_to.mode
    assert port_from.mode == "read"
    if port_to.data_width % port_from.data_width:
        raise ValueError("Ratio must be an int")

    # # #

    ratio = port_to.data_width // port_from.data_width

    # Command ----------------------------------------------------------------------------------
    cmd_buffer = stream.SyncFIFO([("sel", ratio)], 4)
    self.submodules += cmd_buffer

    # Counts narrow commands; counter == 0 triggers the wide command.
    # NOTE(review): counter wraps naturally at `ratio` (max=ratio) --
    # confirm ratio is a power of two for the wrap to line up.
    counter = Signal(max=ratio)
    counter_ce = Signal()
    self.sync += \
        If(counter_ce,
            counter.eq(counter + 1)
        )

    self.comb += \
        If(port_from.cmd.valid,
            If(counter == 0,
                # Issue one wide command for the whole group.
                port_to.cmd.valid.eq(1),
                port_to.cmd.addr.eq(port_from.cmd.addr[log2_int(ratio):]),
                port_from.cmd.ready.eq(port_to.cmd.ready),
                counter_ce.eq(port_to.cmd.ready)
            ).Else(
                # Remaining narrow commands of the group are absorbed.
                port_from.cmd.ready.eq(1),
                counter_ce.eq(1)
            )
        )

    # TODO: fix sel
    self.comb += \
        If(port_to.cmd.valid & port_to.cmd.ready,
            cmd_buffer.sink.valid.eq(1),
            cmd_buffer.sink.sel.eq(2**ratio-1)
        )

    # Datapath ---------------------------------------------------------------------------------
    rdata_buffer = stream.Buffer(port_to.rdata.description)
    rdata_converter = stream.StrideConverter(port_to.rdata.description,
                                             port_from.rdata.description,
                                             reverse=reverse)
    self.submodules += rdata_buffer, rdata_converter

    # Shift register with a bitmask of the current narrow chunk.
    rdata_chunk = Signal(ratio, reset=1)
    rdata_chunk_valid = Signal()
    self.sync += \
        If(rdata_converter.source.valid & rdata_converter.source.ready,
            rdata_chunk.eq(Cat(rdata_chunk[ratio-1], rdata_chunk[:ratio-1]))
        )

    self.comb += [
        # port_to -> rdata_buffer -> rdata_converter -> port_from
        port_to.rdata.connect(rdata_buffer.sink),
        rdata_buffer.source.connect(rdata_converter.sink),
        rdata_chunk_valid.eq((cmd_buffer.source.sel & rdata_chunk) != 0),
        # On flush, drain the converter; otherwise forward only the
        # chunks that were requested in `sel`.
        If(port_from.flush, rdata_converter.source.ready.eq(1)).Elif(
            cmd_buffer.source.valid,
            If(rdata_chunk_valid,
               port_from.rdata.valid.eq(rdata_converter.source.valid),
               port_from.rdata.data.eq(rdata_converter.source.data),
               rdata_converter.source.ready.eq(
                   port_from.rdata.ready)).Else(
                       rdata_converter.source.ready.eq(1))),
        # Pop the command once the last chunk has been handled.
        cmd_buffer.source.ready.eq(rdata_converter.source.ready
                                   & rdata_chunk[ratio - 1])
    ]
def __init__(self, data_width, depth):
    """Scope storage: captures `sink` samples in the "scope" clock domain
    into a FIFO and exposes them to the sys-domain CSR bus through an
    async FIFO.

    CSRs: `enable` starts a capture (rising edge), `length`/`offset`
    configure how many samples are kept before/after the trigger hit,
    `done` flags completion, `mem_valid`/`mem_data` read samples out.
    """
    self.sink = sink = stream.Endpoint(core_layout(data_width))

    self.enable = CSRStorage()
    self.done = CSRStatus()

    self.length = CSRStorage(bits_for(depth))
    self.offset = CSRStorage(bits_for(depth))

    self.mem_valid = CSRStatus()
    self.mem_data = CSRStatus(data_width)

    # # #

    # Control re-synchronization (sys -> scope domain).
    enable = Signal()
    enable_d = Signal()  # delayed copy, for rising-edge detection
    self.specials += MultiReg(self.enable.storage, enable, "scope")
    self.sync.scope += enable_d.eq(enable)

    # NOTE(review): Signal().like(...) creates an instance then calls
    # like() on it -- confirm vs the usual Signal.like(...) classmethod.
    length = Signal().like(self.length.storage)
    offset = Signal().like(self.offset.storage)
    self.specials += MultiReg(self.length.storage, length, "scope")
    self.specials += MultiReg(self.offset.storage, offset, "scope")

    # Status re-synchronization (scope -> sys domain).
    done = Signal()
    self.specials += MultiReg(done, self.done.status)

    # Memory
    # Capture FIFO in the scope domain; async FIFO crosses to sys for CSR reads.
    mem = stream.SyncFIFO([("data", data_width)], depth, buffered=True)
    mem = ClockDomainsRenamer("scope")(mem)
    cdc = stream.AsyncFIFO([("data", data_width)], 4)
    cdc = ClockDomainsRenamer({"write": "scope", "read": "sys"})(cdc)
    self.submodules += mem, cdc

    # Flush
    # Timer used to drain any stale samples before a new capture.
    mem_flush = WaitTimer(depth)
    mem_flush = ClockDomainsRenamer("scope")(mem_flush)
    self.submodules += mem_flush

    # FSM
    fsm = FSM(reset_state="IDLE")
    fsm = ClockDomainsRenamer("scope")(fsm)
    self.submodules += fsm
    fsm.act("IDLE",
            done.eq(1),
            If(enable & ~enable_d, NextState("FLUSH")),
            sink.ready.eq(1),
            # While idle, drain the capture FIFO towards the CSR reader.
            mem.source.connect(cdc.sink))
    fsm.act("FLUSH",
            sink.ready.eq(1),
            mem_flush.wait.eq(1),
            mem.source.ready.eq(1),
            If(mem_flush.done, NextState("WAIT")))
    fsm.act("WAIT",
            sink.connect(mem.sink, omit={"hit"}),
            If(sink.valid & sink.hit, NextState("RUN")),
            # Keep only `offset` pre-trigger samples in the FIFO.
            mem.source.ready.eq(mem.level >= offset))
    fsm.act("RUN",
            sink.connect(mem.sink, omit={"hit"}),
            If(
                mem.level >= length,
                NextState("IDLE"),
            ))

    # Memory read
    self.comb += [
        self.mem_valid.status.eq(cdc.source.valid),
        # Pop on CSR read, or continuously drain while disabled.
        cdc.source.ready.eq(self.mem_data.we | ~self.enable.storage),
        self.mem_data.status.eq(cdc.source.data)
    ]
def __init__(self, crc_class, layout):
    """CRC checker: delays the stream by `ratio` words through a FIFO so
    the trailing CRC can be checked while the payload is forwarded;
    `source.error` combines the upstream error with the CRC result.

    crc_class: CRC engine class, instantiated with the data width.
    layout:    stream layout of sink/source (must carry data/error/last).
    """
    self.sink = sink = stream.Endpoint(layout)
    self.source = source = stream.Endpoint(layout)
    self.busy = Signal()

    # # #

    dw = len(sink.data)
    crc = crc_class(dw)
    self.submodules += crc
    # Number of data words covered by the CRC residue.
    ratio = crc.width//dw

    error = Signal()  # NOTE(review): appears unused in this block.
    fifo = ResetInserter()(stream.SyncFIFO(layout, ratio + 1))
    self.submodules += fifo

    fsm = FSM(reset_state="RESET")
    self.submodules += fsm

    fifo_in = Signal()
    fifo_out = Signal()
    fifo_full = Signal()

    self.comb += [
        fifo_full.eq(fifo.level == ratio),
        # Accept input while the delay line fills, or in lockstep with output.
        fifo_in.eq(sink.valid & (~fifo_full | fifo_out)),
        fifo_out.eq(source.valid & source.ready),
        sink.connect(fifo.sink),
        fifo.sink.valid.eq(fifo_in),
        self.sink.ready.eq(fifo_in),
        # Output becomes valid only once the delay line is full.
        source.valid.eq(sink.valid & fifo_full),
        source.last.eq(sink.last),
        fifo.source.ready.eq(fifo_out),
        source.payload.eq(fifo.source.payload),
        source.error.eq(sink.error | crc.error),
    ]

    fsm.act("RESET",
        crc.reset.eq(1),
        fifo.reset.eq(1),
        NextState("IDLE"),
    )
    fsm.act("IDLE",
        crc.data.eq(sink.data),
        If(sink.valid & sink.ready,
            crc.ce.eq(1),
            NextState("COPY")
        )
    )
    fsm.act("COPY",
        crc.data.eq(sink.data),
        If(sink.valid & sink.ready,
            crc.ce.eq(1),
            If(sink.last,
                # Re-arm the CRC engine for the next packet.
                NextState("RESET")
            )
        )
    )
    self.comb += self.busy.eq(~fsm.ongoing("IDLE"))