def __init__(self, sys_clk_freq, baudrate, data_bits=8, stop_bits=1):
    """Serial (UART) transmitter core.

    sys_clk_freq -- system clock frequency in Hz.
    baudrate     -- target baud rate; one bit lasts
                    sys_clk_freq // baudrate system clock cycles.
    data_bits    -- number of data bits per frame, sent LSB first.
    stop_bits    -- number of stop bits appended after the data bits.
    """
    # Module's interface
    self.tx_data = Signal(data_bits)  # byte to transmit, shifted out LSB first
    self.tx_en = Signal()             # transmission in progress / start flag
    self.tx = Signal()                # serial output line (idle high)
    # NOTE(review): SyncFIFO's signature is (width, depth); (16, data_bits)
    # looks swapped -- confirm intent. The FIFO is not connected inside this
    # block, so it is left untouched to avoid breaking external users.
    self.submodules.tx_fifo = SyncFIFO(16, data_bits)

    # # #

    ce = Signal()  # clock enable: pulses once per bit period
    # Use integer arithmetic; the original float division relied on int()
    # truncation later on.
    divisor = sys_clk_freq // baudrate
    # tx_n counts the bit position: 0 = start bit, 1..data_bits = data bits,
    # data_bits + stop_bits = end of frame. It must be able to hold the
    # terminal value itself: Signal(max=n) stores 0..n-1, so the original
    # max=data_bits + stop_bits made the Elif comparison below unreachable
    # and the frame never terminated.
    tx_n = Signal(max=data_bits + stop_bits + 1)
    counter_preload = divisor - 1
    # counter must be able to hold counter_preload; the original
    # Signal(max=int(n - 1)) was one too small to store its own preload.
    counter = Signal(max=counter_preload + 1)
    # Combinatorial assignments
    self.comb += ce.eq(counter == 0)
    # Synchronous assignments
    self.sync += [
        If(~self.tx_en,
            # Idle: keep the line high.
            self.tx.eq(1)
        ).Else(
            If(ce,
                If(tx_n == 0,
                    # Start bit: drive the line low.
                    tx_n.eq(tx_n + 1),
                    self.tx.eq(0),
                ).Elif(tx_n == (data_bits + stop_bits),
                    # Frame complete: return to idle and clear tx_en.
                    tx_n.eq(0),
                    self.tx.eq(1),
                    self.tx_en.eq(0)
                ).Else(
                    # Shift out the next data bit, LSB first.
                    self.tx.eq(self.tx_data[0]),
                    self.tx_data.eq(Cat(self.tx_data[1:], 0)),
                    tx_n.eq(tx_n + 1)
                ),
                counter.eq(counter_preload)
            ).Else(
                counter.eq(counter - 1)
            )
        )
    ]
def __init__(self, clk_freq, baud_rate):
    # UART receiver with an RX FIFO: bytes flagged ready by the raw RX
    # core are pushed into an 8-bit wide, 1024-deep FIFO for the consumer
    # to drain through the dout/re/readable interface.
    self.submodules.rxcore = RX(clk_freq, baud_rate)
    self.submodules.fifo = SyncFIFO(8, 1024)
    self.comb += [self.fifo.din.eq(self.rxcore.data)]
    self.submodules.fsm = FSM(reset_state='IDLE')
    # IDLE: when the RX core flags a received byte, queue it (only if the
    # FIFO has room -- otherwise the byte is silently dropped), ack the
    # core and go wait for it to deassert ready.
    self.fsm.act(
        'IDLE',
        If(
            self.rxcore.ready,
            If(
                self.fifo.writable,
                self.fifo.we.eq(1),
            ),
            self.rxcore.ack.eq(1),
            NextState('READING'),
        ).Else(self.fifo.we.eq(0), ))
    # READING: hold until the RX core drops ready, then return to IDLE
    # for the next byte (prevents double-queuing a single byte).
    self.fsm.act(
        'READING',
        self.fifo.we.eq(0),
        self.rxcore.ack.eq(0),
        If(
            ~self.rxcore.ready,
            NextState('IDLE'),
        ),
    )
    # Read-side interface, exposed directly from the FIFO / RX core.
    self.dout = self.fifo.dout
    self.re = self.fifo.re
    self.readable = self.fifo.readable
    self.rx = self.rxcore.rx
    self.io = {self.dout, self.re, self.readable, self.rx}
def __init__(self, inner, overflow_depth):
    # FIFO wrapper adding a small overflow FIFO in front of ``inner``:
    # writes arriving while ``inner`` is full are parked in the overflow
    # FIFO and drained back into ``inner``, in order, as soon as it has
    # room again.
    super().__init__(inner.width, inner.depth)
    self.submodules.inner = inner
    # Read side passes straight through to the inner FIFO.
    self.dout = inner.dout
    self.re = inner.re
    self.readable = inner.readable

    ###

    overflow = SyncFIFO(inner.width, overflow_depth)
    self.submodules += overflow
    self.comb += [
        # Drain the overflow FIFO into the inner FIFO with priority over
        # new writes; pop it only when the inner FIFO actually has room.
        If(overflow.readable,
            inner.din.eq(overflow.dout),
            inner.we.eq(1),
            overflow.re.eq(inner.writable)
        ),
        # New writes go directly into the inner FIFO when it has room and
        # the overflow FIFO is empty (preserving order); otherwise they
        # are diverted into the overflow FIFO.
        If(inner.writable & ~overflow.readable,
            inner.din.eq(self.din),
            inner.we.eq(self.we),
            self.writable.eq(inner.writable)
        ).Else(
            overflow.din.eq(self.din),
            overflow.we.eq(self.we),
            self.writable.eq(overflow.writable)
        )
    ]
def __init__(self, inner, skid_depth):
    # FIFO wrapper adding a skid buffer in front of ``inner``: writes
    # arriving while ``inner`` is full land in the skid FIFO and are
    # drained back into ``inner``, in order, once it has room again.
    # (Same structure as the overflow-FIFO wrapper elsewhere in this file.)
    super().__init__(inner.width, inner.depth)
    self.submodules.inner = inner
    # Read side passes straight through to the inner FIFO.
    self.dout = inner.dout
    self.re = inner.re
    self.readable = inner.readable

    ###

    skid = SyncFIFO(inner.width, skid_depth)
    self.submodules += skid
    self.comb += [
        # Drain the skid FIFO into the inner FIFO with priority over new
        # writes; pop it only when the inner FIFO actually has room.
        If(skid.readable,
            inner.din.eq(skid.dout),
            inner.we.eq(1),
            skid.re.eq(inner.writable)
        ),
        # New writes go directly into the inner FIFO when possible and
        # the skid FIFO is empty (preserving order); otherwise they are
        # diverted into the skid FIFO.
        If(inner.writable & ~skid.readable,
            inner.din.eq(self.din),
            inner.we.eq(self.we),
            self.writable.eq(inner.writable)
        ).Else(
            skid.din.eq(self.din),
            skid.we.eq(self.we),
            self.writable.eq(skid.writable)
        )
    ]
def __init__(self, lasmim, fifo_depth=None):
    # LASMI writer: accepts (address, data) pairs on ``address_data``,
    # issues write requests on the LASMI port, and buffers the write data
    # in a FIFO until the controller's data-acknowledge arrives.
    self.address_data = Sink([("a", lasmim.aw), ("d", lasmim.dw)])
    self.busy = Signal()

    ###

    if fifo_depth is None:
        # Deep enough to cover queued requests plus the write latency.
        fifo_depth = lasmim.req_queue_size + lasmim.write_latency + 2
    fifo = SyncFIFO(lasmim.dw, fifo_depth)
    self.submodules += fifo
    self.comb += [
        lasmim.we.eq(1),  # this port only ever writes
        # Issue a request only when there is both a stream beat and room
        # in the data FIFO for its payload.
        lasmim.stb.eq(fifo.writable & self.address_data.stb),
        lasmim.adr.eq(self.address_data.a),
        self.address_data.ack.eq(fifo.writable & lasmim.req_ack),
        # Queue the write data alongside the accepted request.
        fifo.we.eq(self.address_data.stb & lasmim.req_ack),
        fifo.din.eq(self.address_data.d)
    ]
    # Delay dat_ack by the port's write latency to know exactly when the
    # controller samples the write data.
    data_valid = lasmim.dat_ack
    for i in range(lasmim.write_latency):
        new_data_valid = Signal()
        self.sync += new_data_valid.eq(data_valid),
        data_valid = new_data_valid
    self.comb += [
        # Pop the buffered word and present it with all byte enables set.
        fifo.re.eq(data_valid),
        If(data_valid,
            lasmim.dat_we.eq(2**(lasmim.dw // 8) - 1),
            lasmim.dat_w.eq(fifo.dout)),
        self.busy.eq(fifo.readable)  # busy while buffered data remains
    ]
def __init__(self, lasmim, fifo_depth=None):
    # LASMI writer (stream-endpoint version): accepts (address, data)
    # pairs on ``address_data``, issues write requests on the LASMI port
    # and buffers the write data until the controller requests it via
    # dat_w_ack (no explicit latency pipeline needed here).
    self.address_data = stream.Endpoint([("a", lasmim.aw), ("d", lasmim.dw)])
    self.busy = Signal()

    ###

    if fifo_depth is None:
        # Deep enough to cover queued requests plus the write latency.
        fifo_depth = lasmim.req_queue_size + lasmim.write_latency + 2
    fifo = SyncFIFO(lasmim.dw, fifo_depth)
    self.submodules += fifo
    self.comb += [
        lasmim.we.eq(1),  # this port only ever writes
        # Issue a request only when there is both a stream beat and room
        # in the data FIFO for its payload.
        lasmim.stb.eq(fifo.writable & self.address_data.stb),
        lasmim.adr.eq(self.address_data.a),
        self.address_data.ack.eq(fifo.writable & lasmim.req_ack),
        fifo.we.eq(self.address_data.stb & lasmim.req_ack),
        fifo.din.eq(self.address_data.d)
    ]
    self.comb += [
        # When the controller acknowledges write data, pop the FIFO and
        # present the word with all byte enables set.
        If(lasmim.dat_w_ack,
            fifo.re.eq(1),
            lasmim.dat_we.eq(2**(lasmim.dw // 8) - 1),
            lasmim.dat_w.eq(fifo.dout)),
        self.busy.eq(fifo.readable)  # busy while buffered data remains
    ]
def __init__(self):
    # Device under test: a 64-bit wide, 2-entry synchronous FIFO.
    dut = SyncFIFO(64, 2)
    self.submodules.dut = dut
    # After every accepted write, advance the two 32-bit halves of the
    # input word by different steps (+1 on the low half, +2 on the high
    # half) so successive entries are distinguishable.
    write_accepted = dut.we & dut.writable
    self.sync += If(
        write_accepted,
        dut.din[:32].eq(dut.din[:32] + 1),
        dut.din[32:].eq(dut.din[32:] + 2),
    )
def __init__(self):
    # Device under test: a 2-entry FIFO over a record layout with two
    # 32-bit fields.
    dut = SyncFIFO([("a", 32), ("b", 32)], 2)
    self.submodules.dut = dut
    # Whenever a write is accepted, bump each input field by a distinct
    # step (+1 on "a", +2 on "b") so successive entries differ.
    write_accepted = dut.we & dut.writable
    self.sync += If(
        write_accepted,
        dut.din.a.eq(dut.din.a + 1),
        dut.din.b.eq(dut.din.b + 2),
    )
def __init__(self, buf_width=8, buf_depth=16, buf_afull=5):
    # Buffered FIFO with valid/ready handshakes and an almost-full
    # back-pressure threshold ``buf_afull`` entries below ``buf_depth``.
    # write port
    self.wr_valid_in = migen.Signal(1)
    self.wr_data_in = migen.Signal(buf_width)
    self.wr_ready_out = migen.Signal(1)
    # read port
    self.rd_data_out = migen.Signal(buf_width)
    self.rd_valid_out = migen.Signal(1)
    self.rd_ready_in = migen.Signal(1)
    # current fill level, wide enough to hold 0..buf_depth
    self.num_fifo_elements = migen.Signal(
        math.ceil(math.log2(buf_depth)) + 1)
    # Create port map
    wr_itf = {
        self.wr_valid_in, self.wr_data_in, self.wr_ready_out,
        self.num_fifo_elements
    }
    rd_iff = {self.rd_data_out, self.rd_valid_out, self.rd_ready_in}
    self.ios = set(wr_itf) | set(rd_iff)
    # internals
    # NOTE(review): cnt duplicates what SyncFIFO.level already provides.
    self.cnt = migen.Signal(math.ceil(math.log2(buf_depth)) + 1)
    self.almost_full = migen.Signal(1)

    ####

    # fifo submodule
    self.submodules.fifo = fifo = SyncFIFO(buf_width, buf_depth)
    self.comb += [
        migen.If(  # write logic: accept data only below the almost-full mark
            fifo.writable & self.wr_valid_in & ~self.almost_full,
            fifo.we.eq(1),
            fifo.din.eq(self.wr_data_in)),
        migen.If(  # read logic
            # NOTE(review): rd_data_out is only driven while rd_ready_in is
            # high, even though rd_valid_out asserts on readable alone --
            # consumers must raise ready to see the data; confirm intended.
            fifo.readable & self.rd_ready_in,
            fifo.re.eq(1),
            self.rd_data_out.eq(fifo.dout)),
        migen.If(  # assert rd valid if fifo is not empty
            fifo.readable,
            self.rd_valid_out.eq(1))
    ]
    # element counter (mirrors FIFO occupancy: +1 on write-only cycles,
    # -1 on read-only cycles, unchanged on simultaneous read+write)
    self.sync += [
        migen.If(fifo.we & (~fifo.re),
                 self.cnt.eq(self.cnt + 1)).Elif(
                     fifo.re & (~fifo.we),
                     self.cnt.eq(self.cnt - 1)).Else(self.cnt.eq(self.cnt))
    ]
    # almost full
    self.comb += [
        migen.If((self.cnt >= buf_depth - buf_afull),
                 self.almost_full.eq(1)).Else(self.almost_full.eq(0)),
        self.wr_ready_out.eq(~self.almost_full),  # back-pressure
        self.num_fifo_elements.eq(self.cnt)  # usedw
    ]
def __init__(self, lasmim, fifo_depth=None):
    # LASMI reader: issues read requests from the ``address`` sink and
    # delivers the returned words on the ``data`` source. A reservation
    # counter guarantees that no request is issued unless its data has a
    # FIFO slot waiting, so read data is never dropped.
    self.address = Sink([("a", lasmim.aw)])
    self.data = Source([("d", lasmim.dw)])
    self.busy = Signal()

    ###

    if fifo_depth is None:
        # Deep enough to cover queued requests plus the read latency.
        fifo_depth = lasmim.req_queue_size + lasmim.read_latency + 2
    # request issuance
    request_enable = Signal()
    request_issued = Signal()
    self.comb += [
        lasmim.we.eq(0),  # read-only port
        lasmim.stb.eq(self.address.stb & request_enable),
        lasmim.adr.eq(self.address.a),
        self.address.ack.eq(lasmim.req_ack & request_enable),
        request_issued.eq(lasmim.stb & lasmim.req_ack)
    ]
    # FIFO reservation level counter
    # incremented when data is planned to be queued
    # decremented when data is dequeued
    data_dequeued = Signal()
    rsv_level = Signal(max=fifo_depth + 1)
    self.sync += [
        If(request_issued,
            If(~data_dequeued, rsv_level.eq(rsv_level + 1))).Elif(
                data_dequeued, rsv_level.eq(rsv_level - 1))
    ]
    self.comb += [
        self.busy.eq(rsv_level != 0),
        # stop issuing requests once every FIFO slot is spoken for
        request_enable.eq(rsv_level != fifo_depth)
    ]
    # data available: delay dat_ack by the port's read latency so it
    # lines up with the corresponding returned data word
    data_available = lasmim.dat_ack
    for i in range(lasmim.read_latency):
        new_data_available = Signal()
        self.sync += new_data_available.eq(data_available)
        data_available = new_data_available
    # FIFO
    fifo = SyncFIFO(lasmim.dw, fifo_depth)
    self.submodules += fifo
    self.comb += [
        fifo.din.eq(lasmim.dat_r),
        fifo.we.eq(data_available),
        self.data.stb.eq(fifo.readable),
        fifo.re.eq(self.data.ack),
        self.data.d.eq(fifo.dout),
        data_dequeued.eq(self.data.stb & self.data.ack)
    ]
def __init__(self):
    """Fill a 12-bit, 1024-deep FIFO with an incrementing counter.

    The original drove ``fifo.we`` from a synchronous process, so it
    lagged ``writable`` by one cycle: counter value 0 was never written
    and ``we`` stayed asserted forever once set (harmless only because
    SyncFIFO ignores ``we`` when full). ``we`` is now combinational and
    the counter advances only on accepted writes, so the FIFO receives
    the exact sequence 0, 1, 2, ...
    """
    self.fifo = fifo = SyncFIFO(12, 1024)
    self.submodules += fifo
    counter = Signal(12)
    self.comb += [
        fifo.din.eq(counter),
        fifo.we.eq(fifo.writable),  # write whenever there is room
    ]
    # Advance to the next value each time a write is accepted.
    self.sync += If(fifo.we & fifo.writable, counter.eq(counter + 1))
def __init__(self, elementwidth, num_in, num_out, depth, name=None):
    # Width-converting FIFO: accepts up to ``num_in`` elements per cycle,
    # stores the packed elements together with their count in a SyncFIFO,
    # and emits up to ``num_out`` elements per cycle via a down-converter.
    self.din = Signal(num_in * elementwidth,
                      name_override=name + "_din" if name else None)
    # number of valid input elements present in din (0..num_in)
    self.nin = Signal(max=num_in + 1,
                      name_override=name + "_nin" if name else None)
    self.writable = Signal(name_override=name + "_writable" if name else None)
    self.we = Signal(name_override=name + "_we" if name else None)
    self.dout = Signal(num_out * elementwidth,
                       name_override=name + "_dout" if name else None)
    # NOTE(review): ``readable`` is assigned downconvert.num_out below,
    # i.e. it carries the *count* of valid output elements (0..num_out),
    # not a per-element mask -- confirm with consumers.
    self.readable = Signal(num_out,
                           name_override=name + "_readable" if name else None)
    self.re = Signal(name_override=name + "_re" if name else None)
    # slicer packs the variable-count input into full storage words
    self.submodules.slice = _Slicer(elementwidth=elementwidth,
                                    nelements=num_in)
    self.comb += [
        self.slice.data_in.eq(self.din),
        self.slice.num_in.eq(self.nin),
        self.slice.valid_in.eq(self.we),
        self.writable.eq(self.slice.ack_in)
    ]
    # each storage word = packed elements + their element count
    storage = SyncFIFO(width=elementwidth * num_in + len(self.slice.num_out),
                       depth=depth)
    self.submodules += storage
    self.comb += [
        storage.din.eq(Cat(self.slice.data_out, self.slice.num_out)),
        storage.we.eq(self.slice.valid_out),
        self.slice.ack_out.eq(storage.writable),
        # push out partially-filled words when the FIFO runs empty
        self.slice.flush.eq(~storage.readable)
    ]
    # down-converter unpacks storage words into num_out-element beats
    self.submodules.downconvert = _DownConverter(elementwidth=elementwidth,
                                                 nelements_from=num_in,
                                                 nelements_to=num_out)
    self.comb += [
        self.downconvert.data_in.eq(storage.dout[:elementwidth * num_in]),
        self.downconvert.num_in.eq(storage.dout[elementwidth * num_in:]),
        self.downconvert.valid_in.eq(storage.readable),
        storage.re.eq(self.downconvert.ack_in),
        self.dout.eq(self.downconvert.data_out),
        If(self.downconvert.valid_out,
            self.readable.eq(self.downconvert.num_out)).Else(
                self.readable.eq(0)),
        self.downconvert.ack_out.eq(self.re)
    ]
def __init__(self, pe_id, config, edge_data=None, hmc_port=None):
    # Neighbor-fetch unit for one processing element, reading adjacency
    # data through an HMC port; manages HMC command tags and the reply /
    # update buffers associated with each tag.
    self.pe_id = pe_id
    # Sizes pulled from the address layout. edgeidsize/nodeidsize/
    # edgedatasize are unused in this visible portion -- presumably
    # consumed further down in the original file; TODO confirm.
    nodeidsize = config.addresslayout.nodeidsize
    edgeidsize = config.addresslayout.edgeidsize
    if config.has_edgedata:
        edgedatasize = config.addresslayout.edgedatasize
    else:
        edgedatasize = 0

    # input
    self.neighbor_in = Record(set_layout_parameters(
        _neighbor_in_layout, **config.addresslayout.get_params()))
    # output
    self.neighbor_out = Record(set_layout_parameters(
        _neighbor_out_layout, **config.addresslayout.get_params()))

    if not hmc_port:
        hmc_port = config.platform.getHMCPort(
            pe_id % config.addresslayout.num_pe_per_fpga)
    self.hmc_port = hmc_port
    effective_max_tag_size = self.hmc_port.effective_max_tag_size

    # tag management: tags are first handed out sequentially ("injected")
    # as commands are accepted; once all 2**effective_max_tag_size tags
    # are in circulation, freed tags are recycled through the tags FIFO.
    num_injected = Signal(7)
    inject = Signal()
    tag_available = Signal()
    no_tags_inflight = Signal()
    current_tag = Signal(6)
    self.submodules.tags = SyncFIFO(6, 2**effective_max_tag_size)

    self.sync += If(inject & hmc_port.cmd_ready & hmc_port.cmd_valid,
        num_injected.eq(num_injected + 1)
    )

    self.comb += [
        # every injected tag has been returned to the FIFO
        no_tags_inflight.eq(self.tags.level == num_injected),
        inject.eq(num_injected < 2**effective_max_tag_size),
        If(inject,
            # still injecting: the next fresh tag is the injection count
            current_tag.eq(num_injected),
            tag_available.eq(1)
        ).Else(
            # all tags injected: recycle from the free-tag FIFO
            current_tag.eq(self.tags.dout),
            tag_available.eq(self.tags.readable)
        )
    ]

    # buffers: per-tag reply flits and per-tag update words.
    # flit_size, max_flit_in_burst and update_dat_w come from module
    # scope (not visible here).
    self.specials.answerbuffer = Memory(
        flit_size, max_flit_in_burst*2**effective_max_tag_size)
    self.specials.answer_rd_port = self.answerbuffer.get_port(
        async_read=True, mode=READ_FIRST)
    self.specials.answer_wr_port = self.answerbuffer.get_port(
        write_capable=True, mode=READ_FIRST)
    self.specials.updatebuffer = Memory(
        len(update_dat_w), 2**effective_max_tag_size)
    self.specials.update_rd_port = self.updatebuffer.get_port(
        async_read=True, mode=READ_FIRST)
def __init__(self, plat):
    from migen.build.generic_platform import Subsignal, IOStandard, Pins

    # Declare the NeoPixel data-out pin on PMOD:0 and register it with
    # the platform so it can be requested below.
    extension = [
        ('neopixel', 0,
         Subsignal('tx', Pins('PMOD:0')),
         IOStandard('LVCMOS33'))
    ]
    plat.add_extension(extension)
    pads = plat.request('neopixel')

    # One 24-bit color word per pixel, buffered in a FIFO that feeds the
    # WS2812 controller running off a 12 MHz clock.
    pixel_count = 8
    pixel_fifo = SyncFIFO(24, pixel_count)
    self.submodules.controller = WS2812Controller(pads, pixel_fifo, 12000000)
def __init__(self, dw, max_pending_requests):
    # Completion reorderer: completions arrive on ``sink`` tagged in an
    # arbitrary order; each tag gets its own flow FIFO, and completions
    # are replayed on ``source`` in the order the tags were originally
    # issued (recorded in tag_buffer via req_we/req_tag).
    self.sink = Sink(completion_layout(dw))
    self.source = Source(completion_layout(dw))
    self.req_we = Signal()
    self.req_tag = Signal(log2_int(max_pending_requests))

    # # #

    # order in which tags were allocated; completions drain in this order
    tag_buffer = SyncFIFO(log2_int(max_pending_requests),
                          2 * max_pending_requests)
    self.submodules += tag_buffer
    self.comb += [
        tag_buffer.we.eq(self.req_we),
        tag_buffer.din.eq(self.req_tag)
    ]

    # one reorder FIFO per possible tag
    # (max_request_size comes from module scope, not visible here)
    reorder_buffers = [
        SyncFlowFIFO(completion_layout(dw),
                     2 * max_request_size // (dw // 8),
                     buffered=True)
        for i in range(max_pending_requests)
    ]
    self.submodules += iter(reorder_buffers)

    # store incoming completion in "sink.tag" buffer
    cases = {}
    for i in range(max_pending_requests):
        cases[i] = [Record.connect(self.sink, reorder_buffers[i].sink)]
    cases["default"] = [self.sink.ack.eq(1)]  # out-of-range tag: drop
    self.comb += Case(self.sink.tag, cases)

    # read buffer according to tag_buffer order
    cases = {}
    for i in range(max_pending_requests):
        cases[i] = [Record.connect(reorder_buffers[i].source, self.source)]
    cases["default"] = []
    self.comb += [
        Case(tag_buffer.dout, cases),
        # advance to the next expected tag once the final beat of the
        # current request's completion has been accepted downstream
        If(self.source.stb & self.source.eop & self.source.last,
            tag_buffer.re.eq(self.source.ack))
    ]
def __init__(self, fifo, overflow_depth=2):
    # FIFO wrapper adding a small overflow FIFO: writes arriving while
    # ``fifo`` is full are parked in the overflow FIFO and drained back,
    # in order, as soon as ``fifo`` has room again.
    _FIFOInterface.__init__(self, fifo.width, fifo.depth)
    self.submodules.fifo = fifo
    self.submodules.overflow = overflow = SyncFIFO(fifo.width, overflow_depth)
    # Read side passes straight through to the wrapped FIFO.
    self.dout = fifo.dout
    self.re = fifo.re
    self.readable = fifo.readable

    ###

    self.comb += [
        # Drain the overflow FIFO first so write ordering is preserved;
        # pop it only when the wrapped FIFO actually has room.
        If(overflow.readable,
            fifo.din.eq(overflow.dout),
            fifo.we.eq(1),
            overflow.re.eq(fifo.writable)),
        # New writes go directly to the wrapped FIFO when it has room and
        # the overflow FIFO is empty; otherwise they are diverted into
        # the overflow FIFO.
        If(fifo.writable & ~overflow.readable,
            fifo.din.eq(self.din),
            fifo.we.eq(self.we),
            self.writable.eq(fifo.writable)).Else(
                overflow.din.eq(self.din),
                overflow.we.eq(self.we),
                self.writable.eq(overflow.writable))
    ]
def __init__(self, geom_settings, timing_settings, controller_settings,
             address_align, bankn, req):
    # Per-bank SDRAM machine: queues incoming requests, tracks the open
    # row, and drives the command interface (ACTIVATE / PRECHARGE /
    # READ / WRITE) for bank ``bankn`` while honouring tWR/tRP/tRCD and
    # refresh arbitration.
    self.refresh_req = Signal()
    self.refresh_gnt = Signal()
    self.cmd = CommandRequestRW(geom_settings.addressbits,
                                geom_settings.bankbits)

    ###

    # Request FIFO
    layout = [("we", 1), ("adr", len(req.adr))]
    req_in = Record(layout)
    reqf = Record(layout)  # request currently at the head of the queue
    self.submodules.req_fifo = SyncFIFO(layout_len(layout),
                                        controller_settings.req_queue_size)
    self.comb += [
        self.req_fifo.din.eq(req_in.raw_bits()),
        reqf.raw_bits().eq(self.req_fifo.dout)
    ]
    self.comb += [
        req_in.we.eq(req.we),
        req_in.adr.eq(req.adr),
        self.req_fifo.we.eq(req.stb),
        req.req_ack.eq(self.req_fifo.writable),
        # pop the request once its data phase has completed
        self.req_fifo.re.eq(req.dat_w_ack | req.dat_r_ack),
        req.lock.eq(self.req_fifo.readable)
    ]

    slicer = _AddressSlicer(geom_settings.colbits, address_align)

    # Row tracking
    has_openrow = Signal()
    openrow = Signal(geom_settings.rowbits)
    hit = Signal()  # head request targets the currently open row
    self.comb += hit.eq(openrow == slicer.row(reqf.adr))
    track_open = Signal()
    track_close = Signal()
    self.sync += [
        If(track_open,
            has_openrow.eq(1),
            openrow.eq(slicer.row(reqf.adr))),
        If(track_close,
            has_openrow.eq(0))
    ]

    # Address generation
    s_row_adr = Signal()  # 1: present the row address, 0: the column
    self.comb += [
        self.cmd.ba.eq(bankn),
        If(s_row_adr,
            self.cmd.a.eq(slicer.row(reqf.adr))).Else(
                self.cmd.a.eq(slicer.col(reqf.adr)))
    ]

    # Respect write-to-precharge specification
    precharge_ok = Signal()
    t_unsafe_precharge = 2 + timing_settings.tWR - 1
    unsafe_precharge_count = Signal(max=t_unsafe_precharge + 1)
    self.comb += precharge_ok.eq(unsafe_precharge_count == 0)
    self.sync += [
        # reload the guard on every acknowledged write command
        If(self.cmd.stb & self.cmd.ack & self.cmd.is_write,
            unsafe_precharge_count.eq(t_unsafe_precharge)).Elif(
                ~precharge_ok,
                unsafe_precharge_count.eq(unsafe_precharge_count - 1))
    ]

    # Control and command generation FSM
    fsm = FSM()
    self.submodules += fsm
    fsm.act(
        "REGULAR",
        If(self.refresh_req,
            NextState("REFRESH")).Elif(
                self.req_fifo.readable,
                If(
                    has_openrow,
                    If(
                        hit,
                        # NB: write-to-read specification is enforced by
                        # multiplexer
                        self.cmd.stb.eq(1),
                        req.dat_w_ack.eq(self.cmd.ack & reqf.we),
                        req.dat_r_ack.eq(self.cmd.ack & ~reqf.we),
                        self.cmd.is_read.eq(~reqf.we),
                        self.cmd.is_write.eq(reqf.we),
                        self.cmd.cas_n.eq(0),
                        self.cmd.we_n.eq(~reqf.we)).Else(
                            # row miss: close the open row first
                            NextState("PRECHARGE"))).Else(
                                # no open row: open the target row
                                NextState("ACTIVATE"))))
    fsm.act(
        "PRECHARGE",
        # Notes:
        # 1. we are presenting the column address, A10 is always low
        # 2. since we always go to the ACTIVATE state, we do not need
        #    to assert track_close.
        If(precharge_ok,
            self.cmd.stb.eq(1),
            If(self.cmd.ack, NextState("TRP")),
            self.cmd.ras_n.eq(0),
            self.cmd.we_n.eq(0),
            self.cmd.is_cmd.eq(1)))
    fsm.act("ACTIVATE",
            s_row_adr.eq(1),
            track_open.eq(1),
            self.cmd.stb.eq(1),
            self.cmd.is_cmd.eq(1),
            If(self.cmd.ack, NextState("TRCD")),
            self.cmd.ras_n.eq(0))
    fsm.act("REFRESH",
            # grant refresh only once a precharge is timing-safe
            self.refresh_gnt.eq(precharge_ok),
            track_close.eq(1),
            self.cmd.is_cmd.eq(1),
            If(~self.refresh_req, NextState("REGULAR")))
    # timing waits between command phases
    fsm.delayed_enter("TRP", "ACTIVATE", timing_settings.tRP - 1)
    fsm.delayed_enter("TRCD", "REGULAR", timing_settings.tRCD - 1)
def __init__(self, depth):
    # Descriptor table: a CSR-programmable FIFO of (address, length)
    # descriptors feeding ``source``. In "program" mode descriptors are
    # loaded through CSRs; in "loop" mode each consumed descriptor is
    # written back so the table repeats forever. ``_index`` tracks the
    # position within the loop for software synchronization.
    self.source = source = Source(descriptor_layout())
    aw = flen(source.address)
    lw = flen(source.length)
    self._value = CSRStorage(aw + lw)
    self._we = CSR()
    self._loop_prog_n = CSRStorage()  # 1 = loop mode, 0 = program mode
    self._index = CSRStatus(log2_int(depth))
    self._level = CSRStatus(log2_int(depth))
    self._flush = CSR()
    self.irq = Signal()

    # # #

    # CSR signals
    value = self._value.storage
    we = self._we.r & self._we.re
    loop_prog_n = self._loop_prog_n.storage
    index = self._index.status
    level = self._level.status
    flush = self._flush.r & self._flush.re

    # FIFO
    # instance
    fifo_layout = [("address", aw), ("length", lw), ("start", 1)]
    fifo = InsertReset(SyncFIFO(fifo_layout, depth))
    self.submodules += fifo
    self.comb += [fifo.reset.eq(flush), level.eq(fifo.level)]

    # write part
    self.sync += [
        # in "loop" mode, each data output of the fifo is
        # written back
        If(loop_prog_n,
            fifo.din.address.eq(fifo.dout.address),
            fifo.din.length.eq(fifo.dout.length),
            fifo.din.start.eq(fifo.dout.start),
            fifo.we.eq(fifo.re)
        # in "program" mode, fifo input is connected
        # to registers
        ).Else(fifo.din.address.eq(value[:aw]),
               fifo.din.length.eq(value[aw:aw + lw]),
               # the first descriptor written into an empty table marks
               # the start of the sequence
               fifo.din.start.eq(~fifo.readable),
               fifo.we.eq(we))
    ]

    # read part
    self.comb += [
        source.stb.eq(fifo.readable),
        fifo.re.eq(source.stb & source.ack),
        source.address.eq(fifo.dout.address),
        source.length.eq(fifo.dout.length)
    ]

    # index
    # used by the software for synchronization in
    # "loop" mode
    self.sync += \
        If(flush,
            index.eq(0)
        ).Elif(source.stb & source.ack,
            If(fifo.dout.start,
                index.eq(0)
            ).Else(
                index.eq(index+1)
            )
        )

    # IRQ: one pulse per consumed descriptor
    self.comb += self.irq.eq(source.stb & source.ack)
def __init__(self, hostif, max_burst_length=256):
    # SDRAM sink: packs the 8-bit ``sink`` stream into 16-bit words,
    # buffers them in a FIFO and bursts them into SDRAM through
    # ``hostif``, maintaining a software-visible ring buffer (wptr/rptr,
    # base/end CSRs) plus a set of debug performance counters.
    width = len(hostif.d_write)
    assert width == 16
    awidth = len(hostif.i_addr) + 1

    self.sink = Endpoint([('d', 8), ('last', 1)])

    self.submodules.sdram_fifo = SyncFIFO(width, max_burst_length)

    self.submodules.fifo_write_fsm = FSM()

    self.wptr = Signal(awidth)
    # rptr (from SDRAM Source)
    self.rptr = Signal(awidth)

    # CSRs: pointer snapshots are latched atomically on _ptr_read
    self._ptr_read = CSRStorage(1)
    ptr_read = self._ptr_read.re
    self._wptr = CSRStatus(awidth)
    self.sync += If(ptr_read, self._wptr.status.eq(self.wptr))
    self._rptr = CSRStatus(awidth)
    self.sync += If(ptr_read, self._rptr.status.eq(self.rptr))
    self._ring_base = CSRStorage(awidth)
    self._ring_end = CSRStorage(awidth)
    self._go = CSRStorage(1)
    go = self._go.storage[0]

    # 'go'-signal edge detect
    gor = Signal()
    self.sync += gor.eq(go)

    self._wrap_count = Perfcounter(ptr_read, go & ~gor)

    # wptr wrap around
    wrap = Signal()
    self.comb += wrap.eq(self.wptr == self._ring_end.storage)

    wptr_next = Signal(awidth)
    self.comb += If(wrap,
                    wptr_next.eq(self._ring_base.storage)).Else(
                        wptr_next.eq(self.wptr + 1))

    # debug
    self._debug_ctl = CSRStorage(1)
    snapshot = self._debug_ctl.re
    perf_reset = self._debug_ctl.storage[0]
    self._debug_i_stb = Perfcounter(snapshot, perf_reset)
    self._debug_i_ack = Perfcounter(snapshot, perf_reset)
    self._debug_d_stb = Perfcounter(snapshot, perf_reset)
    self._debug_d_term = Perfcounter(snapshot, perf_reset)
    self._debug_s0 = Perfcounter(snapshot, perf_reset)
    self._debug_s1 = Perfcounter(snapshot, perf_reset)
    self._debug_s2 = Perfcounter(snapshot, perf_reset)
    self._perf_busy = Perfcounter(snapshot, perf_reset)

    self.comb += If(hostif.i_stb, self._debug_i_stb.inc())
    self.comb += If(hostif.i_ack, self._debug_i_ack.inc())
    self.comb += If(hostif.d_stb, self._debug_d_stb.inc())
    self.comb += If(hostif.d_term, self._debug_d_term.inc())
    self.comb += If(~self.sdram_fifo.writable, self._perf_busy.inc())

    # FSM to move FIFO data to SDRAM
    burst_rem = Signal(max=max_burst_length)  # words left in this burst
    burst_rem_next = Signal(max=max_burst_length)
    self.comb += burst_rem_next.eq(burst_rem)
    self.sync += burst_rem.eq(burst_rem_next)
    self.comb += hostif.i_wr.eq(1)  # this unit only ever writes

    # blocked: the next write would collide with the read pointer
    blocked = Signal()
    self.comb += blocked.eq(self.rptr == wptr_next)

    # start writing data if
    # - 'go'-signal set, and
    # - input data available
    # - not blocked
    self.fifo_write_fsm.act(
        "IDLE",
        self._debug_s0.inc(),
        If(self.sdram_fifo.readable & go & ~blocked,
            hostif.i_addr.eq(self.wptr),
            hostif.i_stb.eq(1),
            burst_rem_next.eq(max_burst_length - 1)),
        If(hostif.i_ack, NextState("WRITE")))

    self.comb += hostif.d_write.eq(self.sdram_fifo.dout)

    # stop writing if
    # - max burst length reached, or
    # - no more input data, or
    # - wrap
    # - blocked
    self.fifo_write_fsm.act(
        "WRITE",
        self._debug_s1.inc(),
        hostif.d_term.eq((burst_rem == 0) | ~self.sdram_fifo.readable
                         | wrap | blocked),
        self.sdram_fifo.re.eq(hostif.d_stb & ~hostif.d_term),
        If(
            ~hostif.d_term & hostif.d_stb,
            burst_rem_next.eq(
                burst_rem - 1)  # CHECKME: was burst_rem_next - 1 which is a comb loop
        ),
        If(hostif.d_term & ~hostif.d_stb,
            NextState("WAIT")).Elif(hostif.d_term & hostif.d_stb,
                                    NextState("IDLE")))

    # WAIT: burst terminated before the controller's final strobe
    self.fifo_write_fsm.act("WAIT",
                            self._debug_s2.inc(),
                            hostif.d_term.eq(1),
                            If(hostif.d_stb, NextState("IDLE")))

    # wrap around counter
    self.comb += If(wrap & hostif.d_stb & ~hostif.d_term,
                    self._wrap_count.inc())

    # update wptr: reset to ring base on a rising 'go' edge, otherwise
    # advance on every accepted data beat (or on wrap)
    self.sync += If(
        go & ~gor,
        self.wptr.eq(self._ring_base.storage),
    ).Elif((hostif.d_stb & ~hostif.d_term) | wrap,
           self.wptr.eq(wptr_next))

    # sink into fifo: pack two consecutive 8-bit beats into one 16-bit
    # FIFO word (first beat = low byte, second beat = high byte)
    self.submodules.fifo_fsm = FSM()

    capture_low = Signal()
    din_low = Signal(8)
    self.comb += self.sdram_fifo.din.eq(Cat(din_low, self.sink.payload.d))
    self.sync += If(capture_low, din_low.eq(self.sink.payload.d))

    self.fifo_fsm.act("READ_LOW",
                      capture_low.eq(1),
                      self.sink.ack.eq(1),
                      If(self.sink.stb, NextState("READ_HI")))

    self.fifo_fsm.act(
        "READ_HI",
        self.sdram_fifo.we.eq(self.sink.stb),
        self.sink.ack.eq(self.sdram_fifo.writable),
        If(self.sink.ack & self.sink.stb, NextState("READ_LOW")))
def __init__(self, endpoint, port, table_depth=256):
    # DMA writer: buffers incoming stream data in a FIFO and, once a full
    # request's worth of data has accumulated, emits PCIe memory-write
    # TLPs described by the request table (after splitting requests to
    # the PHY's maximum payload size).
    self.sink = sink = Sink(dma_layout(endpoint.phy.dw))
    self._enable = CSRStorage()

    # # #

    enable = self._enable.storage
    # (max_request_size comes from module scope, not visible here)
    max_words_per_request = max_request_size // (endpoint.phy.dw // 8)
    fifo_depth = 4 * max_words_per_request

    # Data FIFO
    # store data until we have enough data to issue a
    # write request
    fifo = InsertReset(SyncFIFO(endpoint.phy.dw, fifo_depth))
    self.submodules += fifo
    self.comb += [
        fifo.we.eq(sink.stb & enable),
        sink.ack.eq(fifo.writable & sink.stb & enable),
        fifo.din.eq(sink.dat),
        fifo.reset.eq(~enable)  # flush buffered data when disabled
    ]

    # Request generation
    # requests from table are splitted in chunks of "max_size"
    self.table = table = DMARequestTable(table_depth)
    splitter = InsertReset(
        DMARequestSplitter(endpoint.phy.max_payload_size))
    self.submodules += table, splitter
    self.comb += splitter.reset.eq(~enable)
    self.comb += table.source.connect(splitter.sink)

    # Request FSM
    # cnt counts the data words sent for the current request
    cnt = Signal(max=(2**flen(endpoint.phy.max_payload_size)) / 8)
    clr_cnt = Signal()
    inc_cnt = Signal()
    self.sync += \
        If(clr_cnt,
            cnt.eq(0)
        ).Elif(inc_cnt,
            cnt.eq(cnt + 1)
        )

    self.submodules.fsm = fsm = FSM(reset_state="IDLE")
    request_ready = Signal()
    fsm.act("IDLE",
            clr_cnt.eq(1),
            If(
                request_ready,
                NextState("REQUEST"),
            ))
    fsm.act(
        "REQUEST",
        inc_cnt.eq(port.source.stb & port.source.ack),
        port.source.stb.eq(1),
        port.source.channel.eq(port.channel),
        port.source.user_id.eq(splitter.source.user_id),
        port.source.sop.eq(cnt == 0),
        # length is in bytes; the [3:] slice converts it to data words
        port.source.eop.eq(cnt == splitter.source.length[3:] - 1),
        port.source.we.eq(1),
        port.source.adr.eq(splitter.source.address),
        port.source.req_id.eq(endpoint.phy.id),
        port.source.tag.eq(0),
        port.source.len.eq(splitter.source.length[2:]),
        port.source.dat.eq(fifo.dout),
        If(
            port.source.ack,
            fifo.re.eq(1),
            If(
                port.source.eop,
                splitter.source.ack.eq(1),
                NextState("IDLE"),
            )))
    # only start a request once all of its data is already buffered
    fifo_ready = fifo.level >= splitter.source.length[3:]
    self.sync += request_ready.eq(splitter.source.stb & fifo_ready)
def __init__(self, data_width, max_pending_requests, with_reordering=False):
    # PCIe master tag manager: allocates a tag for every outstanding read
    # request, records its (channel, user_id) in a small memory, and
    # restores that information onto the matching completions. Optionally
    # inserts a reordering stage on the completion path.
    self.master_in = LitePCIeMasterInternalPort(data_width)
    self.master_out = LitePCIeMasterInternalPort(data_width)

    # # #

    req_sink, req_source = self.master_in.sink, self.master_out.sink
    cmp_sink, cmp_source = self.master_out.source, self.master_in.source

    # pool of free tags
    tag_fifo = SyncFIFO(log2_int(max_pending_requests),
                        max_pending_requests)
    self.submodules += tag_fifo

    # per-tag bookkeeping: channel in bits [0:8], user_id in bits [8:16]
    info_mem = Memory(16, max_pending_requests)
    info_mem_wr_port = info_mem.get_port(write_capable=True)
    info_mem_rd_port = info_mem.get_port(async_read=False)
    self.specials += info_mem, info_mem_wr_port, info_mem_rd_port

    # latch the tag popped from the free pool
    req_tag = Signal(max=max_pending_requests)
    self.sync += \
        If(tag_fifo.re,
            req_tag.eq(tag_fifo.dout)
        )

    # Requests mgt
    self.submodules.req_fsm = req_fsm = FSM(reset_state="IDLE")
    req_fsm.act("IDLE",
        # reads need a free tag; writes pass through untagged
        If(req_sink.valid & req_sink.first & ~req_sink.we & tag_fifo.readable,
            tag_fifo.re.eq(1),
            NextState("SEND_READ")
        ).Elif(req_sink.valid & req_sink.first & req_sink.we,
            NextState("SEND_WRITE")
        )
    )
    self.comb += req_sink.connect(req_source, omit=set(["valid", "ready"]))
    req_fsm.act("SEND_READ",
        req_source.valid.eq(req_sink.valid),
        req_source.tag.eq(req_tag),
        If(req_source.valid & req_source.last & req_source.ready,
            NextState("UPDATE_INFO_MEM")
        ).Else(
            req_sink.ready.eq(req_source.ready)
        )
    )
    req_fsm.act("SEND_WRITE",
        req_source.valid.eq(req_sink.valid),
        req_sink.ready.eq(req_source.ready),
        req_source.tag.eq(32),  # fixed tag for (posted) writes
        If(req_source.valid & req_source.last & req_source.ready,
            NextState("IDLE")
        )
    )
    self.comb += [
        info_mem_wr_port.adr.eq(req_tag),
        info_mem_wr_port.dat_w[:8].eq(req_sink.channel),
        info_mem_wr_port.dat_w[8:].eq(req_sink.user_id)
    ]
    req_fsm.act("UPDATE_INFO_MEM",
        info_mem_wr_port.we.eq(1),
        req_sink.ready.eq(1),
        NextState("IDLE")
    )

    # Completions mgt
    if with_reordering:
        self.submodules.reordering = LitePCIeTLPReordering(
            data_width, max_pending_requests)
        self.comb += [
            self.reordering.req_we.eq(info_mem_wr_port.we),
            self.reordering.req_tag.eq(info_mem_wr_port.adr),
            self.reordering.source.connect(cmp_source)
        ]
        # completions are routed through the reorderer from here on
        cmp_source = self.reordering.sink

    self.submodules.cmp_fsm = cmp_fsm = FSM(reset_state="INIT")
    tag_cnt = Signal(max=max_pending_requests)
    inc_tag_cnt = Signal()
    self.sync += \
        If(inc_tag_cnt,
            tag_cnt.eq(tag_cnt + 1)
        )
    # INIT: seed the free-tag FIFO with every tag value once at startup
    cmp_fsm.act("INIT",
        inc_tag_cnt.eq(1),
        tag_fifo.we.eq(1),
        tag_fifo.din.eq(tag_cnt),
        If(tag_cnt == (max_pending_requests-1),
            NextState("IDLE")
        )
    )
    self.comb += [
        info_mem_rd_port.adr.eq(cmp_sink.tag),
        cmp_sink.connect(cmp_source, omit=set(["valid", "ready"])),
        # restore the originating channel/user_id from the info memory
        cmp_source.channel.eq(info_mem_rd_port.dat_r[:8]),
        cmp_source.user_id.eq(info_mem_rd_port.dat_r[8:])
    ]
    cmp_fsm.act("IDLE",
        If(cmp_sink.valid & cmp_sink.first,
            NextState("COPY"),
        ).Else(
            cmp_sink.ready.eq(1)
        )
    )
    cmp_fsm.act("COPY",
        # "end" flags the final completion of a request: recycle its tag
        If(cmp_sink.valid & cmp_sink.last & cmp_sink.end,
            NextState("UPDATE_TAG_FIFO"),
        ).Else(
            cmp_source.valid.eq(cmp_sink.valid),
            cmp_sink.ready.eq(cmp_source.ready),
            If(cmp_sink.valid & cmp_sink.last & cmp_sink.ready,
                NextState("IDLE")
            )
        )
    )
    cmp_fsm.act("UPDATE_TAG_FIFO",
        tag_fifo.we.eq(1),
        tag_fifo.din.eq(cmp_sink.tag),
        cmp_source.valid.eq(cmp_sink.valid),
        cmp_sink.ready.eq(cmp_source.ready),
        If(cmp_sink.valid & cmp_sink.ready,
            NextState("IDLE")
        )
    )
def __init__(self, clk_freq, baud_rate):
    # UART transmitter with an 8-bit, 1024-entry TX FIFO: bytes written
    # through the FIFO interface are serialized on ``tx`` as one start
    # bit, 8 data bits LSB first, and one stop bit.
    divisor = clk_freq // baud_rate
    self.submodules.fifo = SyncFIFO(8, 1024)
    # write side is exposed directly from the FIFO
    self.din = self.fifo.din
    self.we = self.fifo.we
    self.writable = self.fifo.writable
    self.tx = Signal()
    self.io = {self.din, self.we, self.fifo.writable, self.tx}

    # strobe_counter counts down from divisor to 0, resets automatically
    # or when strobe-start is asserted.
    strobe_counter = Signal(max=divisor)
    strobe_start = Signal()
    strobe = Signal()  # pulses once per bit period
    self.comb += strobe.eq(strobe_counter == 0)
    self.sync += (If(
        strobe | strobe_start,
        strobe_counter.eq(divisor - 1),
    ).Else(strobe_counter.eq(strobe_counter - 1)))

    # Main bit sender FSM.
    bit_counter = Signal(max=8)
    tx_data = Signal(8)  # byte currently being shifted out
    self.submodules.fsm = FSM(reset_state='IDLE')
    # IDLE: latch the next byte from the FIFO as soon as one is available.
    self.fsm.act(
        'IDLE',
        If(
            self.fifo.readable,
            NextState('START'),
            NextValue(tx_data, self.fifo.dout),
        ))
    # START: hold the start bit for one bit period.
    self.fsm.act(
        'START',
        If(
            strobe,
            NextState('DATA'),
            NextValue(bit_counter, 0),
        ),
    )
    # DATA: send the 8 data bits, LSB first.
    self.fsm.act(
        'DATA',
        If(
            strobe,
            If(
                bit_counter == 7,
                NextState('STOP'),
            ).Else(NextValue(bit_counter, bit_counter + 1))))
    # STOP: one stop-bit period, then back to idle.
    self.fsm.act(
        'STOP',
        If(
            strobe,
            NextState('IDLE'),
        ),
    )
    self.comb += [
        # FIFO readout.
        self.fifo.re.eq(self.fsm.ongoing('IDLE')),
        # Keep resetting the counter when in IDLE.
        strobe_start.eq(self.fsm.ongoing('IDLE')),
        # TX line logic.
        If(
            self.fsm.ongoing('START'),
            self.tx.eq(0),
        ).Elif(
            self.fsm.ongoing('DATA'),
            self.tx.eq((tx_data >> bit_counter) & 1),
        ).Else(self.tx.eq(1), )
    ]
def __init__(self, platform):
    # Debug-bridge top level: implements a single-letter command protocol
    # over the host UART ('v' version, 'f' flush, 'r' reset target,
    # 'w'/'W' write/transact, 'R' read back, 't'/'T' timer value/status,
    # 's'/'S' clock dividers) to drive a target SIO interface with
    # bit-banged serial transactions and TX/RX buffer FIFOs.

    # Instantiate and connect UART cores to host.
    self.submodules.uart_rx = uart.RXFIFO(self.CLKFREQ, self.BAUDRATE)
    self.submodules.uart_tx = uart.TXFIFO(self.CLKFREQ, self.BAUDRATE)
    serial = platform.request('serial')
    self.comb += [
        serial.tx.eq(self.uart_tx.tx),
        self.uart_rx.rx.eq(serial.rx),
    ]

    # Heartbeat LED.
    led = platform.request('user_led')
    counter = Signal(max=12000000)
    self.sync += If(counter == 11999999,
        counter.eq(0),
        led.eq(~led),
    ).Else(
        counter.eq(counter + 1),
    )

    target = platform.request('sio')

    # Register signals from target because metastability.
    target_txd = Signal()
    target_busy = Signal()
    self.sync += [
        target_txd.eq(target.txd),
        target_busy.eq(target.busy),
    ]

    # More debug LEDs.
    self.comb += [
        platform.request('user_led').eq(target.rxd),
        platform.request('user_led').eq(target.txd),
        platform.request('user_led').eq(target.busy),
    ]

    # Input/output FIFOs.
    self.submodules.txbuffer = SyncFIFO(8, 512)
    self.submodules.rxbuffer = SyncFIFO(8, 512)

    # Dispatch and response flops for host communication.
    request = Signal(8)
    response = Signal(8)

    # Generic counter used by a bunch of states.
    # NOTE(review): this rebinds the Python name ``counter``; the
    # heartbeat logic above keeps its own (already captured) Signal.
    # TODO: share the logic that populates this for sending/receiving
    # words.
    counter = Signal(max=120000)

    # Target CLK divider.
    tclk_divider = Signal(max=120, reset=4)
    tclk_counter = Signal(max=121)
    self.sync += [
        If(tclk_counter == tclk_divider,
            tclk_counter.eq(0),
            target.tclk.eq(~target.tclk),
        ).Else(
            tclk_counter.eq(tclk_counter + 1),
        )
    ]

    # Target serial CLK divider, used by the *_EDGE states in the FSM.
    sclk_divider = Signal(max=1024, reset=1023)

    # Target busy timer: counts cycles from the rising edge of busy
    # until busy deasserts.
    timer = Signal(32)
    timer_running = Signal(reset=0)
    last_busy = Signal()
    self.sync += last_busy.eq(target_busy)
    self.sync += \
        If(~timer_running,
            If((~last_busy) & target_busy,
                timer_running.eq(1),
                timer.eq(0),
            )
        ).Elif(~target_busy,
            timer_running.eq(0),
        ).Else(
            timer.eq(timer + 1),
        )

    # Main state machine.
    self.submodules.fsm = FSM(reset_state='IDLE')
    self.fsm.act('IDLE',
        If(self.uart_rx.readable,
            NextState('DISPATCH'),
            NextValue(request, self.uart_rx.dout),
        ),
    )
    self.fsm.act('DISPATCH',
        Case(request, {
            # Get API version of bitstream.
            ord('v'): [
                NextState('RESPOND_BYTE'),
                NextValue(response, ord('0')),
            ],
            # Flush both FIFOs.
            ord('f'): [
                NextState('FIFO_FLUSH'),
            ],
            # Reset target.
            ord('r'): [
                NextState('RESET_TARGET'),
                NextValue(target.rst, 0),
                NextValue(counter, 119999),
            ],
            # Write byte to FIFO.
            ord('w'): [
                NextState('FIFO_WRITE'),
            ],
            # Perform transaction with target.
            ord('W'): [
                NextState('SEND_START'),
            ],
            # Read bytes from FIFO.
            ord('R'): [
                NextState('FIFO_READ_START'),
                NextValue(counter, 3),
            ],
            # Get timer value.
            ord('t'): [
                NextState('GET_TIMER'),
                NextValue(counter, 0),
            ],
            # Get timer status.
            ord('T'): [
                NextState('RESPOND_BYTE'),
                If(timer_running,
                    NextValue(response, ord('r'))
                ).Else(
                    NextValue(response, ord('s'))
                )
            ],
            # Set target clock.
            ord('s'): [
                NextState('SET_TCLK'),
            ],
            # Set target serial clock.
            ord('S'): [
                NextState('SET_SCLK'),
                NextValue(counter, 1),
            ],
            # Default handler.
            'default': [
                NextState('RESPOND_BYTE'),
                NextValue(response, ord('?')),
            ],
        })
    )
    # Hold the target in reset for ``counter`` cycles, then release.
    self.fsm.act('RESET_TARGET',
        If(counter == 0,
            NextValue(target.rst, 1),
            NextValue(target.sclk, 1),
            NextValue(response, ord('.')),
            NextState('RESPOND_BYTE'),
        ).Else(
            NextValue(counter, counter-1),
        )
    )
    # Stream the 4 timer bytes to the host (uart_tx.we is asserted while
    # in this state, see the UART enables below).
    self.fsm.act('GET_TIMER',
        If(counter == 3,
            NextState('IDLE'),
        ),
        NextValue(counter, counter+1),
    )
    self.fsm.act('FIFO_WRITE',
        If(self.uart_rx.readable,
            If(self.txbuffer.writable,
                NextValue(response, ord('.')),
                NextState('RESPOND_BYTE'),
            ).Else(
                NextValue(response, ord('!')),
                NextState('RESPOND_BYTE'),
            )
        )
    )
    self.fsm.act('SET_TCLK',
        If(self.uart_rx.readable,
            NextValue(tclk_divider, self.uart_rx.dout),
            NextValue(response, ord('.')),
            NextState('RESPOND_BYTE'),
        )
    )
    # Shift in the 16-bit serial-clock divider, one byte per UART read.
    # NOTE(review): sclk_divider is only 10 bits wide (max=1024), so the
    # top bits of dout << 8 are truncated -- confirm intended.
    self.fsm.act('SET_SCLK',
        If(self.uart_rx.readable,
            NextValue(sclk_divider,
                      (sclk_divider >> 8) | (self.uart_rx.dout << 8)),
            If(counter == 0,
                NextValue(response, ord('.')),
                NextState('RESPOND_BYTE'),
            ).Else(
                NextValue(counter, counter-1),
            )
        )
    )

    # Transaction signals.
    # Byte to be sent to target.
    send_byte = Signal(8)
    # Byte being received from target.
    receive_byte = Signal(8)
    # Index into both send and receive bytes.
    bit_index = Signal(max=8)
    # Downounter for clock rise/fall edges, set to sclk.
    bit_counter = Signal(max=1024)
    # Rising/falling edge over, move to next state.
    # NOTE(review): bit_strobe and next_bit are declared but unused in
    # this visible portion; the SEND_* states compare bit_counter == 0
    # directly.
    bit_strobe = Signal()
    self.comb += bit_strobe.eq(bit_counter == 0)

    next_bit = Signal()

    self.fsm.act('SEND_START',
        NextState('SEND_PREPARE'),
        NextValue(bit_index, 0),
    )
    # Prepare next byte to send or finish transaction.
    self.fsm.act('SEND_PREPARE',
        If(self.txbuffer.readable,
            NextValue(send_byte, self.txbuffer.dout),
            NextState('SEND_WAIT'),
        ).Else(
            NextValue(response, ord('.')),
            NextState('RESPOND_BYTE'),
        )
    )
    # Wait for target to not be busy.
    self.fsm.act('SEND_WAIT',
        If(~target_busy,
            NextValue(bit_counter, sclk_divider),
            NextState('SEND_FALLING'),
        )
    )
    # Downcount bit_counter, send data to target.
    self.fsm.act('SEND_FALLING',
        If(bit_counter == 0,
            NextValue(target.sclk, 0),
            NextState('SEND_RISING'),
            NextValue(bit_counter, sclk_divider),
            NextValue(target.rxd, (send_byte >> bit_index) & 1),
        ).Else(
            NextValue(bit_counter, bit_counter-1),
        )
    )
    # Downcount bit_counter, receive data from target.
    self.fsm.act('SEND_RISING',
        If(bit_counter == 0,
            # shift the sampled bit in from the MSB side (LSB first out)
            NextValue(receive_byte,
                      (target_txd << 7) | (receive_byte >> 1)),
            NextValue(target.sclk, 1),
            If(bit_index == 7,
                NextValue(bit_index, 0),
                NextState('SEND_WRITEBACK'),
            ).Else(
                NextValue(bit_index, bit_index+1),
                NextState('SEND_FALLING'),
                NextValue(bit_counter, sclk_divider),
            )
        ).Else(
            NextValue(bit_counter, bit_counter-1),
        )
    )
    # Write received byte to read FIFO.
    self.fsm.act('SEND_WRITEBACK',
        NextState('SEND_PREPARE')
    )

    # Downcounter for requested bytes to read from FIFO.
    fifo_read_counter = Signal(32)

    # Set downcounter based on host request.
    self.fsm.act('FIFO_READ_START',
        If(self.uart_rx.readable,
            If(counter == 0,
                NextState('FIFO_READ'),
            ).Else(
                NextValue(counter, counter-1)
            ),
            # assemble the 32-bit count, one byte per UART read
            NextValue(fifo_read_counter,
                      (fifo_read_counter >> 8) | (self.uart_rx.dout << 24))
        )
    )
    # Downcount fifo_read_counter, send FIFO bytes to host.
    self.fsm.act('FIFO_READ',
        If(fifo_read_counter == 0,
            NextState('IDLE'),
        ).Else(
            NextValue(fifo_read_counter, fifo_read_counter-1),
        )
    )
    # Whether the read FIFO should emit a byte - somewhat of a hack.
    fifo_read = Signal()
    self.comb += fifo_read.eq(self.fsm.ongoing('FIFO_READ')
                              & (fifo_read_counter != 0))

    # Drain both FIFOs until empty, then acknowledge.
    self.fsm.act('FIFO_FLUSH',
        If((~self.rxbuffer.readable) & (~self.txbuffer.readable),
            NextValue(response, ord('.')),
            NextState('RESPOND_BYTE'),
        )
    )

    # Enables and data connections for FIFOs.
    self.comb += [
        self.txbuffer.we.eq(
            self.fsm.ongoing('FIFO_WRITE') & self.uart_rx.readable
        ),
        self.txbuffer.re.eq(
            self.fsm.ongoing('SEND_PREPARE')
            | self.fsm.ongoing('FIFO_FLUSH')
        ),
        self.txbuffer.din.eq(self.uart_rx.dout),
        self.rxbuffer.we.eq(self.fsm.ongoing('SEND_WRITEBACK')),
        self.rxbuffer.re.eq(
            fifo_read | self.fsm.ongoing('FIFO_FLUSH')
        ),
        self.rxbuffer.din.eq(receive_byte),
    ]

    # Generic 1-byte response state.
    self.fsm.act('RESPOND_BYTE',
        If(self.uart_tx.writable,
            NextState('IDLE'),
        ),
    )

    # Host UART enables.
    self.comb += [
        self.uart_rx.re.eq(
            self.fsm.ongoing('IDLE')
            | self.fsm.ongoing('FIFO_WRITE')
            | self.fsm.ongoing('SET_TCLK')
            | self.fsm.ongoing('SET_SCLK')
            | self.fsm.ongoing('FIFO_READ_START')
        ),
        self.uart_tx.we.eq(
            self.fsm.ongoing('RESPOND_BYTE')
            | self.fsm.ongoing('GET_TIMER')
            | fifo_read
        ),
        If(self.fsm.ongoing('RESPOND_BYTE'),
            self.uart_tx.din.eq(response),
        ).Elif(self.fsm.ongoing('GET_TIMER'),
            self.uart_tx.din.eq(timer >> (counter * 8)),
        ).Elif(fifo_read,
            If(self.rxbuffer.readable,
                self.uart_tx.din.eq(self.rxbuffer.dout),
            ).Else(
                self.uart_tx.din.eq(0xff),  # underflow filler byte
            ),
        )
    ]

    # Expose Host UART on debug pins.
    self.comb += [
        platform.request('debug').eq(self.uart_tx.tx),
        platform.request('debug').eq(self.uart_rx.rx),
    ]
def __init__(self, bus, bus_dmac, fifo_depth=None):
    """Expose DMA-fetched data as a read-only AXI slave.

    A ``_ReadRequester`` issues bursts on ``bus_dmac``; the returned data is
    pushed through ``self.sink`` into a SyncFIFO and served back to AXI read
    transactions on ``bus``. Write transactions are accepted but ignored
    (data discarded, OKAY response returned).

    :param bus: AXI bus record providing ``ar``/``aw``/``w``/``r``/``b``
        channels and ``data_width``/``id_width`` attributes.
    :param bus_dmac: bus handed to the internal ``_ReadRequester``.
    :param fifo_depth: buffer depth; defaults to
        ``BURST_LENGTH + DMAC_LATENCY``.
    :raises ValueError: if ``fifo_depth`` is smaller than ``BURST_LENGTH``
        or ``BURST_LENGTH`` is not a power of two.
    """
    ar, aw, w, r, b = attrgetter("ar", "aw", "w", "r", "b")(bus)
    dw = bus.data_width
    self.sink = stream.Endpoint(rec_layout(r, {"data"}))
    self.busy = Signal()        # high while buffered data remains unread
    self.dma_reset = Signal()   # level-sensitive reset for the requester

    # # #

    self.submodules.requester = requester = _ReadRequester(bus_dmac)
    self.comb += requester.reset.eq(self.dma_reset)
    fifo_depth = fifo_depth or BURST_LENGTH + DMAC_LATENCY
    if fifo_depth < BURST_LENGTH:
        raise ValueError("fifo_depth shall be ge BURST_LENGTH")
    try:
        log2_int(BURST_LENGTH)
    except ValueError:
        raise ValueError("BURST_LENGTH shall be a power of 2")
    fifo = SyncFIFO(dw, fifo_depth)
    self.submodules += fifo
    self.comb += [
        # Request a new burst while the FIFO level leaves room for one:
        # OR-reduce the level bits above log2(BURST_LENGTH) — i.e. the
        # request is raised as long as level >= BURST_LENGTH.
        # NOTE(review): semantics of wrap() not visible here — confirm it
        # returns a value whose bit-length is log2_int(BURST_LENGTH).
        requester.burst_request.eq(
            reduce(or_, fifo.level[len(wrap(BURST_LENGTH - 1)):])),
        self.sink.ack.eq(fifo.writable),
        fifo.we.eq(self.sink.stb),
        fifo.din.eq(self.sink.data),
    ]
    # NOTE(review): r.data is also driven in the "data path" comb block
    # below — this assignment is redundant (last comb statement wins).
    self.comb += [
        r.data.eq(fifo.dout),
        self.busy.eq(fifo.readable),
    ]

    # AXI Slave, ignore write access
    # id_ / cnt are registered; *_next are their combinatorial next values
    # (default: hold), overridden inside FSM states.
    id_ = Signal(bus.id_width, reset_less=True)
    id_next = Signal(len(id_))
    # NOTE(review): Signal(max=15) counts only 0..14 — AXI arlen can be up
    # to 255 (or 15 for AXI3); confirm callers never issue longer bursts.
    cnt = Signal(max=15, reset_less=True)
    cnt_next = Signal(len(cnt))
    self.comb += [
        id_next.eq(id_),
        cnt_next.eq(cnt),
    ]
    self.sync += [
        id_.eq(id_next),
        cnt.eq(cnt_next),
    ]

    # control
    self.submodules.fsm = fsm = FSM(reset_state="IDLE")
    fsm.act(
        "IDLE",
        # Accept either channel; a simultaneous AW+AR prioritizes the write
        # (AR ready is deasserted so the read request is held off).
        aw.ready.eq(1),
        ar.ready.eq(1),
        If(
            aw.valid,
            ar.ready.eq(0),
            id_next.eq(aw.id),
            NextState("WRITE"),
        ).Elif(
            ar.valid,
            id_next.eq(ar.id),
            cnt_next.eq(ar.len),  # remaining beats (AXI len = beats - 1)
            NextState("READ"),
        ))
    fsm.act(
        "WRITE",
        w.ready.eq(1),  # ignored
        If(
            w.valid & w.last,
            NextState("WRITE_DONE"),
        ))
    fsm.act("WRITE_DONE",
            b.valid.eq(1),  # acknowledge the discarded write with OKAY
            If(
                b.ready,
                NextState("IDLE"),
            ))
    fsm.act(
        "READ",
        r.valid.eq(1),
        If(cnt == 0,
           r.last.eq(1),
           If(
               r.ready,
               NextState("IDLE"),
           # Last beat not yet accepted: keep presenting it in READ_DONE.
           ).Else(NextState("READ_DONE"), )).Elif(
               r.ready,
               cnt_next.eq(cnt - 1),
           ))
    fsm.act("READ_DONE",
            r.valid.eq(1),
            r.last.eq(1),
            If(
                r.ready,
                NextState("IDLE"),
            ))

    # data path
    self.comb += [
        r.last.eq(cnt == 0),
        r.id.eq(id_),
        b.id.eq(id_),
        r.resp.eq(axi.Response.okay),
        b.resp.eq(axi.Response.okay),
        r.data.eq(fifo.dout),
        # Pop the FIFO on every accepted read beat.
        fifo.re.eq(r.valid & r.ready),
    ]
def __init__(self):
    """ULPI PHY interface core (USB clock domain).

    Drives the ULPI data bus state machine: receives USB traffic into
    ``rx_fifo`` (9-bit entries: 8 data bits + RX-CMD flag) and performs
    ULPI register reads/writes requested through the AXI register bank
    attached via ``attach_axi_slave``.

    NOTE(review): the enclosing class is not visible here — ``self.dir``,
    ``self.nxt``, ``self.stp``, ``self.data_t``, ``self.axi_reg`` and
    ``self.get_rising_edge`` are assumed to be provided by it; confirm
    against the class definition.
    """
    self.specials += self.data_t
    self.isRxCmd = Signal()     # set when rx_data holds an RX-CMD byte
    self.rx_data = Signal(8)    # last byte captured from the ULPI bus
    rx_fifo_we = Signal()
    self.debug = Signal(8)
    past_rx_cmd = Signal(8)     # previous RX-CMD (kept for debug/history)
    current_rx_cmd = Signal(8)  # most recent RX-CMD, decoded below

    # ULPI Register Write signals. USB clock domain
    ulpi_reg_wr_addr = Signal(6)
    ulpi_reg_wr_data = Signal(8)
    ulpi_reg_wr_trig = Signal()
    ulpi_reg_wr_busy = Signal()
    ulpi_reg_wr_done = Signal()
    ulpi_reg_wr_queue = Signal()  # latched request, consumed by the FSM

    # ULPI Register Read signals. USB clock domain
    ulpi_reg_rd_addr = Signal(6)
    ulpi_reg_rd_data = Signal(8)
    ulpi_reg_rd_trig = Signal()
    ulpi_reg_rd_busy = Signal()
    ulpi_reg_rd_done = Signal()
    ulpi_reg_rd_queue = Signal()  # latched request, consumed by the FSM

    # Latch triggers into queue flags so a one-cycle trigger survives until
    # the FSM is back in IDLE and can service it.
    self.sync.usb += [
        If(ulpi_reg_wr_trig, ulpi_reg_wr_queue.eq(1)),
        If(ulpi_reg_rd_trig, ulpi_reg_rd_queue.eq(1)),
    ]

    fsm = self.fsm = ClockDomainsRenamer("usb")(FSM(reset_state="RESET"))
    self.submodules += fsm

    # Wait for the PHY to release the bus (dir low) before starting.
    fsm.act(
        "RESET",
        If(
            ~self.dir,
            NextValue(self.data_t.o, 0x00),
            NextState("IDLE"),
        ))
    fsm.act(
        "IDLE",
        If(
            self.dir,  # & self.nxt,
            # PHY took the bus: incoming data has priority over register ops.
            NextState("RX")).Elif(
                ulpi_reg_wr_queue,
                NextState("REG_WR_CMD"),
                # TXCMD: 0b10 (register write) in the top bits, address below.
                NextValue(
                    self.data_t.o,
                    Cat(ulpi_reg_wr_addr, Constant(value=2, bits_sign=2))),
                NextValue(ulpi_reg_wr_queue, 0),
                NextValue(ulpi_reg_wr_busy, 1),
                NextValue(ulpi_reg_wr_done, 0),
            ).Elif(
                ulpi_reg_rd_queue,
                NextState("REG_RD_CMD"),
                # TXCMD: 0b11 (register read) in the top bits, address below.
                NextValue(
                    self.data_t.o,
                    Cat(ulpi_reg_rd_addr, Constant(value=3, bits_sign=2))),
                NextValue(ulpi_reg_rd_queue, 0),
                NextValue(ulpi_reg_rd_busy, 1),
                NextValue(ulpi_reg_rd_done, 0),
            ))
    fsm.act(
        "RX",
        NextValue(self.isRxCmd, 0x0),
        If(
            # dir high without nxt: the PHY is presenting an RX-CMD byte.
            self.dir & ~self.nxt,
            NextValue(self.rx_data, self.data_t.i),
            NextValue(self.isRxCmd, 0x1),
        ).Elif(self.dir & self.nxt,
               # dir+nxt: ordinary receive data byte.
               NextValue(self.rx_data, self.data_t.i)).Elif(
                   ~self.dir & ~self.nxt,
                   NextState("IDLE"),
               ))
    fsm.act(
        "REG_WR_CMD",
        If(
            ~self.dir & self.nxt,
            # PHY accepted the TXCMD; present the write data next.
            NextState("REG_WR_DATA"),
            NextValue(self.data_t.o, ulpi_reg_wr_data),
        ).Elif(
            self.dir,  # & self.nxt,
            NextState(
                "RX"),  # Reg write aborted during Reg Write TXCMD cycle
            NextValue(self.data_t.o, 0x00),
        ))
    fsm.act(
        "REG_WR_DATA",
        If(
            ~self.dir & self.nxt,
            # Data accepted; assert stp for one cycle to complete the write.
            NextState("REG_WR_STP"),
            NextValue(self.stp, 1),
        ).Elif(
            self.dir & self.nxt,
            NextState("RX"),  # Reg write aborted during write data cycle
        ),
        NextValue(self.data_t.o, 0x00),
    )
    fsm.act(
        "REG_WR_STP",
        If(
            ~self.dir & ~self.nxt,
            NextState("IDLE"),
        ).Elif(
            self.dir & self.nxt,
            NextState(
                "RX"
            ),  # Register write followed immediately by a USB receive during stp assertion
        ),
        NextValue(self.data_t.o, 0x00),
        NextValue(ulpi_reg_wr_busy, 0),
        NextValue(ulpi_reg_wr_done, 1),
    )
    fsm.act(
        "REG_RD_CMD",
        If(
            self.nxt & ~self.dir,
            # TXCMD accepted; bus turns around before the PHY drives data.
            NextState("REG_RD_TURNAROUND"),
        ).Elif(
            self.nxt & self.
            dir,  # Reg read aborted by PHY during TX_CMD due to receive
            NextState("RX"),
            NextValue(self.data_t.o, 0x00),
        ),
    )
    fsm.act(
        "REG_RD_TURNAROUND",
        If(
            self.dir & self.
            nxt,  # Reg read aborted by PHY during turnaround due to receive
            NextState("RX")).Elif(
                self.dir & ~self.nxt,
                NextState("REG_RD_DATA"),
            ),
        NextValue(self.data_t.o, 0x00),
    )
    fsm.act(
        "REG_RD_DATA",
        If(
            self.dir & ~self.nxt,
            # Capture the register value; fall through RX back to IDLE.
            NextState("RX"),
            NextValue(ulpi_reg_rd_data, self.data_t.i),
            NextValue(ulpi_reg_rd_busy, 0),
            NextValue(ulpi_reg_rd_done, 1),
        ))

    # Track the RX-CMD history: latch each new RX-CMD and keep the previous.
    self.sync.usb += [
        If(self.dir & ~self.nxt & fsm.ongoing("RX"),
           past_rx_cmd.eq(current_rx_cmd),
           current_rx_cmd.eq(self.data_t.i))
    ]

    # Line-state decode from RX-CMD bits [1:0], interpretation depends on
    # the (compile-time) speed mode below.
    se0 = Signal()
    j_state = Signal()
    k_state = Signal()
    se1 = Signal()
    squelch = Signal()
    n_squelch = Signal()
    FULL_SPEED = 0
    HIGH_SPEED = 1
    mode = FULL_SPEED  # 0: Full Speed, 1: High Speed
    self.comb += [
        se0.eq((current_rx_cmd[0:2] == 0b00) & (mode == FULL_SPEED)),
        j_state.eq((current_rx_cmd[0:2] == 0b01) & (mode == FULL_SPEED)),
        k_state.eq((current_rx_cmd[0:2] == 0b10) & (mode == FULL_SPEED)),
        se1.eq((current_rx_cmd[0:2] == 0b11) & (mode == FULL_SPEED)),
        squelch.eq((current_rx_cmd[0:2] == 0b00) & (mode == HIGH_SPEED)),
        n_squelch.eq((current_rx_cmd[0:2] == 0b01) & (mode == HIGH_SPEED)),
    ]

    # 9-bit FIFO entries: {isRxCmd, rx_data[7:0]}.
    rx_fifo = self.rx_fifo = ClockDomainsRenamer("usb")(
        SyncFIFO(9, 20480)
    )  # ClockDomainsRenamer({"write": "usb", "read": "sys"})(AsyncFIFO(9, 2048))
    self.submodules += rx_fifo
    self.sync.usb += [
        self.stp.eq(
            0
        ),  # ~rx_fifo.writable) # No need to stop. Always receive unless FIFO full. FIXME stp
        rx_fifo.we.eq(
            rx_fifo_we
        )  # Delay we assertion since we are delaying data too in fsm's NextValue
    ]
    self.comb += [
        self.data_t.oe.eq(~self.dir),  # Tristate output enable
        rx_fifo_we.eq(fsm.ongoing("RX") & self.dir & rx_fifo.writable),
        rx_fifo.din.eq(Cat(self.rx_data, self.isRxCmd)),
        # rx_fifo.we.eq(fsm.ongoing("RX") & self.dir & rx_fifo.writable)
    ]

    # AXI register map:
    #  0: FIFO read port (bit 31 = isRxCmd flag), 1: FIFO readable flag
    #  2-5: register write (addr, data, trigger, busy/done status)
    #  6-9: register read  (addr, data, trigger, busy/done status)
    self.attach_axi_slave(16)
    self.comb += [
        self.axi_reg[0].data_r.eq(
            Cat(rx_fifo.dout[:8], Constant(0, bits_sign=23),
                rx_fifo.dout[8])),
        self.axi_reg[0].readable.eq(rx_fifo.readable),
        rx_fifo.re.eq(self.axi_reg[0].re),
        self.axi_reg[1].data_r.eq(rx_fifo.readable),
        # ULPI Register
        ulpi_reg_wr_addr.eq(self.axi_reg[2].data_w[0:6]),
        ulpi_reg_wr_data.eq(self.axi_reg[3].data_w[0:8]),
        ulpi_reg_wr_trig.eq(self.get_rising_edge(
            self.axi_reg[4].data_w[0])),
        self.axi_reg[5].data_r.eq(Cat(ulpi_reg_wr_busy, ulpi_reg_wr_done)),
        ulpi_reg_rd_addr.eq(self.axi_reg[6].data_w[0:6]),
        self.axi_reg[7].data_r.eq(ulpi_reg_rd_data),
        ulpi_reg_rd_trig.eq(self.get_rising_edge(
            self.axi_reg[8].data_w[0])),
        self.axi_reg[9].data_r.eq(Cat(ulpi_reg_rd_busy, ulpi_reg_rd_done)),
    ]
    self.comb += [self.axi_reg[i].writable.eq(1) for i in range(16)]
    # self.comb += [
    #     self.debug.eq(Cat(ulpi_reg_rd_busy, ulpi_reg_rd_done, ulpi_reg_rd_trig, ulpi_reg_rd_queue, ulpi_reg_wr_busy, ulpi_reg_wr_done, ulpi_reg_wr_trig, ulpi_reg_wr_queue))
    # ]
    # Sticky debug bits: set on first trigger, never cleared.
    self.sync.usb += [
        If(ulpi_reg_wr_trig, self.debug[0].eq(1)),
        If(ulpi_reg_rd_trig, self.debug[1].eq(1)),
    ]
def __init__(self, config):
    """Video IO module: VGA timing generation plus external-VRAM access.

    A small FSM arbitrates the shared VRAM bus between CPU writes (queued
    through a 4-deep FIFO fed by the DATA control register) and display
    reads. Horizontal/vertical VGA timing runs in two derived clock
    domains (``pxl`` clocked by the VRAM FSM's drive_pxl, ``line`` clocked
    by HS).

    :param config: ``VideoConfig`` value; CFG1 selects a 14-bit VRAM
        address space and enables the second (high) bit of each color
        channel, otherwise 16 address bits are used.

    NOTE(review): ``self.vram_drive_addr_logic``, ``self.vram_read_logic``
    and ``self.display_logic`` come from the enclosing class (not visible
    here) — confirm their contents against the class definition.
    """
    IOModule.__init__(self, "Video")
    VRAM_DATA_SIZE = 8
    if config == VideoConfig.CFG1:
        VRAM_ADDR_SIZE = 14
    else:
        VRAM_ADDR_SIZE = 16

    # Control Registers
    self.cregs += CtrlReg("ADDRH", CtrlRegDir.WRONLY)
    self.cregs += CtrlReg("ADDRL", CtrlRegDir.WRONLY)
    self.cregs += CtrlReg("DATA", CtrlRegDir.WRONLY)

    # IO definition
    ## VGA Signals
    self.iosignals += IOSignal("VGA_VS", IOSignalDir.OUT)
    self.iosignals += IOSignal("VGA_HS", IOSignalDir.OUT)
    self.iosignals += IOSignal("VGA_RED0", IOSignalDir.OUT)
    self.iosignals += IOSignal("VGA_RED1", IOSignalDir.OUT)
    self.iosignals += IOSignal("VGA_GREEN0", IOSignalDir.OUT)
    self.iosignals += IOSignal("VGA_GREEN1", IOSignalDir.OUT)
    self.iosignals += IOSignal("VGA_BLUE0", IOSignalDir.OUT)
    self.iosignals += IOSignal("VGA_BLUE1", IOSignalDir.OUT)
    # VRAM Signals
    for i in range(VRAM_ADDR_SIZE):
        self.iosignals += IOSignal("VRAM_ADDR{}".format(i),
                                   IOSignalDir.OUT)
    for i in range(VRAM_DATA_SIZE):
        self.iosignals += IOSignal("VRAM_DOUT{}".format(i),
                                   IOSignalDir.OUT)
        self.iosignals += IOSignal("VRAM_DIN{}".format(i), IOSignalDir.IN)
        self.iosignals += IOSignal("VRAM_DDIR{}".format(i),
                                   IOSignalDir.DIRCTL)
    self.iosignals += IOSignal("VRAM_WR", IOSignalDir.OUT)
    self.iosignals += IOSignal("VRAM_CE", IOSignalDir.OUT)

    # Internal signals
    ## Video signals
    self.px = Signal(6)            # current pixel color (2 bits/channel)
    self.display_enabled = Signal()
    self.h_display = Signal()      # inside horizontal active area
    self.v_display = Signal()      # inside vertical active area
    self.next_h_counter = Signal(10)
    self.h_counter = Signal(10)
    self.v_counter = Signal(10)
    ## VRAM Signals
    self.vram_din = Signal(VRAM_DATA_SIZE)
    self.vram_addr = Signal(VRAM_ADDR_SIZE)

    # Logic
    ## VRAM management
    drive_pxl = Signal()      # also clocks the "pxl" domain below
    drive_vram_d = Signal()   # direction control: drive the VRAM data pins
    vram_wr = Signal()        # active-low write strobe (0 = write)
    vram_dout = Signal(VRAM_DATA_SIZE)
    self.comb += self.iosignals.VRAM_WR.eq(vram_wr)
    self.comb += self.iosignals.VRAM_CE.eq(0)  # chip always enabled
    for i in range(VRAM_ADDR_SIZE):
        self.comb += getattr(self.iosignals,
                             "VRAM_ADDR{}".format(i)).eq(self.vram_addr[i])
    for i in range(VRAM_DATA_SIZE):
        self.comb += getattr(self.iosignals,
                             "VRAM_DOUT{}".format(i)).eq(vram_dout[i])
        self.comb += self.vram_din[i].eq(
            getattr(self.iosignals, "VRAM_DIN{}".format(i)))
        self.comb += getattr(self.iosignals,
                             "VRAM_DDIR{}".format(i)).eq(drive_vram_d)

    # CPU write path: DATA register pushes into a small FIFO drained by the
    # VRAM FSM between display reads.
    vram_write_fifo = SyncFIFO(VRAM_DATA_SIZE, 4)
    self.comb += vram_write_fifo.we.eq(self.cregs.DATA.wr_pulse)
    self.comb += vram_write_fifo.din.eq(self.cregs.DATA)
    # Write cursor: loaded from ADDRH:ADDRL, auto-incremented after each
    # write (vram_wr low) so consecutive DATA writes fill memory linearly.
    self.vram_cursor = Signal(VRAM_ADDR_SIZE)
    self.sync += \
        If(self.cregs.ADDRL.wr_pulse,
            self.vram_cursor.eq(self.cregs.ADDRH << 8 | self.cregs.ADDRL)).\
        Elif(~vram_wr,
            self.vram_cursor.eq(self.vram_cursor + 1))
    fsm_vram = FSM()
    fsm_vram.act("VRAM_WRITE",
        NextValue(drive_pxl, 0),
        If(vram_write_fifo.readable,
            # Pending CPU write: set up address/data, strobe on next state.
            NextValue(vram_dout, vram_write_fifo.dout),
            NextValue(drive_vram_d, 1),
            NextValue(vram_wr, 0),
            NextValue(self.vram_addr, self.vram_cursor),
            # NOTE(review): driving a FIFO's .re via NextValue registers it
            # one cycle late — confirm against SyncFIFO read semantics.
            NextValue(vram_write_fifo.re, 1),
            NextState("VRAM_WRITE_LATCH")).\
        Else(
            # No write queued: perform a display read instead.
            NextValue(drive_vram_d, 0),
            NextValue(vram_wr, 1),
            NextState("VRAM_READ"),
            *self.vram_drive_addr_logic))
    fsm_vram.act("VRAM_WRITE_LATCH",
        NextValue(drive_pxl, 1),
        NextValue(vram_wr, 1),
        NextValue(vram_write_fifo.re, 0),
        NextState("VRAM_WRITE"))
    fsm_vram.act("VRAM_READ",
        NextValue(drive_pxl, 1),
        NextState("VRAM_WRITE"),
        *self.vram_read_logic)
    self.submodules += vram_write_fifo
    self.submodules += fsm_vram

    ## Video signals generator
    # Derived clock domains: "pxl" ticks with drive_pxl, "line" with HS.
    self.clock_domains.cd_pxl = ClockDomain(reset_less=True)
    self.comb += self.cd_pxl.clk.eq(drive_pxl)
    self.clock_domains.cd_line = ClockDomain(reset_less=True)
    self.comb += self.cd_line.clk.eq(self.iosignals.VGA_HS)
    self.comb += self.display_enabled.eq(self.h_display & self.v_display)
    fsm_h = ClockDomainsRenamer("pxl")(FSM())
    fsm_v = ClockDomainsRenamer("line")(FSM())

    def wait(cnt, duration, current_state, next_state):
        # Stay in current_state for `duration` ticks of cnt, then advance.
        return \
            If(cnt == duration - 1,
                NextValue(cnt, 0),
                NextState(next_state)).\
            Else(
                NextValue(cnt, cnt + 1),
                NextState(current_state))

    # Horizontal timing: front porch -> sync -> back porch -> display.
    fsm_h.act(
        "FRONT_PORCH", self.iosignals.VGA_HS.eq(1), self.h_display.eq(0),
        wait(self.h_counter, H_FRONT_PORCH_LEN, "FRONT_PORCH", "SYNC"))
    fsm_h.act("SYNC", self.iosignals.VGA_HS.eq(0), self.h_display.eq(0),
              wait(self.h_counter, H_SYNC_PULSE_LEN, "SYNC", "BACK_PORCH"))
    fsm_h.act(
        "BACK_PORCH", self.iosignals.VGA_HS.eq(1), self.h_display.eq(0),
        # next_h_counter looks 2 pixels ahead (VRAM read latency prefetch).
        If(self.h_counter >= H_BACK_PORCH_LEN - 2,
           self.next_h_counter.eq(self.h_counter - (H_BACK_PORCH_LEN - 2))),
        wait(self.h_counter, H_BACK_PORCH_LEN, "BACK_PORCH", "DISPLAY"))
    fsm_h.act("DISPLAY",
        self.iosignals.VGA_HS.eq(1),
        self.h_display.eq(1),
        If(self.h_counter < H_DISPLAY_LEN - 2,
            self.next_h_counter.eq(self.h_counter + 2)).\
        Else(self.next_h_counter.eq(self.h_counter - (H_DISPLAY_LEN - 2))),
        wait(self.h_counter, H_DISPLAY_LEN, "DISPLAY", "FRONT_PORCH"))

    # Vertical timing (clocked once per line by HS).
    fsm_v.act(
        "FRONT_PORCH", self.iosignals.VGA_VS.eq(1), self.v_display.eq(0),
        wait(self.v_counter, V_FRONT_PORCH_LEN, "FRONT_PORCH", "SYNC"))
    fsm_v.act("SYNC", self.iosignals.VGA_VS.eq(0), self.v_display.eq(0),
              wait(self.v_counter, V_SYNC_PULSE_LEN, "SYNC", "BACK_PORCH"))
    fsm_v.act(
        "BACK_PORCH", self.iosignals.VGA_VS.eq(1), self.v_display.eq(0),
        wait(self.v_counter, V_BACK_PORCH_LEN, "BACK_PORCH", "DISPLAY"))
    fsm_v.act(
        "DISPLAY", self.iosignals.VGA_VS.eq(1), self.v_display.eq(1),
        wait(self.v_counter, V_DISPLAY_LEN, "DISPLAY", "FRONT_PORCH"))
    self.submodules += fsm_h
    self.submodules += fsm_v

    ## Pixels generator
    # Low bit of each channel always driven; high bit only in CFG1.
    self.comb += self.iosignals.VGA_RED0.eq(self.px[2])
    self.comb += self.iosignals.VGA_GREEN0.eq(self.px[1])
    self.comb += self.iosignals.VGA_BLUE0.eq(self.px[0])
    if config == VideoConfig.CFG1:
        self.comb += self.iosignals.VGA_RED1.eq(self.px[5])
        self.comb += self.iosignals.VGA_GREEN1.eq(self.px[4])
        self.comb += self.iosignals.VGA_BLUE1.eq(self.px[3])
    # Blank (black) outside the active display area.
    self.comb += If(self.display_enabled,
                    *self.display_logic).Else(self.px.eq(0))
def __init__(self, phys, jesd_settings, converter_data_width,
             ilas_check=True):
    """JESD204B RX core: per-lane link layers feeding a shared transport.

    For each PHY a clock-domain-crossing block and a link layer are
    instantiated; per-lane skew FIFOs (depth 2*LMFC cycles) absorb
    inter-lane skew, being held in reset until the lane's link is ready
    and drained together once ``self.ready`` (all links ready) is high.
    Deskewed lane data is fed to the transport layer, whose output goes
    either to the STPL checker (when ``stpl_enable``) or to
    ``self.source``.

    :param phys: iterable of PHY objects (one per lane).
    :param jesd_settings: JESD204B settings object (provides
        ``nconverters`` and ``lmfc_cycles``).
    :param converter_data_width: data width per converter in the output
        record.
    :param ilas_check: forwarded to each ``LiteJESD204BLinkRX``.
    """
    self.enable = Signal()       # core enable; deasserted -> restart held
    self.jsync = Signal()        # AND of all per-link jsync (registered)
    self.jref = Signal()         # SYSREF-derived reference, fanned out
    self.ready = Signal()        # AND of all per-link ready (registered)
    self.restart = Signal()
    self.stpl_enable = Signal()  # route data to STPL checker instead of source
    self.source = Record([("converter"+str(i), converter_data_width)
        for i in range(jesd_settings.nconverters)])

    # # #

    # Restart when disabled.
    self.comb += If(~self.enable, self.restart.eq(1))

    # transport layer
    transport = LiteJESD204BTransportRX(jesd_settings,
                                        converter_data_width)
    transport = ClockDomainsRenamer("jesd")(transport)
    self.submodules.transport = transport

    # stpl
    stpl = LiteJESD204BSTPLChecker(jesd_settings, converter_data_width)
    stpl = ClockDomainsRenamer("jesd")(stpl)
    self.submodules.stpl = stpl
    self.comb += \
        If(self.stpl_enable,
            stpl.sink.eq(transport.source)
        ).Else(
            self.source.eq(transport.source)
        )

    self.links = links = []
    self.skew_fifos = skew_fifos = []
    for n, (phy, lane) in enumerate(zip(phys, transport.sink.flatten())):
        # Prefer the PHY's own index when it exposes one.
        phy_name = "jesd_phy{}".format(n if not hasattr(phy, "n")
                                       else phy.n)
        phy_cd = phy_name + "_rx"

        # CDC from the PHY's RX clock domain into "jesd".
        cdc = LiteJESD204BRXCDC(phy, phy_cd)
        setattr(self.submodules, "cdc"+str(n), cdc)

        link = LiteJESD204BLinkRX(32, jesd_settings, n, ilas_check)
        link = ClockDomainsRenamer("jesd")(link)
        self.submodules += link
        links.append(link)
        self.comb += [
            link.reset.eq(self.restart),
            link.jref.eq(self.jref),
            phy.rx_align.eq(link.align)
        ]

        # Deskew buffer: flushed while the link is down, written every
        # cycle afterwards, read once all lanes are ready.
        skew_fifo = SyncFIFO(32, 2*jesd_settings.lmfc_cycles)
        skew_fifo = ClockDomainsRenamer("jesd")(skew_fifo)
        skew_fifo = ResetInserter()(skew_fifo)
        skew_fifos.append(skew_fifo)
        self.submodules += skew_fifo
        self.comb += [
            skew_fifo.reset.eq(~link.ready),
            skew_fifo.we.eq(1),
            skew_fifo.re.eq(self.ready),
        ]

        # connect data
        self.comb += [
            phy.source.connect(cdc.sink),
            link.sink.data.eq(cdc.source.data),
            link.sink.ctrl.eq(cdc.source.ctrl),
            cdc.source.ready.eq(1),
            skew_fifo.din.eq(link.source.data),
            lane.eq(skew_fifo.dout)
        ]

    # Aggregate per-link status (registered in the jesd domain).
    self.sync.jesd += [
        self.jsync.eq(reduce(and_, [link.jsync for link in links])),
        self.ready.eq(reduce(and_, [link.ready for link in links])),
    ]
def __init__(self, dw, max_pending_requests, with_reordering=False):
    """Tag manager sitting between two internal master ports.

    Read requests are stamped with a tag popped from ``tag_fifo``; the
    tag's (channel, user_id) pair is saved in ``info_mem`` so completions
    — which carry only the tag — can be routed back to the right
    requester. Write requests bypass tagging (fixed tag 32). Tags are
    recycled into the FIFO when a completion's last packet is seen.

    :param dw: data width of the internal master ports.
    :param max_pending_requests: number of outstanding read tags; also
        sizes the tag FIFO and the info memory.
    :param with_reordering: insert a ``Reordering`` stage so completions
        are returned in request order.
    """
    self.master_in = MasterInternalPort(dw)
    self.master_out = MasterInternalPort(dw)

    # # #

    req_sink, req_source = self.master_in.sink, self.master_out.sink
    cmp_sink, cmp_source = self.master_out.source, self.master_in.source

    # Pool of free tags; pre-filled by the INIT state below.
    tag_fifo = SyncFIFO(log2_int(max_pending_requests),
                        max_pending_requests)
    self.submodules += tag_fifo

    # Per-tag routing info: [0:8] channel, [8:16] user_id.
    info_mem = Memory(16, max_pending_requests)
    info_mem_wr_port = info_mem.get_port(write_capable=True)
    info_mem_rd_port = info_mem.get_port(async_read=False)
    self.specials += info_mem, info_mem_wr_port, info_mem_rd_port

    # Tag allocated to the in-flight read (captured when the FIFO is popped).
    req_tag = Signal(max=max_pending_requests)
    self.sync += \
        If(tag_fifo.re,
            req_tag.eq(tag_fifo.dout)
        )

    # requests mgt
    req_fsm = FSM(reset_state="IDLE")
    self.submodules += req_fsm
    req_fsm.act(
        "IDLE", req_sink.ack.eq(0),
        # Reads wait for a free tag; writes proceed immediately.
        If(req_sink.stb & req_sink.sop & ~req_sink.we & tag_fifo.readable,
           tag_fifo.re.eq(1), NextState("SEND_READ")).Elif(
               req_sink.stb & req_sink.sop & req_sink.we,
               NextState("SEND_WRITE")))
    req_fsm.act(
        "SEND_READ", Record.connect(req_sink, req_source),
        req_sink.ack.eq(0), req_source.tag.eq(req_tag),
        If(req_source.stb & req_source.eop & req_source.ack,
           NextState("UPDATE_INFO_MEM")))
    req_fsm.act(
        "SEND_WRITE", Record.connect(req_sink, req_source),
        # Writes get no completion; 32 marks them as untracked.
        req_source.tag.eq(32),
        If(req_source.stb & req_source.eop & req_source.ack,
           NextState("IDLE")))
    req_fsm.act("UPDATE_INFO_MEM", info_mem_wr_port.we.eq(1),
                info_mem_wr_port.adr.eq(req_tag),
                info_mem_wr_port.dat_w[0:8].eq(req_sink.channel),
                info_mem_wr_port.dat_w[8:16].eq(req_sink.user_id),
                req_sink.ack.eq(1), NextState("IDLE"))

    # completions mgt
    if with_reordering:
        self.submodules.reordering = Reordering(dw, max_pending_requests)
        self.comb += [
            self.reordering.req_we.eq(info_mem_wr_port.we),
            self.reordering.req_tag.eq(info_mem_wr_port.adr),
            Record.connect(self.reordering.source, cmp_source)
        ]
        # Completion FSM now feeds the reordering stage instead.
        cmp_source = self.reordering.sink

    cmp_fsm = FSM(reset_state="INIT")
    self.submodules += cmp_fsm

    # INIT fills the tag FIFO with 0..max_pending_requests-1 at startup.
    tag_cnt = Signal(max=max_pending_requests)
    inc_tag_cnt = Signal()
    self.sync += \
        If(inc_tag_cnt,
            tag_cnt.eq(tag_cnt+1)
        )
    cmp_fsm.act(
        "INIT", inc_tag_cnt.eq(1), tag_fifo.we.eq(1),
        tag_fifo.din.eq(tag_cnt),
        If(tag_cnt == (max_pending_requests - 1), NextState("IDLE")))
    cmp_fsm.act(
        "IDLE", cmp_sink.ack.eq(1),
        info_mem_rd_port.adr.eq(cmp_sink.tag),
        If(
            cmp_sink.stb & cmp_sink.sop,
            # Hold off the packet one cycle so info_mem read data is valid.
            cmp_sink.ack.eq(0),
            NextState("COPY"),
        ))
    cmp_fsm.act(
        "COPY", info_mem_rd_port.adr.eq(cmp_sink.tag),
        If(
            cmp_sink.stb & cmp_sink.eop & cmp_sink.last,
            # Final packet for this tag: recycle it before forwarding.
            cmp_sink.ack.eq(0),
            NextState("UPDATE_TAG_FIFO"),
        ).Else(
            Record.connect(cmp_sink, cmp_source),
            If(cmp_sink.stb & cmp_sink.eop & cmp_sink.ack,
               NextState("IDLE"))),
        # Restore routing info saved at request time.
        cmp_source.channel.eq(info_mem_rd_port.dat_r[0:8]),
        cmp_source.user_id.eq(info_mem_rd_port.dat_r[8:16]),
    )
    cmp_fsm.act(
        "UPDATE_TAG_FIFO", tag_fifo.we.eq(1),
        tag_fifo.din.eq(cmp_sink.tag),
        info_mem_rd_port.adr.eq(cmp_sink.tag),
        Record.connect(cmp_sink, cmp_source),
        If(cmp_sink.stb & cmp_sink.ack, NextState("IDLE")),
        cmp_source.channel.eq(info_mem_rd_port.dat_r[0:8]),
        cmp_source.user_id.eq(info_mem_rd_port.dat_r[8:16]),
    )
def __init__(self, hostif, host_burst_length = 16):
    """Stream a SDRAM ring buffer out to the host in fixed-size bursts.

    An SDRAM-read FSM follows ``rptr`` (read pointer) towards
    ``self.wptr`` (write pointer, driven externally) between the CSR-set
    ring base/end addresses, filling a small FIFO. Once the FIFO is full
    a second FSM drains it to ``self.source`` as a burst: a 0xD0 marker
    byte, a length byte (burst_length-1), then the 16-bit words low byte
    first.

    :param hostif: SDRAM host interface; must have a 16-bit data path.
    :param host_burst_length: words per burst, also the FIFO depth.
    """
    width = flen(hostif.d_write)
    assert width == 16
    awidth = flen(hostif.i_addr) + 1

    self.source = Source([('d', 8), ('last', 1)])

    go = Signal()
    gor = Signal()   # go delayed one cycle, for rising-edge detection
    rptr = Signal(awidth)
    self.rptr = rptr
    # NOTE(review): rptr_w / rptr_we are never used below — the CSR
    # rptr-update path mentioned in the comment further down appears
    # unimplemented; dead signals kept as-is.
    rptr_w = Signal(awidth)
    rptr_we = Signal()
    self.wptr = Signal(awidth)

    # CSRs

    ## Debug counters (handshake activity and FSM state occupancy).
    self._debug_i_stb = description.CSRStatus(32)
    self._debug_i_ack = description.CSRStatus(32)
    self._debug_d_stb = description.CSRStatus(32)
    self._debug_d_term = description.CSRStatus(32)
    self._debug_s0 = description.CSRStatus(32)
    self._debug_s1 = description.CSRStatus(32)
    self._debug_s2 = description.CSRStatus(32)

    self.submodules.i_stb_acc = Acc_inc(32)
    self.submodules.i_ack_acc = Acc_inc(32)
    self.submodules.d_stb_acc = Acc_inc(32)
    self.submodules.d_term_acc = Acc_inc(32)
    self.comb += self._debug_i_stb.status.eq(self.i_stb_acc.v)
    self.comb += self._debug_i_ack.status.eq(self.i_ack_acc.v)
    self.comb += self._debug_d_stb.status.eq(self.d_stb_acc.v)
    self.comb += self._debug_d_term.status.eq(self.d_term_acc.v)
    self.comb += If(hostif.i_stb, self.i_stb_acc.inc())
    self.comb += If(hostif.i_ack, self.i_ack_acc.inc())
    self.comb += If(hostif.d_stb, self.d_stb_acc.inc())
    self.comb += If(hostif.d_term, self.d_term_acc.inc())

    self.submodules.s0_acc = Acc_inc(32)
    self.submodules.s1_acc = Acc_inc(32)
    self.submodules.s2_acc = Acc_inc(32)
    self.comb += self._debug_s0.status.eq(self.s0_acc.v)
    self.comb += self._debug_s1.status.eq(self.s1_acc.v)
    self.comb += self._debug_s2.status.eq(self.s2_acc.v)

    ## Ring buffer bounds.
    self._ring_base = description.CSRStorage(awidth)
    self._ring_end = description.CSRStorage(awidth)

    # rptr readback
    self._rptr_status = description.CSRStatus(awidth)
    self.comb += self._rptr_status.status.eq(rptr)

    # 'go' bit
    self._go = description.CSRStorage(1)
    self.comb += go.eq(self._go.storage[0])
    self.sync += gor.eq(go)

    # state machine to read
    self.submodules.sdram_read_fsm = FSM()
    sdram_fifo = SyncFIFO(width, host_burst_length)
    self.submodules += sdram_fifo

    # we always read (never write)
    self.comb += hostif.i_wr.eq(0)

    # blocked: read pointer caught up with the (external) write pointer.
    blocked = Signal()
    self.comb += blocked.eq(rptr == self.wptr)

    # wait until there's data and go, and then when the fifo has space, issue request.
    self.sdram_read_fsm.act("BLOCKED",
        self.s2_acc.inc(),
        If(go & ~blocked,
            NextState("IDLE"))
    )
    self.sdram_read_fsm.act("IDLE",
        self.s0_acc.inc(),
        hostif.i_addr.eq(rptr),
        hostif.i_stb.eq(sdram_fifo.writable),
        If (hostif.i_stb & hostif.i_ack,
            NextState("DATA")
        )
    )
    # read until fifo is full; when fifo is not writable but data was received,
    # abort SDRAM read request.
    wrap = Signal()  # rptr reached ring end -> wrap to ring base
    self.comb += wrap.eq(self.rptr == self._ring_end.storage)
    self.sdram_read_fsm.act("DATA",
        self.s1_acc.inc(),
        hostif.d_term.eq(~sdram_fifo.writable | ~go | blocked | wrap),
        If (hostif.d_term,
            If (hostif.d_stb,
                NextState("BLOCKED")
            ).Else(
                # Termination requested but no beat yet: wait for one more
                # strobe before going back to BLOCKED.
                NextState("WAIT")
            )
        )
    )
    self.sdram_read_fsm.act("WAIT",
        hostif.d_term.eq(1),
        If (hostif.d_stb,
            NextState("BLOCKED")
        )
    )

    # allow rptr to be updated via CSR. Otherwise,
    # increment read point whenever valid data is fed into the fifo.
    rptr_next = Signal(awidth)
    self.comb += If(wrap,
            rptr_next.eq(self._ring_base.storage)
        ).Else(
            rptr_next.eq(self.rptr + 1))
    self.sync += \
        If(go &~ gor,
            # Rising edge of go: restart from the ring base.
            rptr.eq(self._ring_base.storage),
        ).Elif(hostif.d_stb &~hostif.d_term | wrap,
            rptr.eq(rptr_next))
    self.comb += sdram_fifo.we.eq(hostif.d_stb &~ hostif.d_term)
    self.comb += sdram_fifo.din.eq(hostif.d_read)

    # fifo to host interface
    self.submodules.host_write_fsm = FSM()
    burst_rem = Signal(max = host_burst_length)       # words left in burst
    burst_rem_next = Signal(max = host_burst_length)
    self.comb += burst_rem_next.eq(burst_rem)
    self.sync += burst_rem.eq(burst_rem_next)

    # when the sdram_fifo is not anymore writable, start bursting out that data.
    self.host_write_fsm.act("IDLE",
        self.source.payload.d.eq(0xD0),  # burst start marker byte
        self.source.stb.eq(sdram_fifo.readable &~ sdram_fifo.writable),
        If(self.source.ack & self.source.stb,
            NextState("SEND_HEADER")
        )
    )
    self.host_write_fsm.act("SEND_HEADER",
        self.source.payload.d.eq(host_burst_length - 1),  # length byte
        self.source.stb.eq(1),
        If(self.source.ack & self.source.stb,
            burst_rem_next.eq(host_burst_length - 1),
            NextState("SEND_DATA_ODD")
        )
    )
    # when byte available, write low byte until ack'ed.
    self.host_write_fsm.act("SEND_DATA_ODD",
        self.source.payload.d.eq(sdram_fifo.dout[0:8]),
        self.source.stb.eq(sdram_fifo.readable),
        If (self.source.stb & self.source.ack,
            NextState("SEND_DATA_EVEN")
        )
    )
    # write high byte. when ack'ed, read next byte, unless we hit the burst length limit.
    self.host_write_fsm.act("SEND_DATA_EVEN",
        self.source.payload.d.eq(sdram_fifo.dout[8:16]),
        self.source.payload.last.eq(burst_rem == 0),
        self.source.stb.eq(1),
        sdram_fifo.re.eq(self.source.ack),
        If (self.source.ack,
            If (burst_rem != 0,
                NextState("SEND_DATA_ODD"),
                burst_rem_next.eq(burst_rem - 1)
            ).Else(
                NextState("IDLE")
            )
        )
    )
def __init__(self, pads, sys_clk_freq=12000000, n_pixels=8):
    """WS2812 LED strip driver wrapper.

    Buffers 24-bit (one-per-pixel) color words in a FIFO and hands them
    to a ``WS2812Controller`` that serializes them onto ``pads``.

    The previously hard-coded system clock frequency (12 MHz) and pixel
    count (8) are now keyword parameters with the same defaults, so
    existing callers are unaffected while other clock rates / strip
    lengths become possible.

    :param pads: platform pads driving the LED data line.
    :param sys_clk_freq: system clock frequency in Hz used by the
        controller for bit timing (default 12 MHz, as before).
    :param n_pixels: FIFO depth, i.e. number of buffered pixels
        (default 8, as before).
    """
    # 24 bits per pixel (8 bits each for G/R/B as consumed by the controller).
    self.submodules.fifo = SyncFIFO(24, n_pixels)
    self.submodules.controller = WS2812Controller(pads, self.fifo,
                                                  sys_clk_freq)