def elaborate(self, platform):
    """Build an N-row line buffer from chained FIFOs.

    Each FIFO holds one image row; data popped from row n cascades into
    row n+1, and all N row heads are presented in parallel on the output
    matrix port.
    """
    m = Module()
    sync = m.d.sync
    comb = m.d.comb
    # One FIFO per row; +4 entries of slack so a row can keep filling
    # while the chain drains.
    fifo = [
        SyncFIFOBuffered(width=self.input_w, depth=self.row_length + 4)
        for _ in range(self.N)
    ]
    fifo_r_rdy = [Signal() for _ in range(self.N)]
    fifo_r_valid = [Signal() for _ in range(self.N)]
    # NOTE(review): w_en and fifo_r_rdy are driven/declared but never
    # consumed elsewhere in this block — confirm whether they are dead.
    w_en = [Signal() for _ in range(self.N - 1)]
    for n in range(self.N):
        m.submodules['fifo_' + str(n)] = fifo[n]
        comb += [
            fifo_r_rdy[n].eq((fifo[n].level < self.row_length)
                             | self.output.accepted()),
        ]
    # first fifo: fed directly from the input stream
    comb += [
        self.input.ready.eq(fifo[0].w_rdy),
        fifo[0].w_en.eq(self.input.accepted()),
        fifo[0].w_data.eq(self.input.data),
    ]
    for n in range(self.N - 1):
        comb += [
            # Row n is presentable once the next row holds a full line
            # and row n itself has data.
            fifo_r_valid[n].eq((fifo[n + 1].level == self.row_length)
                               & (fifo[n].r_rdy)),
            # Pop row n when the output is consumed, or to keep filling
            # the chain while this row is not yet valid.
            fifo[n].r_en.eq((self.output.accepted() | ~fifo_r_valid[n])),
            # Cascade the popped word into the next row's FIFO.
            fifo[n + 1].w_en.eq(fifo[n].r_rdy & fifo[n].r_en),
            fifo[n + 1].w_data.eq(fifo[n].r_data),
        ]
    # last fifo: nothing to cascade into, valid whenever it has data
    n = self.N - 1
    comb += [
        fifo_r_valid[n].eq(fifo[n].r_rdy),
        fifo[n].r_en.eq(self.output.accepted()),
    ]
    # output: valid only when every row head is valid
    comb += [
        self.output.valid.eq(_and(fifo_r_valid)),
    ]
    for n in range(self.N):
        if self.invert:
            comb += self.output.dataport.matrix[n].eq(fifo[n].r_data)
        else:
            comb += self.output.dataport.matrix[n].eq(fifo[self.N - 1 - n].r_data)
    return m
def __init__(self, match_d=256, event_d=512, prg_d=1024, reg_d=256):
    """Simulation wrapper around the Eventuator.

    Depths are parameters so they can be shrunk; smaller depths
    simulate faster.
    """
    # core and its backing memories
    self.ev = Eventuator()
    self.prg_mem = Memory(width=isa.INSN_WIDTH, depth=prg_d, init=[0])
    self.reg_mem = Memory(width=isa.DATA_WIDTH, depth=reg_d)
    # queues on either side of the core
    self.match_fifo = SyncFIFOBuffered(width=72, depth=match_d)
    self.event_fifo = SyncFIFOBuffered(width=32, depth=event_d)
    # write side: push match records into the match FIFO
    self.i_match_info = make_match_info()
    self.i_match_we = Signal()
    self.o_match_space = Signal()
    # read side: pop events from the event FIFO
    self.o_event = Signal(32)
    self.o_event_valid = Signal()
    self.i_event_re = Signal()
    self.o_clk = Signal()
def _make_output_queue(self, m):
    """Create the output queue FIFO and its word getter.

    Returns the FIFO's write-side signals plus a has-space indicator
    that keeps eight words of headroom.
    """
    queue = SyncFIFOBuffered(depth=config.OUTPUT_QUEUE_DEPTH, width=32)
    m.submodules['FIFO'] = queue
    getter = NextWordGetter()
    m.submodules['oq_get'] = getter
    # wire the getter onto the FIFO's read side
    m.d.comb += getter.data.eq(queue.r_data)
    m.d.comb += getter.ready.eq(queue.r_rdy)
    m.d.comb += queue.r_en.eq(getter.next)
    # expose the getter as xetter register 34
    self.register_xetter(34, getter)
    # report "space available" only while 8+ free words remain
    has_space = queue.w_level < (config.OUTPUT_QUEUE_DEPTH - 8)
    return queue.w_data, queue.w_en, has_space
def elaborate(self, platform): m = Module() # Fifo m.submodules.fifo = fifo = SyncFIFOBuffered(width = 16, depth = self.fifo_depth) # Extract pixel from fifo data m.d.comb += [ self.o.r.eq(fifo.r_data[11:]), self.o.g.eq(fifo.r_data[5:11]), self.o.b.eq(fifo.r_data[:5]) ] # Connect the fifo m.d.comb += [ fifo.w_en.eq(self.i_valid & self.i_en), fifo.w_data.eq(self.i.as_data()), fifo.r_en.eq(fifo.r_rdy & self.i_ready) ] # Set output valid, when fifo has data m.d.comb += self.o_valid.eq(fifo.r_rdy) # Set ready when fifo is not full m.d.comb += self.o_ready.eq(fifo.w_rdy) # Calculate screen co-ordinates m.d.sync += self.o_eof.eq(0) r_en = Signal() m.d.sync += r_en.eq(self.i_en) with m.If(self.i_en & ~r_en): m.d.sync += [ self.o_x.eq(0), self.o_y.eq(0), self.o_eof.eq(0) ] with m.Elif(fifo.r_en): m.d.sync += self.o_x.eq(self.o_x + 1) with m.If(self.o_x == self.x_res - 1): m.d.sync += [ self.o_x.eq(0), self.o_y.eq(self.o_y + 1) ] with m.If(self.o_y == self.y_res - 1): m.d.sync += [ self.o_y.eq(0), self.o_eof.eq(1) ] return m
def __init__(self, divisor, rx_fifo_depth=512):
    """UART peripheral state: boneless bus registers plus an RX FIFO.

    The default RX depth of 512x8 maps onto one BRAM.
    """
    self.divisor = divisor
    # boneless bus interface
    self.i_re = Signal()
    self.i_we = Signal()
    self.i_addr = Signal(4)
    self.o_rdata = Signal(16)
    self.i_wdata = Signal(16)
    # UART pins; TX idles high (reset=1)
    self.i_rx = Signal()
    self.o_tx = Signal(reset=1)
    # received bytes buffer here until the CPU drains them
    self.rx_fifo = SyncFIFOBuffered(width=8, depth=rx_fifo_depth)
def __init__(self, raw_lane: PCIeSERDESInterface, decoded_lane: PCIeScrambler, fifo_depth=256):
    """State for a PCIe lane monitor; only 1:2 geared lanes are supported."""
    assert raw_lane.ratio == 2
    self.raw_lane = raw_lane
    self.decoded_lane = decoded_lane
    # most recently observed training set and its link/lane fields
    self.ts = Record(ts_layout)
    self.vlink = Signal(8)
    self.vlane = Signal(5)
    self.consecutive = Signal()
    self.inverted = Signal()
    self.ready = Signal()
    # capture FIFO, clocked by the receive ("rx") domain
    self.fifo = DomainRenamer("rx")(SyncFIFOBuffered(width=18, depth=fifo_depth))
def __init__(self, fifo_depth=16):
    """SPI peripheral state: a two-register boneless bus plus a TX FIFO."""
    # boneless bus interface; only two registers, hence 1 address bit
    self.i_re = Signal()
    self.i_we = Signal()
    self.i_addr = Signal(1)
    self.o_rdata = Signal(16)
    self.i_wdata = Signal(16)
    # SPI pins
    self.o_clk = Signal()
    self.o_cs = Signal()  # inverted, like usual
    self.o_mosi = Signal()
    self.i_miso = Signal()
    # bytes queued for transmission
    self.fifo = SyncFIFOBuffered(width=8, depth=fifo_depth)
def __init__(self):
    """Match-engine state: SNES bus inputs, config bus, and match FIFO."""
    # SNES bus snoop inputs
    self.i_bus_valid = Signal()
    self.i_bus_addr = Signal(24)
    self.i_bus_data = Signal(8)
    self.i_cycle_count = Signal(32)
    # config bus (write-only) inputs
    self.i_config = Signal(8)
    self.i_config_addr = Signal(10)
    self.i_config_we = Signal()
    # match output stream
    self.o_match_info = make_match_info()
    self.o_match_valid = Signal()
    self.i_match_re = Signal()
    self.i_reset_match_fifo = Signal()
    # queue of pending match records
    self.match_fifo = SyncFIFOBuffered(width=72, depth=256)
def __init__(self, cart_signals):
    """Top-level core state: program-load port, event FIFO, submodules."""
    # program-load port
    self.i_prg_insn = Signal(isa.INSN_WIDTH)  # program instruction
    self.i_prg_addr = Signal(isa.PC_WIDTH)    # what address to write to
    self.i_prg_we = Signal()                  # write instruction to the address
    # connection to the event FIFO
    self.o_event = Signal(32)
    self.o_event_valid = Signal()
    self.i_event_re = Signal()  # acknowledge the data
    # version constant output for the get version command
    self.o_gateware_version = Const(GATEWARE_VERSION, 32)
    # submodules and memories
    self.event_fifo = SyncFIFOBuffered(width=32, depth=512)
    self.bus = SNESBus(cart_signals)
    self.match_engine = MatchEngine()
    self.eventuator = Eventuator()
    self.ev_prg_mem = Memory(width=isa.INSN_WIDTH, depth=1024, init=[0])
    self.ev_reg_mem = Memory(width=isa.DATA_WIDTH, depth=256)
def __init__(self, config, constraints):
    """Width/depth bookkeeping and ports for the LJ92 output FIFO stage."""
    #config assertions
    assert config['bit_depth'] >= 2 and config['bit_depth'] <= 16
    assert config['pixels_per_cycle'] >= 1
    assert config['LJ92_fifo_depth'] >= 25
    #data width
    # Each pixel can expand to at most 16 + bit_depth coded bits (capped at 31).
    single_data_bits = min(16 + config['bit_depth'], 31)
    self.data_bits = single_data_bits * config['pixels_per_cycle']
    # Bits needed to count 0..data_bits inclusive.
    self.ctr_bits = ceil(log(self.data_bits + 1, 2))
    # FIFO word = data + bit counter + 1-bit end flag.
    self.total_width = self.data_bits + self.ctr_bits + 1
    self.depth = config['LJ92_fifo_depth']
    # lj92 pipeline ports
    self.enc_in = Signal(self.data_bits)
    self.enc_in_ctr = Signal(max=self.data_bits + 1)
    self.in_end = Signal(1)
    self.valid_in = Signal(1)
    self.latch_output = Signal(1)
    self.enc_out = Signal(self.data_bits)
    self.enc_out_ctr = Signal(max=self.data_bits + 1)
    self.out_end = Signal(1)
    self.valid_out = Signal(1)
    # port to indicate it is full and no more
    # input should be inserted in the lj92 pipeline
    self.close_full = Signal(1)
    self.ios = \
        [self.enc_in, self.enc_in_ctr, self.in_end, self.valid_in] + \
        [self.enc_out, self.enc_out_ctr, self.out_end, self.valid_out] + \
        [self.latch_output, self.close_full]
    # 4x dual port bram with 36kb each.
    self.fifo = SyncFIFOBuffered(self.total_width, self.depth)
def __init__(self, config, constraints):
    """Width/depth bookkeeping and ports for the bit-converter FIFO stage."""
    #config assertions
    assert config['converter'] >= 1
    assert config['bit_depth'] >= 2 and config['bit_depth'] <= 16
    assert config['pixels_per_cycle'] >= 1
    #how many steps in converter
    single_ctr = min(16 + config['bit_depth'], 31)
    total_ctr = single_ctr * config['pixels_per_cycle']
    # Worst-case cycles to drain one input word, plus pipeline margin.
    self.steps = ceil(total_ctr / config['converter']) + 3
    assert config['converter_fifo_depth'] > (self.steps + 3)
    #save some data
    self.data_width = config['converter']
    # Bits needed to count 0..converter inclusive.
    self.ctr_bits = ceil(log(config['converter'] + 1, 2))
    # FIFO word = data + bit counter + 1-bit end flag.
    self.total_bits = self.data_width + self.ctr_bits + 1
    self.depth = config['converter_fifo_depth']
    # input side ports
    self.enc_in = Signal(self.data_width)
    self.enc_in_ctr = Signal(self.ctr_bits)
    self.in_end = Signal(1)
    self.valid_in = Signal(1)
    self.writable = Signal(1)
    self.close_full = Signal(1)
    # output side ports
    self.latch_output = Signal(1)
    self.enc_out = Signal(self.data_width)
    self.enc_out_ctr = Signal(self.ctr_bits)
    self.out_end = Signal(1)
    self.valid_out = Signal(1)
    self.ios = \
        [self.enc_in, self.enc_in_ctr, self.in_end, self.valid_in] + \
        [self.enc_out, self.enc_out_ctr, self.out_end, self.valid_out] + \
        [self.latch_output, self.writable, self.close_full]
    self.fifo = SyncFIFOBuffered(self.total_bits, self.depth)
def elaborate(self, platform: Platform) -> Module:
    """Gear a half-rate SERDES lane up to full width.

    rx_clk / tx_clk toggle every rxf/txf cycle and select which half of
    the doubled symbol vector is being transferred; small FIFOs move the
    doubled data between the geared domains and the lane-facing ports.
    """
    m = Module()
    m.submodules.serdes = serdes = self.__serdes
    m.submodules += self.lane
    m.d.comb += serdes.lane.speed.eq(self.lane.speed)
    m.d.comb += serdes.lane.reset.eq(self.lane.reset)
    data_width = len(serdes.lane.rx_symbol)
    m.domains.rxf = ClockDomain()
    m.domains.txf = ClockDomain()
    m.d.comb += [
        #ClockSignal("sync").eq(serdes.refclk),
        ClockSignal("rxf").eq(serdes.rx_clk),
        ClockSignal("txf").eq(serdes.tx_clk),
    ]
    platform.add_clock_constraint(
        self.rx_clk, 125e6 if self.speed_5GTps else 625e5
    )
    # For NextPNR, set the maximum clock frequency such that errors are given
    platform.add_clock_constraint(self.tx_clk, 125e6 if self.speed_5GTps else 625e5)
    m.submodules.lane = lane = PCIeSERDESInterface(4)
    # IF SOMETHING IS BROKE: Check if the TX actually transmits good data and not order-swapped data
    # rx_clk toggles each rxf cycle; its phase selects which half of the
    # doubled RX vector receives this cycle's symbols.
    m.d.rxf += self.rx_clk.eq(~self.rx_clk)
    with m.If(~self.rx_clk):
        m.d.rxf += lane.rx_symbol[data_width:data_width * 2].eq(
            serdes.lane.rx_symbol)
        m.d.rxf += lane.rx_valid[serdes.gearing:serdes.gearing * 2].eq(
            serdes.lane.rx_valid)
    with m.Else():
        m.d.rxf += lane.rx_symbol[0:data_width].eq(serdes.lane.rx_symbol)
        m.d.rxf += lane.rx_valid[0:serdes.gearing].eq(serdes.lane.rx_valid)
    # To ensure that it outputs consistent data
    # m.d.rxf += self.lane.rx_symbol.eq(lane.rx_symbol)
    # m.d.rxf += self.lane.rx_valid.eq(lane.rx_valid)
    # TX mirrors the RX scheme: tx_clk phase picks which half of the
    # doubled TX vector feeds the SERDES this cycle.
    m.d.txf += self.tx_clk.eq(~self.tx_clk)
    m.d.txf += serdes.lane.tx_symbol.eq(
        Mux(self.tx_clk, lane.tx_symbol[data_width:data_width * 2],
            lane.tx_symbol[0:data_width]))
    m.d.txf += serdes.lane.tx_disp.eq(
        Mux(self.tx_clk, lane.tx_disp[serdes.gearing:serdes.gearing * 2],
            lane.tx_disp[0:serdes.gearing]))
    m.d.txf += serdes.lane.tx_set_disp.eq(
        Mux(self.tx_clk, lane.tx_set_disp[serdes.gearing:serdes.gearing * 2],
            lane.tx_set_disp[0:serdes.gearing]))
    m.d.txf += serdes.lane.tx_e_idle.eq(
        Mux(self.tx_clk, lane.tx_e_idle[serdes.gearing:serdes.gearing * 2],
            lane.tx_e_idle[0:serdes.gearing]))
    # CDC
    # TODO: Keep the SyncFIFO? Its faster but is it reliable?
    #rx_fifo = m.submodules.rx_fifo = AsyncFIFOBuffered(width=(data_width + serdes.gearing) * 2, depth=4, r_domain="rx", w_domain="rxf")
    rx_fifo = m.submodules.rx_fifo = DomainRenamer("rxf")(SyncFIFOBuffered(
        width=(data_width + serdes.gearing) * 2, depth=4))
    # Write a full doubled word once per two rxf cycles (when rx_clk is high).
    m.d.rxf += rx_fifo.w_data.eq(Cat(lane.rx_symbol, lane.rx_valid))
    m.d.comb += Cat(self.lane.rx_symbol, self.lane.rx_valid).eq(rx_fifo.r_data)
    m.d.comb += rx_fifo.r_en.eq(1)
    m.d.rxf += rx_fifo.w_en.eq(self.rx_clk)
    #tx_fifo = m.submodules.tx_fifo = AsyncFIFOBuffered(width=(data_width + serdes.gearing * 3) * 2, depth=4, r_domain="txf", w_domain="tx")
    tx_fifo = m.submodules.tx_fifo = DomainRenamer("txf")(SyncFIFOBuffered(
        width=(data_width + serdes.gearing * 3) * 2, depth=4))
    m.d.comb += tx_fifo.w_data.eq(
        Cat(self.lane.tx_symbol, self.lane.tx_set_disp, self.lane.tx_disp,
            self.lane.tx_e_idle))
    m.d.txf += Cat(lane.tx_symbol, lane.tx_set_disp, lane.tx_disp,
                   lane.tx_e_idle).eq(tx_fifo.r_data)
    m.d.txf += tx_fifo.r_en.eq(self.tx_clk)
    m.d.comb += tx_fifo.w_en.eq(1)
    #m.d.txf += Cat(lane.tx_symbol, lane.tx_set_disp, lane.tx_disp, lane.tx_e_idle).eq(Cat(self.lane.tx_symbol, self.lane.tx_set_disp, self.lane.tx_disp, self.lane.tx_e_idle))
    # Alias the remaining control/status signals straight through.
    serdes.lane.rx_invert = self.lane.rx_invert
    serdes.lane.rx_align = self.lane.rx_align
    serdes.lane.rx_aligned = self.lane.rx_aligned
    serdes.lane.rx_locked = self.lane.rx_locked
    serdes.lane.rx_present = self.lane.rx_present
    serdes.lane.det_enable = self.lane.det_enable
    serdes.lane.det_valid = self.lane.det_valid
    serdes.lane.det_status = self.lane.det_status
    serdes.slip = self.slip
    return m
def elaborate(self, platform):
    """Wrap payload bytes from ``sink`` in IPv4 + UDP headers on ``source``.

    Payload bytes are staged in a byte FIFO while the per-packet length
    and partial UDP checksum travel through two small side FIFOs, so up
    to ``_in_flight`` packets can be buffered at once.  The output FSM
    then streams the 20-byte IPv4 header, the 8-byte UDP header, and the
    payload.

    Bug fix: the ``UDP_CHECKSUM`` state now transmits ``udp_checksum_out``
    (the per-packet folded checksum computed in ``INIT``, which includes
    the UDP length term) instead of ``udp_checksum``, the input-side
    accumulator that may already be accumulating the *next* packet.
    """
    sink = self.sink
    source = self.source
    m = Module()
    # TODO figure out a _useful_ stream abstraction that can handle this use case
    m.submodules.fifo = fifo = SyncFIFOBuffered(width=8, depth=self._mtu)

    # input side
    m.d.comb += self.sink.connect(self._input)
    counter = Signal(16)
    active = Signal()
    m.submodules.counter_fifo = counter_fifo = SyncFIFOBuffered(
        width=16, depth=self._in_flight)
    m.submodules.checksum_fifo = checksum_fifo = SyncFIFOBuffered(
        width=16, depth=self._in_flight)
    # gotta stall if _either_ FIFO is full, means sink can't be exactly fifo's sink
    m.d.comb += sink.ready.eq(fifo.w_rdy & counter_fifo.w_rdy)
    we = Signal()
    m.d.comb += we.eq(sink.valid & sink.ready)
    udp_checksum = Signal(16)
    m.d.comb += fifo.w_data.eq(self.sink.data)
    m.d.comb += fifo.w_en.eq(we)
    with m.If(we):
        with m.If(self._input.sop):
            # start of packet: reset the byte count and seed the
            # checksum with the precomputed pseudo-header partial sum
            m.d.sync += counter.eq(1)
            m.d.sync += udp_checksum.eq(self._partial_udp_checksum() + sink.data)
            m.d.sync += active.eq(1)
        with m.If(active):
            m.d.sync += udp_checksum.eq(udp_checksum + sink.data)
            m.d.sync += counter.eq(counter + 1)
        with m.If(self._input.eop):
            # write counter + 1 and checksum to FIFOs, become inactive
            m.d.comb += counter_fifo.w_data.eq(counter + 1)
            m.d.comb += counter_fifo.w_en.eq(1)
            m.d.comb += checksum_fifo.w_data.eq(udp_checksum + sink.data)
            m.d.comb += checksum_fifo.w_en.eq(1)
            m.d.sync += active.eq(0)
    with m.Else():
        m.d.comb += counter_fifo.w_en.eq(0)
        m.d.comb += checksum_fifo.w_en.eq(0)

    # output side
    output_active = Signal()
    m.d.comb += source.valid.eq(counter_fifo.r_rdy | output_active)
    re = Signal()
    m.d.comb += re.eq(source.valid & source.ready)
    header_idx = Signal(range(4), reset=0)
    pkt_len = Signal(16)
    ip_checksum = Signal(16)
    udp_checksum_out = Signal(16)
    # normally don't advance these (ticked below in FSM)
    m.d.comb += fifo.r_en.eq(0)
    m.d.sync += counter_fifo.r_en.eq(0)
    m.d.sync += checksum_fifo.r_en.eq(0)

    # output FSM
    with m.FSM():
        with m.State("INIT"):
            # send first header byte
            m.d.comb += source.data.eq(Cat(IHL, IP_VERSION))
            with m.If(re):
                # set SOP
                m.d.comb += self.source.sop.eq(1)
                # mark output active
                m.d.sync += output_active.eq(1)
                # latch out counter value
                m.d.sync += counter_fifo.r_en.eq(1)
                m.d.sync += pkt_len.eq(counter_fifo.r_data)
                # advance checksum FIFO
                m.d.sync += checksum_fifo.r_en.eq(1)
                # IP checksum: total length = payload + 28 header bytes;
                # fold the ones'-complement carries back in twice
                checksum_intermediate = self._partial_ip_checksum(
                ) + counter_fifo.r_data + 28
                checksum_intermediate = checksum_intermediate[:16] + checksum_intermediate[16:]
                checksum_intermediate = checksum_intermediate[:16] + checksum_intermediate[16:]
                m.d.sync += ip_checksum.eq(checksum_intermediate[:16])
                # UDP checksum: add the UDP length (payload + 8) term,
                # same carry fold
                checksum_intermediate = checksum_fifo.r_data + counter_fifo.r_data + 8
                checksum_intermediate = checksum_intermediate[:16] + checksum_intermediate[16:]
                checksum_intermediate = checksum_intermediate[:16] + checksum_intermediate[16:]
                m.d.sync += udp_checksum_out.eq(checksum_intermediate[:16])
                # advance state
                m.next = "IP_HEADER_BYTE2"
        with m.State("IP_HEADER_BYTE2"):
            # send second header byte
            m.d.comb += source.data.eq(Cat(ECN, DSCP))
            with m.If(re):
                # set index for Length
                m.d.sync += header_idx.eq(1)
                # advance state
                m.next = "IP_LENGTH"
        with m.State("IP_LENGTH"):
            # send current length byte
            m.d.comb += source.data.eq(
                (pkt_len + 28).word_select(header_idx, 8))
            with m.If(re):
                # decrement index
                m.d.sync += header_idx.eq(header_idx - 1)
                with m.If(header_idx == 0):
                    # set index for ID
                    m.d.sync += header_idx.eq(1)
                    # advance state
                    m.next = "ID"
        with m.State("ID"):
            # send current id byte
            m.d.comb += source.data.eq(ID.word_select(header_idx, 8))
            with m.If(re):
                # decrement index
                m.d.sync += header_idx.eq(header_idx - 1)
                with m.If(header_idx == 0):
                    # advance state
                    m.next = "FLAGS"
        with m.State("FLAGS"):
            # send flags
            m.d.comb += source.data.eq(Cat(FO[8:], FLAGS))
            with m.If(re):
                # advance state
                m.next = "FO"
        with m.State("FO"):
            # send fragment offset low bits
            m.d.comb += source.data.eq(FO[:8])
            with m.If(re):
                # advance state
                m.next = "TTL"
        with m.State("TTL"):
            # send time-to-live
            m.d.comb += source.data.eq(TTL)
            with m.If(re):
                # advance state
                m.next = "PROTOCOL"
        with m.State("PROTOCOL"):
            # send protocol number
            m.d.comb += source.data.eq(self._proto)
            with m.If(re):
                # set index for checksum
                m.d.sync += header_idx.eq(1)
                # advance state
                m.next = "IP_CHECKSUM"
        with m.State("IP_CHECKSUM"):
            # send current checksum byte (ones'-complemented on the way out)
            m.d.comb += source.data.eq(
                ~ip_checksum.word_select(header_idx, 8))
            with m.If(re):
                # decrement index
                m.d.sync += header_idx.eq(header_idx - 1)
                with m.If(header_idx == 0):
                    # set index for source address
                    m.d.sync += header_idx.eq(3)
                    # advance state
                    m.next = "ADDR_SOURCE"
        with m.State("ADDR_SOURCE"):
            # send current source address byte
            m.d.comb += source.data.eq(
                self._source_ip.word_select(header_idx, 8))
            with m.If(re):
                # decrement index
                m.d.sync += header_idx.eq(header_idx - 1)
                with m.If(header_idx == 0):
                    # set index for dest address
                    m.d.sync += header_idx.eq(3)
                    # advance state
                    m.next = "ADDR_DEST"
        with m.State("ADDR_DEST"):
            # send current destination address byte
            m.d.comb += source.data.eq(
                self._dest_ip.word_select(header_idx, 8))
            with m.If(re):
                # decrement index
                m.d.sync += header_idx.eq(header_idx - 1)
                with m.If(header_idx == 0):
                    # set index for source port
                    m.d.sync += header_idx.eq(1)
                    # advance state
                    m.next = "PORT_SOURCE"
        with m.State("PORT_SOURCE"):
            # send current source port byte
            m.d.comb += source.data.eq(
                self._source_port.word_select(header_idx, 8))
            with m.If(re):
                # decrement index
                m.d.sync += header_idx.eq(header_idx - 1)
                with m.If(header_idx == 0):
                    # set index for dest port
                    m.d.sync += header_idx.eq(1)
                    # advance state
                    m.next = "PORT_DEST"
        with m.State("PORT_DEST"):
            # send current dest port byte
            m.d.comb += source.data.eq(
                self._dest_port.word_select(header_idx, 8))
            with m.If(re):
                # decrement index
                m.d.sync += header_idx.eq(header_idx - 1)
                with m.If(header_idx == 0):
                    # set index for UDP length
                    m.d.sync += header_idx.eq(1)
                    # advance state
                    m.next = "UDP_LENGTH"
        with m.State("UDP_LENGTH"):
            # send current length byte
            m.d.comb += source.data.eq(
                (pkt_len + 8).word_select(header_idx, 8))
            with m.If(re):
                # decrement index
                m.d.sync += header_idx.eq(header_idx - 1)
                with m.If(header_idx == 0):
                    # set index for UDP checksum
                    m.d.sync += header_idx.eq(1)
                    # advance state
                    m.next = "UDP_CHECKSUM"
        with m.State("UDP_CHECKSUM"):
            # send current checksum byte.  BUG FIX: use the folded
            # per-packet value (udp_checksum_out), not the input-side
            # accumulator (udp_checksum).
            m.d.comb += source.data.eq(
                ~udp_checksum_out.word_select(header_idx, 8))
            with m.If(re):
                # decrement index
                m.d.sync += header_idx.eq(header_idx - 1)
                with m.If(header_idx == 0):
                    # advance state
                    m.next = "PAYLOAD"
        with m.State("PAYLOAD"):
            # send current payload byte
            m.d.comb += source.data.eq(fifo.r_data)
            with m.If(re):
                # advance FIFO
                m.d.comb += fifo.r_en.eq(1)
                # decrement length
                m.d.sync += pkt_len.eq(pkt_len - 1)
                with m.If(pkt_len - 1 == 0):
                    # set EOP
                    m.d.comb += self.source.eop.eq(1)
                    # mark output inactive
                    m.d.sync += output_active.eq(0)
                    # packet complete
                    m.next = "INIT"
    return m
def elaborate(self, platform):
    """Strip IPv4 + UDP headers from ``sink`` and emit the payload on
    ``source``.

    The input FSM walks the headers byte by byte, rejecting any packet
    whose fixed fields, destination address/port, or lengths do not
    match; accepted payloads are staged in a byte FIFO with their
    lengths in a side FIFO, and the output FSM replays them as framed
    packets.
    """
    sink = self.sink
    source = self.source
    m = Module()
    m.submodules.fifo = fifo = SyncFIFOBuffered(width=8, depth=self._mtu)
    m.submodules.counter_fifo = counter_fifo = SyncFIFOBuffered(
        width=16, depth=self._in_flight)
    counter = Signal(16)        # remaining bytes of the packet being parsed
    input_counter = Signal(16)  # payload length latched for the output side
    we = Signal()
    m.d.comb += we.eq(sink.valid & sink.ready)
    m.d.comb += fifo.w_data.eq(sink.data)
    m.d.comb += fifo.w_en.eq(0)
    m.d.comb += counter_fifo.w_data.eq(input_counter)
    m.d.comb += counter_fifo.w_en.eq(0)
    input_active = Signal()
    m.d.comb += self.sink.connect(self._input)
    # Only accept a new packet once the payload FIFO has fully drained.
    m.d.comb += sink.ready.eq((fifo.level == 0) | input_active)
    # input FSM
    with m.FSM(name='input_fsm') as fsm:
        with m.State("IDLE"):
            with m.If(we):
                with m.If(self._input.sop):
                    m.d.sync += input_active.eq(1)
                    with m.If(sink.data == Cat(IHL, IP_VERSION)):
                        # possible legal IPv4 packet, advance
                        m.next = "HEADER_BYTE1"
        with m.State("HEADER_BYTE1"):
            with m.If(we):
                with m.If(sink.data == Cat(ECN, DSCP)):
                    m.next = "HEADER_BYTE2"
                with m.Else():
                    # error - return to IDLE
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
        with m.State("HEADER_BYTE2"):
            with m.If(we):
                # total length, high byte
                m.d.sync += counter[8:].eq(sink.data)
                m.next = "HEADER_BYTE3"
        with m.State("HEADER_BYTE3"):
            with m.If(we):
                # total length, low byte
                m.d.sync += counter[:8].eq(sink.data)
                m.next = "HEADER_BYTE4"
        with m.State("HEADER_BYTE4"):
            with m.If(we):
                # ignore Identification field
                #with m.If(sink.data == ID[8:]):
                m.next = "HEADER_BYTE5"
                #with m.Else():
                #    m.d.sync += input_active.eq(0)
                #    m.next = "IDLE"
        with m.State("HEADER_BYTE5"):
            with m.If(we):
                # ignore Identification field
                #with m.If(sink.data == ID[:8]):
                m.next = "HEADER_BYTE6"
                #with m.Else():
                #    m.d.sync += input_active.eq(0)
                #    m.next = "IDLE"
        with m.State("HEADER_BYTE6"):
            with m.If(we):
                with m.If(sink.data == Cat(FO[8:], FLAGS)):
                    m.next = "HEADER_BYTE7"
                with m.Else():
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
        with m.State("HEADER_BYTE7"):
            with m.If(we):
                with m.If(sink.data == FO[:8]):
                    m.next = "HEADER_BYTE8"
                with m.Else():
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
        with m.State("HEADER_BYTE8"):
            with m.If(we):
                # TTL is ignored
                m.next = "HEADER_BYTE9"
        with m.State("HEADER_BYTE9"):
            with m.If(we):
                # protocol must be UDP
                with m.If(sink.data == IPProtocolNumber.UDP.value):
                    m.next = "HEADER_BYTE10"
                with m.Else():
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
        with m.State("HEADER_BYTE10"):
            with m.If(we):
                # TODO validate checksum
                m.next = "HEADER_BYTE11"
        with m.State("HEADER_BYTE11"):
            with m.If(we):
                # TODO validate checksum
                m.next = "HEADER_BYTE12"
        with m.State("HEADER_BYTE12"):
            with m.If(we):
                # source IP is ignored
                m.next = "HEADER_BYTE13"
        with m.State("HEADER_BYTE13"):
            with m.If(we):
                # source IP is ignored
                m.next = "HEADER_BYTE14"
        with m.State("HEADER_BYTE14"):
            with m.If(we):
                # source IP is ignored
                m.next = "HEADER_BYTE15"
        with m.State("HEADER_BYTE15"):
            with m.If(we):
                # source IP is ignored
                m.next = "HEADER_BYTE16"
        with m.State("HEADER_BYTE16"):
            with m.If(we):
                # destination IP must match ours, byte by byte
                with m.If(sink.data == self._ip[24:]):
                    m.next = "HEADER_BYTE17"
                with m.Else():
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
        with m.State("HEADER_BYTE17"):
            with m.If(we):
                with m.If(sink.data == self._ip[16:24]):
                    m.next = "HEADER_BYTE18"
                with m.Else():
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
        with m.State("HEADER_BYTE18"):
            with m.If(we):
                with m.If(sink.data == self._ip[8:16]):
                    m.next = "HEADER_BYTE19"
                with m.Else():
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
        with m.State("HEADER_BYTE19"):
            with m.If(we):
                with m.If(sink.data == self._ip[:8]):
                    # assume no options
                    m.next = "UDP_HEADER_BYTE0"
                with m.Else():
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
        with m.State("UDP_HEADER_BYTE0"):
            with m.If(we):
                # source port is ignored
                m.next = "UDP_HEADER_BYTE1"
        with m.State("UDP_HEADER_BYTE1"):
            with m.If(we):
                # source port is ignored
                m.next = "UDP_HEADER_BYTE2"
        with m.State("UDP_HEADER_BYTE2"):
            with m.If(we):
                # destination port must match ours
                with m.If(sink.data == self._port[8:]):
                    m.next = "UDP_HEADER_BYTE3"
                with m.Else():
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
        with m.State("UDP_HEADER_BYTE3"):
            with m.If(we):
                with m.If(sink.data == self._port[:8]):
                    m.next = "UDP_HEADER_BYTE4"
                with m.Else():
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
        with m.State("UDP_HEADER_BYTE4"):
            with m.If(we):
                # UDP length must equal IP total length minus 20
                with m.If(sink.data == (counter - 20)[8:]):
                    m.next = "UDP_HEADER_BYTE5"
                with m.Else():
                    # length mismatch between IP and UDP headers - fragmented?
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
        with m.State("UDP_HEADER_BYTE5"):
            with m.If(we):
                with m.If(sink.data == (counter - 20)[:8]):
                    m.next = "UDP_HEADER_BYTE6"
                with m.Else():
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
        with m.State("UDP_HEADER_BYTE6"):
            with m.If(we):
                # TODO validate checksum
                m.next = "UDP_HEADER_BYTE7"
        with m.State("UDP_HEADER_BYTE7"):
            with m.If(we):
                # TODO validate checksum
                # payload length = total - 28 header bytes
                m.d.sync += input_counter.eq(counter - 28)
                m.d.sync += counter.eq(counter - 20)
                m.next = "PAYLOAD"
        with m.State("PAYLOAD"):
            with m.If(we):
                # TODO handle a too-early EOP correctly (or at all)
                m.d.sync += counter.eq(counter - 1)
                m.d.comb += fifo.w_en.eq(1)
                # counter reaches 9 on the final payload byte (the 8
                # UDP header bytes were already consumed)
                with m.If(counter == 9):
                    # latch counter and re-set
                    m.d.comb += counter_fifo.w_en.eq(1)
                    m.d.sync += input_active.eq(0)
                    m.next = "IDLE"
    re = Signal()
    m.d.comb += re.eq(source.valid & source.ready)
    m.d.comb += fifo.r_en.eq(0)
    m.d.comb += counter_fifo.r_en.eq(0)
    output_counter = Signal(16)
    m.d.comb += source.data.eq(fifo.r_data)
    m.d.comb += source.valid.eq(counter_fifo.r_rdy)
    m.d.comb += source.sop.eq(0)
    m.d.comb += source.eop.eq(0)
    # output FSM: replay one buffered payload with SOP/EOP framing
    with m.FSM(name='output_fsm') as fsm:
        with m.State("IDLE"):
            with m.If(re):
                m.d.comb += source.sop.eq(1)
                m.d.sync += output_counter.eq(counter_fifo.r_data - 1)
                m.d.comb += fifo.r_en.eq(1)
                m.next = "PAYLOAD"
        with m.State("PAYLOAD"):
            with m.If(re):
                m.d.sync += output_counter.eq(output_counter - 1)
                m.d.comb += fifo.r_en.eq(1)
                with m.If(output_counter == 1):
                    m.d.comb += source.eop.eq(1)
                    m.d.comb += counter_fifo.r_en.eq(1)
                    m.next = "IDLE"
    return m
def elaborate(self, _: Optional[Platform]) -> Module:
    """UART front-end plus a one-byte-command JTAG bitbang bridge.

    RX bytes are buffered and interpreted as commands; replies are
    single bits queued into a 1-bit-wide TX FIFO and serialized as
    ASCII '0'/'1', gated by CTS.
    """
    m = Module()
    cts_n = Signal(1)
    m.submodules.cts_sync = FFSynchronizer(i=self.uart.cts_n, o=cts_n, reset=1)
    # Frontend + FIFOs
    # TODO: This shouldn't need to go 8 deep, but things get flaky if I
    # reduce it down to 2
    m.submodules.rx = rx = uart.Receive(self.baud_rate)
    m.submodules.rx_fifo = rx_fifo = SyncFIFOBuffered(width=8, depth=8)
    m.d.comb += [
        rx.input.eq(self.uart.rx),
        rx_fifo.w_data.eq(rx.data),
        rx_fifo.w_en.eq(rx.done),  # Masked internally by w_rdy
        self.uart.rts_n.eq(~rx_fifo.w_rdy),
    ]
    m.submodules.tx = tx = uart.Transmit(self.baud_rate)
    # TX FIFO is one bit wide: each entry is a reply bit, rendered below.
    m.submodules.tx_fifo = tx_fifo = SyncFIFOBuffered(width=1, depth=8)
    m.d.comb += [
        self.uart.tx.eq(tx.output),
        tx.data.eq(Mux(tx_fifo.r_data, ord('1'), ord('0'))),
    ]
    m.d.comb += tx.start.eq(0)  # default
    m.d.comb += tx_fifo.r_en.eq(0)  # default
    # TX FSM: start a character whenever CTS allows and a bit is queued.
    with m.FSM(name='tx', reset='IDLE'):
        with m.State('IDLE'):
            with m.If(~cts_n & tx_fifo.r_rdy):
                m.d.comb += tx.start.eq(1)
                m.d.comb += tx_fifo.r_en.eq(1)
                m.next = 'TRANSMITTING'
        with m.State('TRANSMITTING'):
            with m.If(tx.done):
                with m.If(~cts_n & tx_fifo.r_rdy):
                    # chain straight into the next character
                    m.d.comb += tx.start.eq(1)
                    m.d.comb += tx_fifo.r_en.eq(1)
                    m.next = 'TRANSMITTING'
                with m.Else():
                    m.next = 'IDLE'
    # Process incoming commands
    stall_latch = Signal(1)  # reply bit held while the TX FIFO is full
    m.d.comb += rx_fifo.r_en.eq(0)  # default
    m.d.comb += tx_fifo.w_en.eq(0)  # default
    with m.FSM(name='cmd', reset='IDLE'):
        with m.State('IDLE'):
            with m.If(rx_fifo.r_rdy):
                m.d.comb += rx_fifo.r_en.eq(1)
                tx_data = Signal(1)
                tx_ready = Signal(1, reset=0)
                with m.Switch(rx_fifo.r_data):
                    # write bits: tdi/tms/tck; read-side resets: srst/trst
                    wbits = Cat(self.jtag.tdi, self.jtag.tms, self.jtag.tck)
                    rbits = Cat(self.jtag.srst, self.jtag.trst)
                    with m.Case(ord('B')):
                        m.d.sync += self.blink.eq(1)
                    with m.Case(ord('b')):
                        m.d.sync += self.blink.eq(0)
                    with m.Case(ord('R')):
                        # queue TDO as a reply bit
                        m.d.comb += tx_data.eq(self.jtag.tdo)
                        m.d.comb += tx_ready.eq(1)
                    with m.Case(ord('Q')):
                        pass
                    with m.Case('0011 0---'):  # ASCII '0' through '7'
                        m.d.sync += wbits.eq(rx_fifo.r_data[:3])
                    # 'r'..'u' encode srst/trst as (char - 'r')
                    for c in 'rstu':
                        with m.Case(ord(c)):
                            m.d.sync += rbits.eq(rx_fifo.r_data - ord('r'))
                with m.If(tx_ready):
                    with m.If(tx_fifo.w_rdy):
                        m.d.comb += tx_fifo.w_data.eq(tx_data)
                        m.d.comb += tx_fifo.w_en.eq(1)
                        m.next = 'IDLE'
                    with m.Else():
                        # TX FIFO full: hold the bit and retry
                        m.d.sync += stall_latch.eq(tx_data)
                        m.next = 'WRITE_STALL'
        with m.State('WRITE_STALL'):
            with m.If(tx_fifo.w_rdy):
                m.d.comb += tx_fifo.w_data.eq(stall_latch)
                m.d.comb += tx_fifo.w_en.eq(1)
                m.next = 'IDLE'
    return m
def elaborate(self, platform):
    """Cached load/store unit: data cache, write buffer, and a bare
    (uncached) port, all multiplexed onto the data bus via an arbiter."""
    m = Module()
    wbuffer_layout = [("addr", 32), ("data", 32), ("sel", 4)]
    wbuffer_din = Record(wbuffer_layout)
    wbuffer_dout = Record(wbuffer_layout)
    dcache = m.submodules.dcache = Cache(nlines=self.nlines,
                                         nwords=self.nwords,
                                         nways=self.nways,
                                         start_addr=self.start_addr,
                                         end_addr=self.end_addr,
                                         enable_write=True)
    arbiter = m.submodules.arbiter = Arbiter()
    wbuffer = m.submodules.wbuffer = SyncFIFOBuffered(
        width=len(wbuffer_din), depth=self.nwords)
    # write buffer has highest priority, then cache refills, then bare
    wbuffer_port = arbiter.add_port(priority=0)
    cache_port = arbiter.add_port(priority=1)
    bare_port = arbiter.add_port(priority=2)
    x_use_cache = Signal()
    m_use_cache = Signal()
    m_data_w = Signal(32)
    m_byte_sel = Signal(4)
    bits_range = log2_int(self.end_addr - self.start_addr, need_pow2=False)
    # an access is cacheable iff it falls in [start_addr, end_addr)
    m.d.comb += x_use_cache.eq(
        (self.x_addr[bits_range:] == (self.start_addr >> bits_range)))
    with m.If(~self.x_stall):
        # pipeline X-stage attributes into the M stage
        m.d.sync += [
            m_use_cache.eq(x_use_cache),
            m_data_w.eq(self.x_data_w),
            m_byte_sel.eq(self.x_byte_sel)
        ]
    m.d.comb += arbiter.bus.connect(self.dport)
    # --------------------------------------------------
    # write buffer IO
    m.d.comb += [
        # input
        wbuffer.w_data.eq(wbuffer_din),
        wbuffer.w_en.eq(x_use_cache & self.x_store & self.x_valid & ~self.x_stall),
        wbuffer_din.addr.eq(self.x_addr),
        wbuffer_din.data.eq(self.x_data_w),
        wbuffer_din.sel.eq(self.x_byte_sel),
        # output
        wbuffer_dout.eq(wbuffer.r_data),
    ]
    # drive the arbiter port
    with m.If(wbuffer_port.cyc):
        with m.If(wbuffer_port.ack | wbuffer_port.err):
            m.d.comb += wbuffer.r_en.eq(1)
            m.d.sync += wbuffer_port.stb.eq(0)
            # level == 1 while popping: this was the last entry, so the
            # buffer is about to be empty — end the bus cycle
            with m.If(wbuffer.level == 1):
                m.d.sync += [wbuffer_port.cyc.eq(0), wbuffer_port.we.eq(0)]
        with m.Elif(~wbuffer_port.stb):
            # present the next buffered store
            m.d.sync += [
                wbuffer_port.stb.eq(1),
                wbuffer_port.addr.eq(wbuffer_dout.addr),
                wbuffer_port.dat_w.eq(wbuffer_dout.data),
                wbuffer_port.sel.eq(wbuffer_dout.sel)
            ]
    with m.Elif(wbuffer.r_rdy):
        # buffer has data and no cycle in flight: start one
        m.d.sync += [
            wbuffer_port.cyc.eq(1),
            wbuffer_port.stb.eq(1),
            wbuffer_port.we.eq(1),
            wbuffer_port.addr.eq(wbuffer_dout.addr),
            wbuffer_port.dat_w.eq(wbuffer_dout.data),
            wbuffer_port.sel.eq(wbuffer_dout.sel)
        ]
        m.d.comb += wbuffer.r_en.eq(0)
    m.d.comb += [
        wbuffer_port.cti.eq(CycleType.CLASSIC),
        wbuffer_port.bte.eq(0)
    ]
    # --------------------------------------------------
    # connect IO: cache
    m.d.comb += [
        dcache.s1_address.eq(self.x_addr),
        dcache.s1_flush.eq(0),
        dcache.s1_valid.eq(self.x_valid),
        dcache.s1_stall.eq(self.x_stall),
        dcache.s2_address.eq(self.m_addr),
        dcache.s2_evict.eq(0),  # Evict is not used. Remove maybe?
        dcache.s2_valid.eq(self.m_valid),
        dcache.s2_re.eq(self.m_load),
        dcache.s2_wdata.eq(m_data_w),
        dcache.s2_sel.eq(m_byte_sel),
        dcache.s2_we.eq(self.m_store)
    ]
    # connect cache to arbiter (read-only refill bursts)
    m.d.comb += [
        cache_port.addr.eq(dcache.bus_addr),
        cache_port.dat_w.eq(0),
        cache_port.sel.eq(0),
        cache_port.we.eq(0),
        cache_port.cyc.eq(dcache.bus_valid),
        cache_port.stb.eq(dcache.bus_valid),
        cache_port.cti.eq(
            Mux(dcache.bus_last, CycleType.END, CycleType.INCREMENT)),
        cache_port.bte.eq(log2_int(self.nwords) - 1),
        dcache.bus_data.eq(cache_port.dat_r),
        dcache.bus_ack.eq(cache_port.ack),
        dcache.bus_err.eq(cache_port.err)
    ]
    # --------------------------------------------------
    # bare port: single classic cycles for uncacheable accesses
    rdata = Signal.like(bare_port.dat_r)
    op = Signal()
    m.d.comb += op.eq(self.x_load | self.x_store)
    # transaction logic
    with m.If(bare_port.cyc):
        with m.If(bare_port.ack | bare_port.err | ~self.m_valid):
            m.d.sync += [
                rdata.eq(bare_port.dat_r),
                bare_port.we.eq(0),
                bare_port.cyc.eq(0),
                bare_port.stb.eq(0)
            ]
    with m.Elif(op & self.x_valid & ~self.x_stall & ~x_use_cache):
        m.d.sync += [
            bare_port.addr.eq(self.x_addr),
            bare_port.dat_w.eq(self.x_data_w),
            bare_port.sel.eq(self.x_byte_sel),
            bare_port.we.eq(self.x_store),
            bare_port.cyc.eq(1),
            bare_port.stb.eq(1)
        ]
    m.d.comb += [bare_port.cti.eq(CycleType.CLASSIC), bare_port.bte.eq(0)]
    # --------------------------------------------------
    # extra logic: stall conditions for the X and M stages
    with m.If(self.x_fence_i):
        # fence.i must wait for the write buffer to drain
        m.d.comb += self.x_busy.eq(wbuffer.r_rdy)
    with m.Elif(x_use_cache):
        m.d.comb += self.x_busy.eq(self.x_store & ~wbuffer.w_rdy)
    with m.Else():
        m.d.comb += self.x_busy.eq(bare_port.cyc)
    with m.If(m_use_cache):
        m.d.comb += [
            self.m_busy.eq(dcache.s2_re & dcache.s2_miss),
            self.m_load_data.eq(dcache.s2_rdata)
        ]
    with m.Elif(self.m_load_error | self.m_store_error):
        m.d.comb += [self.m_busy.eq(0), self.m_load_data.eq(0)]
    with m.Else():
        m.d.comb += [
            self.m_busy.eq(bare_port.cyc),
            self.m_load_data.eq(rdata)
        ]
    # --------------------------------------------------
    # exceptions: latch bus errors with the faulting address
    with m.If(self.dport.cyc & self.dport.err):
        m.d.sync += [
            self.m_load_error.eq(~self.dport.we),
            self.m_store_error.eq(self.dport.we),
            self.m_badaddr.eq(self.dport.addr)
        ]
    with m.Elif(~self.m_stall):
        m.d.sync += [self.m_load_error.eq(0), self.m_store_error.eq(0)]
    return m
def elaborate(self, platform):
    """SETUP-packet endpoint: buffers SETUP payload bytes in a FIFO for
    the CPU to read, and exposes address-change control registers.

    Runs entirely in the "usb" clock domain (renamed from "sync")."""
    m = Module()
    m.submodules.bridge = self._bridge
    # Shortcuts to our components.
    interface = self.interface
    token = self.interface.tokenizer
    rx = self.interface.rx
    handshakes_out = self.interface.handshakes_out
    # Logic condition for getting a new setup packet.
    new_setup = token.new_token & token.is_setup
    #
    # Core FIFO.
    #
    # A new SETUP token resets (clears) the FIFO; depth 10 covers the
    # 8-byte SETUP payload plus CRC.
    m.submodules.fifo = fifo = ResetInserter(new_setup)(SyncFIFOBuffered(width=8, depth=10))
    m.d.comb += [
        # We'll write to the active FIFO whenever the last received token is a SETUP
        # token, and we have incoming data; and we'll always write the data received
        fifo.w_en            .eq(token.is_setup & rx.valid & rx.next),
        fifo.w_data          .eq(rx.payload),
        # We'll advance the FIFO whenever our CPU reads from the data CSR;
        # and we'll always read our data from the FIFO.
        fifo.r_en            .eq(self.data.r_stb),
        self.data.r_data     .eq(fifo.r_data),
        # Pass the FIFO status on to our CPU.
        self.have.r_data     .eq(fifo.r_rdy),
        # Always acknowledge SETUP packets as they arrive.
        handshakes_out.ack   .eq(token.is_setup & interface.rx_ready_for_response)
    ]
    #
    # Control registers
    #
    # Our address register always reads the current address of the device;
    # but a write will request an address change, handled below.
    m.d.comb += self._address.r_data.eq(interface.active_address)
    with m.If(self._address.w_stb):
        m.d.comb += [
            interface.address_changed  .eq(1),
            interface.new_address      .eq(self._address.w_data),
        ]
    #
    # Status and interrupts.
    #
    with m.If(token.new_token):
        m.d.usb += self.epno.r_data.eq(token.endpoint)
    # TODO: generate interrupts
    return DomainRenamer({"sync": "usb"})(m)
def elaborate(self, platform):
    """Build the USB IN-endpoint peripheral.

    The CPU fills a FIFO via the ``data`` CSR and selects/primes an endpoint;
    an FSM then answers IN tokens with NAK/STALL, a ZLP, or the buffered
    data. Logic is renamed into the ``usb`` clock domain on return.
    """
    m = Module()
    m.submodules.bridge = self._bridge

    # Shortcuts to our components.
    token = self.interface.tokenizer
    tx = self.interface.tx
    handshakes_out = self.interface.handshakes_out

    #
    # Core FIFO.
    #

    # Create our FIFO; and set it to be cleared whenever the user requests.
    m.submodules.fifo = fifo = \
        ResetInserter(self.reset.w_stb)(SyncFIFOBuffered(width=8, depth=self.max_packet_size))

    m.d.comb += [
        # Whenever the user DATA register is written to, add the relevant data to our FIFO.
        fifo.w_en.eq(self.data.w_stb),
        fifo.w_data.eq(self.data.w_data),
    ]

    # Keep track of the amount of data in our FIFO.
    bytes_in_fifo = Signal(range(0, self.max_packet_size + 1))

    # If we're clearing the whole FIFO, reset our data count.
    with m.If(self.reset.w_stb):
        m.d.usb += bytes_in_fifo.eq(0)

    # Keep track of our FIFO's data count as data is added or removed.
    # (These are plain Python bindings; they add no statements, so the
    # m.Elif blocks below still pair with the m.If above.)
    increment = fifo.w_en & fifo.w_rdy
    decrement = fifo.r_en & fifo.r_rdy

    with m.Elif(increment & ~decrement):
        m.d.usb += bytes_in_fifo.eq(bytes_in_fifo + 1)
    with m.Elif(decrement & ~increment):
        m.d.usb += bytes_in_fifo.eq(bytes_in_fifo - 1)

    #
    # Register updates.
    #

    # Active endpoint number.
    with m.If(self.epno.w_stb):
        m.d.usb += self.epno.r_data.eq(self.epno.w_data)

    # Keep track of which endpoints are stalled.
    endpoint_stalled = Array(Signal() for _ in range(16))

    # Set the value of our endpoint `stall` based on our `stall` register...
    with m.If(self.stall.w_stb):
        m.d.usb += endpoint_stalled[self.epno.r_data].eq(self.stall.w_data)

    # ... but clear our endpoint `stall` when we get a SETUP packet.
    with m.If(token.is_setup & token.new_token):
        m.d.usb += endpoint_stalled[token.endpoint].eq(0)

    # Manual data toggle control.
    # TODO: Remove this in favor of automated tracking?
    m.d.comb += self.interface.tx_pid_toggle.eq(self.pid.r_data)
    with m.If(self.pid.w_stb):
        m.d.usb += self.pid.r_data.eq(self.pid.w_data)

    #
    # Status registers.
    #
    m.d.comb += [
        self.have.r_data.eq(fifo.r_rdy)
    ]

    #
    # Control logic.
    #

    # Logic shorthand.
    new_in_token = (token.is_in & token.ready_for_response)
    endpoint_matches = (token.endpoint == self.epno.r_data)
    stalled = endpoint_stalled[token.endpoint]

    with m.FSM(domain='usb') as f:

        # Drive our IDLE line based on our FSM state.
        m.d.comb += self.idle.r_data.eq(f.ongoing('IDLE'))

        # IDLE -- our CPU hasn't yet requested that we send data.
        # We'll wait for it to do so, and NAK any packets that arrive.
        with m.State("IDLE"):

            # If we get an IN token...
            with m.If(new_in_token):

                # STALL it, if the endpoint is STALL'd...
                with m.If(stalled):
                    m.d.comb += handshakes_out.stall.eq(1)

                # Otherwise, NAK.
                with m.Else():
                    m.d.comb += handshakes_out.nak.eq(1)

            # If the user request that we send data, "prime" the endpoint.
            # This means we have data to send, but are just waiting for an IN token.
            with m.If(self.epno.w_stb & ~stalled):
                m.next = "PRIMED"

        # PRIMED -- our CPU has provided data, but we haven't been sent an IN token, yet.
        # Await that IN token.
        with m.State("PRIMED"):
            with m.If(new_in_token):

                # If the target endpoint is STALL'd, reply with STALL no matter what.
                with m.If(stalled):
                    m.d.comb += handshakes_out.stall.eq(1)

                # If we have a new IN token to our endpoint, move to responding to it.
                with m.Elif(endpoint_matches):

                    # If there's no data in our endpoint, send a ZLP.
                    with m.If(~fifo.r_rdy):
                        m.next = "SEND_ZLP"

                    # Otherwise, send our data, starting with our first byte.
                    with m.Else():
                        m.d.usb += tx.first.eq(1)
                        m.next = "SEND_DATA"

                # Otherwise, we don't have a response; NAK the packet.
                with m.Else():
                    m.d.comb += handshakes_out.nak.eq(1)

        # SEND_ZLP -- we're now ready to respond to an IN token with a ZLP.
        # Send our response.
        with m.State("SEND_ZLP"):
            m.d.comb += [
                tx.valid.eq(1),
                tx.last.eq(1)
            ]
            # NOTE(review): we return to IDLE unconditionally, without waiting
            # for tx.ready -- presumably the ZLP is consumed in a single cycle;
            # confirm against the transmitter's handshake timing.
            m.next = 'IDLE'

        # SEND_DATA -- we're now ready to respond to an IN token to our endpoint.
        # Send our response.
        with m.State("SEND_DATA"):
            # True when the byte currently presented is the final one buffered.
            last_packet = (bytes_in_fifo == 1)

            m.d.comb += [
                tx.valid.eq(1),
                tx.last.eq(last_packet),

                # Drive our transmit data directly from our FIFO...
                tx.payload.eq(fifo.r_data),

                # ... and advance our FIFO each time a data byte is transmitted.
                fifo.r_en.eq(tx.ready)
            ]

            # After we've sent a byte, drop our first flag.
            with m.If(tx.ready):
                m.d.usb += tx.first.eq(0)

            # Once we transmit our last byte, we're done transmitting. Move back to IDLE.
            with m.If(last_packet & tx.ready):
                m.next = 'IDLE'

    return DomainRenamer({"sync": "usb"})(m)
def elaborate(self, platform):
    """Build the USB OUT-endpoint peripheral.

    Receives OUT data into a FIFO drained by the CPU through the ``data``
    CSR. A receive is accepted only when the endpoint is selected, enabled,
    and not stalled; otherwise the packet is NAK'd (or STALL'd). Logic is
    renamed into the ``usb`` clock domain on return.
    """
    m = Module()
    m.submodules.bridge = self._bridge

    # Shortcuts to our components.
    interface = self.interface
    token = self.interface.tokenizer
    rx = self.interface.rx
    handshakes_out = self.interface.handshakes_out

    #
    # Control registers
    #

    # Active endpoint number.
    with m.If(self.epno.w_stb):
        m.d.usb += self.epno.r_data.eq(self.epno.w_data)

    # Keep track of which endpoints are stalled.
    endpoint_stalled = Array(Signal() for _ in range(16))

    # Allow the CPU to set our enable bit.
    with m.If(self.enable.w_stb):
        m.d.usb += self.enable.r_data.eq(self.enable.w_data)

    # If we've just ACK'd a receive, clear our enable.
    # (Single-shot: the CPU must re-enable before the next packet.)
    with m.If(interface.handshakes_out.ack):
        m.d.usb += self.enable.r_data.eq(0)

    # Set the value of our endpoint `stall` based on our `stall` register...
    with m.If(self.stall.w_stb):
        m.d.usb += endpoint_stalled[self.epno.r_data].eq(self.stall.w_data)

    # ... but clear our endpoint `stall` when we get a SETUP packet.
    with m.If(token.is_setup & token.new_token):
        m.d.usb += endpoint_stalled[token.endpoint].eq(0)

    #
    # Core FIFO.
    #
    m.submodules.fifo = fifo = ResetInserter(self.reset.w_stb)(SyncFIFOBuffered(width=8, depth=10))

    # Shortcut for when we should allow a receive. We'll read when:
    #   - Our `epno` register matches the target register; and
    #   - We've primed the relevant endpoint.
    #   - Our most recent token is an OUT.
    #   - We're not stalled.
    stalled = token.is_out & endpoint_stalled[token.endpoint]
    endpoint_matches = (token.endpoint == self.epno.r_data)
    allow_receive = endpoint_matches & self.enable.r_data & token.is_out & ~stalled
    nak_receives = token.is_out & ~allow_receive & ~stalled

    m.d.comb += [

        # We'll write to the endpoint iff we have a primed receive in progress.
        fifo.w_en.eq(allow_receive & rx.valid & rx.next),
        fifo.w_data.eq(rx.payload),

        # We'll advance the FIFO whenever our CPU reads from the data CSR;
        # and we'll always read our data from the FIFO.
        fifo.r_en.eq(self.data.r_stb),
        self.data.r_data.eq(fifo.r_data),

        # Pass the FIFO status on to our CPU.
        self.have.r_data.eq(fifo.r_rdy),

        # If we've just finished an allowed receive, ACK.
        handshakes_out.ack.eq(allow_receive & interface.rx_ready_for_response),

        # If we were stalled, stall.
        handshakes_out.stall.eq(stalled & interface.rx_ready_for_response),

        # If we're not ACK'ing or STALL'ing, NAK all packets.
        handshakes_out.nak.eq(nak_receives & interface.rx_ready_for_response)
    ]

    #
    # Interrupt/status
    #

    return DomainRenamer({"sync": "usb"})(m)
def elaborate(self, platform):
    """Build the parallel-memory (EEPROM) applet gateware.

    Implements a command FSM driving the address/data bus: SEEK/INCR set the
    address, READ/WRITE move data with programmable cycle timings, POLL waits
    for write completion, and QUEUE buffers commands so page writes can run
    without host-induced delays.
    """
    m = Module()
    m.submodules.bus = bus = self.bus
    in_fifo = self.in_fifo
    out_fifo = self.out_fifo

    # Holds commands captured by QUEUE, replayed without host latency.
    buf_fifo = m.submodules.buf_fifo = SyncFIFOBuffered(
        width=8, depth=_COMMAND_BUFFER_SIZE)

    # Chip enable is held asserted for the applet's lifetime.
    m.d.comb += bus.ce.eq(1)

    with m.FSM():
        # Page writes in parallel EEPROMs do not tolerate delays, so the entire page needs
        # to be buffered before programming starts. After receiving the QUEUE command, all
        # subsequent commands except for RUN are placed into the buffer. The RUN command
        # restarts command processing. Until the buffer is empty, only buffered commands are
        # processed.
        #
        # Command source: the buffer takes priority while it holds data.
        cmd_fifo = Array([out_fifo, buf_fifo])[buf_fifo.r_rdy]

        # Byte counts for the (possibly non-byte-multiple) address/data buses.
        a_bytes = (bus.a_bits + 7) // 8
        dq_bytes = (bus.dq_bits + 7) // 8
        a_index = Signal(range(a_bytes + 1))
        dq_index = Signal(range(dq_bytes + 1))
        a_latch = Signal(bus.a_bits)
        dq_latch = Signal(bus.dq_bits)

        # Timing budgets in clock cycles, derived from the configured delays.
        read_cycle_cyc = (math.ceil(
            self._read_cycle_delay * platform.default_clk_frequency) + 2
        )  # FFSynchronizer latency
        write_cycle_cyc = math.ceil(self._write_cycle_delay * platform.default_clk_frequency)
        write_hold_cyc = math.ceil(self._write_hold_delay * platform.default_clk_frequency)
        timer = Signal(
            range(
                max(read_cycle_cyc, write_cycle_cyc, write_hold_cyc) + 1))

        with m.State("COMMAND"):
            with m.If(cmd_fifo.r_rdy):
                m.d.comb += cmd_fifo.r_en.eq(1)
                with m.Switch(cmd_fifo.r_data):
                    with m.Case(_Command.QUEUE):
                        m.next = "QUEUE-RECV"
                    with m.Case(_Command.SEEK):
                        m.d.sync += a_index.eq(0)
                        m.next = "SEEK-RECV"
                    with m.Case(_Command.INCR):
                        m.d.sync += bus.a.eq(bus.a + 1)
                        m.next = "SEEK-WAIT"
                    with m.Case(_Command.READ):
                        m.d.sync += dq_index.eq(0)
                        m.next = "READ-PULSE"
                    with m.Case(_Command.WRITE):
                        m.d.sync += dq_index.eq(0)
                        m.next = "WRITE-RECV"
                    with m.Case(_Command.POLL):
                        m.next = "POLL-PULSE"
            # No command pending: flush any buffered read data to the host.
            with m.Else():
                m.d.comb += in_fifo.flush.eq(1)

        with m.State("QUEUE-RECV"):
            with m.If(out_fifo.r_rdy):
                # QUEUE acts as an escape byte: QUEUE,RUN ends buffering;
                # QUEUE,<other> stores <other> literally.
                escaped = Signal()
                with m.If(~escaped & (out_fifo.r_data == _Command.QUEUE)):
                    m.d.comb += out_fifo.r_en.eq(1)
                    m.d.sync += escaped.eq(1)
                with m.Elif(escaped & (out_fifo.r_data == _Command.RUN)):
                    m.d.comb += out_fifo.r_en.eq(1)
                    m.next = "COMMAND"
                with m.Else():
                    m.d.sync += escaped.eq(0)
                    # Copy the byte into the command buffer, throttled by its space.
                    m.d.comb += out_fifo.r_en.eq(buf_fifo.w_rdy)
                    m.d.comb += buf_fifo.w_data.eq(out_fifo.r_data)
                    m.d.comb += buf_fifo.w_en.eq(1)

        with m.State("SEEK-RECV"):
            # Assemble the address one byte at a time, LSB first.
            with m.If(a_index == a_bytes):
                m.d.sync += bus.a.eq(a_latch)
                m.next = "SEEK-WAIT"
            with m.Elif(cmd_fifo.r_rdy):
                m.d.comb += cmd_fifo.r_en.eq(1)
                m.d.sync += a_latch.word_select(a_index, 8).eq(cmd_fifo.r_data)
                m.d.sync += a_index.eq(a_index + 1)

        with m.State("SEEK-WAIT"):
            with m.If(bus.rdy):
                m.next = "COMMAND"

        with m.State("READ-PULSE"):
            m.d.sync += bus.oe.eq(1)
            m.d.sync += timer.eq(read_cycle_cyc)
            m.next = "READ-CYCLE"

        with m.State("READ-CYCLE"):
            with m.If(timer == 0):
                # Normally, this would be the place to deassert OE. However, this would reduce
                # metastability (during burst reads) in the output buffers of a memory that is
                # reading bits close to the buffer threshold. Wait, isn't metastability bad?
                # Normally yes, but this is a special case! Metastability causes unstable
                # bits, and unstable bits reduce the chance that corrupt data will slip
                # through undetected.
                m.d.sync += dq_latch.eq(bus.q)
                m.next = "READ-SEND"
            with m.Else():
                m.d.sync += timer.eq(timer - 1)

        with m.State("READ-SEND"):
            # Stream the latched word to the host one byte at a time.
            with m.If(dq_index == dq_bytes):
                m.next = "COMMAND"
            with m.Elif(in_fifo.w_rdy):
                m.d.comb += in_fifo.w_en.eq(1)
                m.d.comb += in_fifo.w_data.eq(
                    dq_latch.word_select(dq_index, 8))
                m.d.sync += dq_index.eq(dq_index + 1)

        with m.State("WRITE-RECV"):
            # Assemble the data word, then pulse WE for the write cycle.
            with m.If(dq_index == dq_bytes):
                m.d.sync += bus.d.eq(dq_latch)
                m.d.sync += bus.oe.eq(0)  # see comment in READ-CYCLE
                m.d.sync += bus.we.eq(1)
                m.d.sync += timer.eq(write_cycle_cyc)
                m.next = "WRITE-CYCLE"
            with m.Elif(cmd_fifo.r_rdy):
                m.d.comb += cmd_fifo.r_en.eq(1)
                m.d.sync += dq_latch.word_select(dq_index, 8).eq(cmd_fifo.r_data)
                m.d.sync += dq_index.eq(dq_index + 1)

        with m.State("WRITE-CYCLE"):
            with m.If(timer == 0):
                m.d.sync += bus.we.eq(0)
                m.d.sync += timer.eq(write_hold_cyc)
                m.next = "WRITE-HOLD"
            with m.Else():
                m.d.sync += timer.eq(timer - 1)

        with m.State("WRITE-HOLD"):
            with m.If(timer == 0):
                m.next = "COMMAND"
            with m.Else():
                m.d.sync += timer.eq(timer - 1)

        with m.State("POLL-PULSE"):
            m.d.sync += bus.oe.eq(1)
            m.d.sync += timer.eq(read_cycle_cyc)
            m.next = "POLL-CYCLE"

        with m.State("POLL-CYCLE"):
            with m.If(timer == 0):
                # There are many different ways EEPROMs can signal readiness, but if they do it
                # on data lines, they are common in that they all present something else other
                # than the last written byte on DQ lines.
                with m.If(bus.q == dq_latch):
                    with m.If(in_fifo.w_rdy):
                        m.d.comb += in_fifo.w_en.eq(1)
                        m.d.sync += bus.oe.eq(0)
                        m.next = "COMMAND"
            with m.Else():
                m.d.sync += timer.eq(timer - 1)

    return m
def __init__(self, lane: PCIeScrambler, fifo_depth=8):
    """Receive DLLPs from *lane*, buffering them in an ``rx``-domain FIFO.

    :param lane: descrambled PCIe lane to monitor.
    :param fifo_depth: number of DLLP records the FIFO can hold.
    """
    # The DLLP record determines the FIFO's word width, so build it first.
    self.dllp = Record(dllp_layout)
    dllp_width = len(self.dllp)

    # Received DLLPs are produced in the receive clock domain, so the FIFO
    # is re-clocked into "rx".
    self.fifo = DomainRenamer("rx")(SyncFIFOBuffered(width=dllp_width, depth=fifo_depth))

    self.__lane = lane
def elaborate(self, platform):
    """Top-level camera demo: OV7670 capture -> processing pipeline -> frame
    buffer -> VGA/DVI output, with OSD controls, SPI configuration, histogram
    display, and snapshot upload over UART.

    NOTE(review): the original source was whitespace-collapsed; the nesting of
    some `with m.If(...)` blocks below was reconstructed from the logic and
    should be confirmed against the upstream project.
    """
    # VGA constants
    pixel_f = self.timing.pixel_freq
    hsync_front_porch = self.timing.h_front_porch
    hsync_pulse_width = self.timing.h_sync_pulse
    hsync_back_porch = self.timing.h_back_porch
    vsync_front_porch = self.timing.v_front_porch
    vsync_pulse_width = self.timing.v_sync_pulse
    vsync_back_porch = self.timing.v_back_porch

    # Pins
    clk25 = platform.request("clk25")
    ov7670 = platform.request("ov7670")
    led = [platform.request("led", i) for i in range(8)]
    leds = Cat([i.o for i in led])
    led8_2 = platform.request("led8_2")
    leds8_2 = Cat([led8_2.leds[i] for i in range(8)])
    led8_3 = platform.request("led8_3")
    leds8_3 = Cat([led8_3.leds[i] for i in range(8)])
    leds16 = Cat(leds8_3, leds8_2)
    btn1 = platform.request("button_fire", 0)
    btn2 = platform.request("button_fire", 1)
    up = platform.request("button_up", 0)
    down = platform.request("button_down", 0)
    pwr = platform.request("button_pwr", 0)
    left = platform.request("button_left", 0)
    right = platform.request("button_right", 0)
    sw = Cat([platform.request("switch",i) for i in range(4)])
    uart = platform.request("uart")
    divisor = int(platform.default_clk_frequency // 460800)
    esp32 = platform.request("esp32_spi")

    csn = esp32.csn
    sclk = esp32.sclk
    copi = esp32.copi
    cipo = esp32.cipo

    m = Module()

    # Clock generator.
    m.domains.sync = cd_sync = ClockDomain("sync")
    m.domains.pixel = cd_pixel = ClockDomain("pixel")
    m.domains.shift = cd_shift = ClockDomain("shift")

    m.submodules.ecp5pll = pll = ECP5PLL()
    pll.register_clkin(clk25, platform.default_clk_frequency)
    pll.create_clkout(cd_sync, platform.default_clk_frequency)
    pll.create_clkout(cd_pixel, pixel_f)
    # Shift clock runs at 5x pixel clock for DDR, 10x for SDR serialization.
    pll.create_clkout(cd_shift, pixel_f * 5.0 * (1.0 if self.ddr else 2.0))

    # Add CamRead submodule
    camread = CamRead()
    m.submodules.camread = camread

    # Camera config
    cam_x_res = 640
    cam_y_res = 480

    camconfig = CamConfig()
    m.submodules.camconfig = camconfig

    # Connect the camera pins and config and read modules
    m.d.comb += [
        ov7670.cam_RESET.eq(1),
        ov7670.cam_PWON.eq(0),
        ov7670.cam_XCLK.eq(clk25.i),
        ov7670.cam_SIOC.eq(camconfig.sioc),
        ov7670.cam_SIOD.eq(camconfig.siod),
        camconfig.start.eq(btn1),
        camread.p_data.eq(Cat([ov7670.cam_data[i] for i in range(8)])),
        camread.href.eq(ov7670.cam_HREF),
        camread.vsync.eq(ov7670.cam_VSYNC),
        camread.p_clock.eq(ov7670.cam_PCLK)
    ]

    # Create the uart
    m.submodules.serial = serial = AsyncSerial(divisor=divisor, pins=uart)

    # Input fifo
    fifo_depth=1024
    m.submodules.fifo = fifo = SyncFIFOBuffered(width=16,depth=fifo_depth)

    # Frame buffer (half horizontal camera resolution: every other pixel kept).
    x_res= cam_x_res // 2
    y_res= cam_y_res

    buffer = Memory(width=16, depth=x_res * y_res)
    m.submodules.r = r = buffer.read_port()
    m.submodules.w = w = buffer.write_port()

    # Button debouncers
    m.submodules.debup = debup = Debouncer()
    m.submodules.debdown = debdown = Debouncer()
    m.submodules.debosd = debosd = Debouncer()
    m.submodules.debsel = debsel = Debouncer()
    m.submodules.debsnap = debsnap = Debouncer()
    m.submodules.debhist = debhist = Debouncer()

    # Connect the buttons to debouncers
    m.d.comb += [
        debup.btn.eq(up),
        debdown.btn.eq(down),
        debosd.btn.eq(pwr),
        debsel.btn.eq(right),
        debsnap.btn.eq(left),
        debhist.btn.eq(btn2)
    ]

    # Image processing options
    flip = Signal(2, reset=1)
    mono = Signal(reset=0)
    invert = Signal(reset=0)
    gamma = Signal(reset=0)
    border = Signal(reset=0)
    filt = Signal(reset=0)
    grid = Signal(reset=0)
    histo = Signal(reset=1)
    hbin = Signal(6, reset=0)
    bin_cnt = Signal(5, reset=0)
    thresh = Signal(reset=0)
    threshold = Signal(8, reset=0)
    hist_chan = Signal(2, reset=0)
    ccc = CC(reset=(0,18,12,16))
    sharpness = Signal(unsigned(4), reset=0)
    osd_val = Signal(4, reset=0) # Account for spurious start-up button pushes
    osd_on = Signal(reset=1)
    osd_sel = Signal(reset=1)
    snap = Signal(reset=0)
    frozen = Signal(reset=1)
    writing = Signal(reset=0)
    written = Signal(reset=0)
    byte = Signal(reset=0)
    w_addr = Signal(18)

    # Color filter bounds (RGB565 component ranges).
    l = Rgb565(reset=(18,12,6)) # Initialised to red LEGO filter
    h = Rgb565(reset=(21,22,14))

    # Region of interest
    roi = Roi()

    # VGA signals
    vga_r = Signal(8)
    vga_g = Signal(8)
    vga_b = Signal(8)
    vga_hsync = Signal()
    vga_vsync = Signal()
    vga_blank = Signal()

    # Fifo co-ordinates
    f_x = Signal(9)
    f_y = Signal(9)
    f_frame_done = Signal()

    # Pixel from fifo
    pix = Rgb565()

    # SPI memory for remote configuration
    m.submodules.spimem = spimem = SpiMem(addr_bits=32)

    # Color Control
    m.submodules.cc = cc = ColorControl()

    # Image convolution
    m.submodules.imc = imc = ImageConv()

    # Statistics
    m.submodules.stats = stats = Stats()

    # Histogram
    m.submodules.hist = hist = Hist()

    # Filter
    m.submodules.fil = fil = Filt()

    # Monochrome
    m.submodules.mon = mon = Mono()

    # Sync the fifo with the camera: start filling at a frame boundary.
    sync_fifo = Signal(reset=0)
    with m.If(~sync_fifo & ~fifo.r_rdy & (camread.col == cam_x_res - 1) & (camread.row == cam_y_res -1)):
        m.d.sync += [
            sync_fifo.eq(1),
            f_x.eq(0),
            f_y.eq(0)
        ]

    # btn1 (also the camera-config trigger) restarts synchronization.
    with m.If(btn1):
        m.d.sync += sync_fifo.eq(0)

    # Connect the fifo
    m.d.comb += [
        fifo.w_en.eq(camread.pixel_valid & camread.col[0] & sync_fifo), # Only write every other pixel
        fifo.w_data.eq(camread.pixel_data),
        fifo.r_en.eq(fifo.r_rdy & ~imc.o_stall)
    ]

    # Calculate fifo co-ordinates
    m.d.sync += f_frame_done.eq(0)
    with m.If(fifo.r_en & sync_fifo):
        m.d.sync += f_x.eq(f_x + 1)
        with m.If(f_x == x_res - 1):
            m.d.sync += [
                f_x.eq(0),
                f_y.eq(f_y + 1)
            ]
            with m.If(f_y == y_res - 1):
                m.d.sync += [
                    f_y.eq(0),
                    f_frame_done.eq(1)
                ]

    # Extract pixel from fifo data (RGB565 packing: r[15:11] g[10:5] b[4:0]).
    m.d.comb += [
        pix.r.eq(fifo.r_data[11:]),
        pix.g.eq(fifo.r_data[5:11]),
        pix.b.eq(fifo.r_data[:5])
    ]

    # Connect color control
    m.d.comb += [
        cc.i.eq(pix),
        cc.i_cc.eq(ccc)
    ]

    # Calculate per-frame statistics, after applying color correction
    m.d.comb += [
        stats.i.eq(cc.o),
        stats.i_valid.eq(fifo.r_rdy),
        # This is not valid when a region of interest is active
        stats.i_avg_valid.eq((f_x >= 32) & (f_x < 288) &
                             (f_y >= 112) & (f_y < 368)),
        stats.i_frame_done.eq(f_frame_done),
        stats.i_x.eq(f_x),
        stats.i_y.eq(f_y),
        stats.i_roi.eq(roi)
    ]

    # Produce histogram, after applying color correction, and after monochrome,
    # for monochrome histogram. hist_chan selects R/G/B/mono as the source.
    with m.Switch(hist_chan):
        with m.Case(0):
            m.d.comb += hist.i_p.eq(cc.o.r)
        with m.Case(1):
            m.d.comb += hist.i_p.eq(cc.o.g)
        with m.Case(2):
            m.d.comb += hist.i_p.eq(cc.o.b)
        with m.Case(3):
            m.d.comb += hist.i_p.eq(mon.o_m)

    m.d.comb += [
        hist.i_valid.eq(fifo.r_rdy),
        hist.i_clear.eq(f_frame_done),
        hist.i_x.eq(f_x),
        hist.i_y.eq(f_y),
        hist.i_roi.eq(roi),
        hist.i_bin.eq(hbin) # Used when displaying histogram
    ]

    # Apply filter, after color correction
    m.d.comb += [
        fil.i.eq(cc.o),
        fil.i_valid.eq(fifo.r_en),
        fil.i_en.eq(filt),
        fil.i_frame_done.eq(f_frame_done),
        fil.i_l.eq(l),
        fil.i_h.eq(h)
    ]

    # Apply mono, after color correction and filter
    m.d.comb += [
        mon.i.eq(fil.o),
        mon.i_en.eq(mono),
        mon.i_invert.eq(invert),
        mon.i_thresh.eq(thresh),
        mon.i_threshold.eq(threshold)
    ]

    # Apply image convolution, after other transformations
    m.d.comb += [
        imc.i.eq(mon.o),
        imc.i_valid.eq(fifo.r_rdy),
        imc.i_reset.eq(~sync_fifo),
        # Select image convolution
        imc.i_sel.eq(sharpness)
    ]

    # Take a snapshot, freeze the camera, and write the framebuffer to the uart.
    # Note that this suspends video output. Triggered by button or by an SPI
    # write to register 22; pressing again while frozen unfreezes.
    with m.If(debsnap.btn_down | (spimem.wr & (spimem.addr == 22))):
        with m.If(frozen):
            m.d.sync += frozen.eq(0)
        with m.Else():
            m.d.sync += [
                snap.eq(1),
                frozen.eq(0),
                w_addr.eq(0),
                written.eq(0),
                byte.eq(0)
            ]

    # Wait to end of frame after requesting snapshot, before start of writing to uart
    with m.If(imc.o_frame_done & snap):
        m.d.sync += [
            frozen.eq(1),
            snap.eq(0)
        ]
        with m.If(~written):
            m.d.sync += writing.eq(1)

    # Connect the uart: stream each 16-bit pixel as two bytes.
    m.d.comb += [
        serial.tx.data.eq(Mux(byte, r.data[8:], r.data[:8])),
        serial.tx.ack.eq(writing)
    ]

    # Write to the uart from frame buffer (affects video output)
    with m.If(writing):
        with m.If(w_addr == x_res * y_res):
            m.d.sync += [
                writing.eq(0),
                written.eq(1)
            ]
        with m.Elif(serial.tx.ack & serial.tx.rdy):
            m.d.sync += byte.eq(~byte)
            with m.If(byte):
                m.d.sync += w_addr.eq(w_addr+1)

    # Connect spimem
    m.d.comb += [
        spimem.csn.eq(~csn),
        spimem.sclk.eq(sclk),
        spimem.copi.eq(copi),
        cipo.eq(spimem.cipo),
    ]

    # Writable configuration registers (index == SPI register address;
    # None marks read-only slots).
    spi_wr_vals = Array([ccc.brightness, ccc.redness, ccc.greenness, ccc.blueness,
                         l.r, h.r, l.g, h.g, l.b, h.b,
                         sharpness, filt, border, mono, invert, grid, histo,
                         roi.x[1:], roi.y[1:], roi.w[1:], roi.h[1:], roi.en,
                         None, None, None,
                         threshold, thresh, hist_chan, flip,
                         None, None, None, None, None, None, None, None, None,
                         frozen])

    with m.If(spimem.wr):
        with m.Switch(spimem.addr):
            for i in range(len(spi_wr_vals)):
                if spi_wr_vals[i] is not None:
                    with m.Case(i):
                        m.d.sync += spi_wr_vals[i].eq(spimem.dout)

    # Readable configuration registers
    spi_rd_vals = Array([ccc.brightness, ccc.redness, ccc.greenness, ccc.blueness,
                         l.r, h.r, l.g, h.g, l.b, h.b,
                         sharpness, filt, border, mono, invert, grid, histo,
                         roi.x[1:], roi.y[1:], roi.w[1:], roi.h[1:], roi.en,
                         fil.o_nz[16:], fil.o_nz[8:16], fil.o_nz[:8],
                         threshold, thresh, hist_chan, flip,
                         stats.o_min.r, stats.o_min.g, stats.o_min.b,
                         stats.o_max.r, stats.o_max.g, stats.o_max.b,
                         stats.o_avg.r, stats.o_avg.g, stats.o_avg.b,
                         frozen, writing, written])

    with m.If(spimem.rd):
        with m.Switch(spimem.addr):
            for i in range(len(spi_rd_vals)):
                with m.Case(i):
                    m.d.sync += spimem.din.eq(spi_rd_vals[i])

    # Add VGA generator
    m.submodules.vga = vga = VGA(
        resolution_x = self.timing.x,
        hsync_front_porch = hsync_front_porch,
        hsync_pulse = hsync_pulse_width,
        hsync_back_porch = hsync_back_porch,
        resolution_y = self.timing.y,
        vsync_front_porch = vsync_front_porch,
        vsync_pulse = vsync_pulse_width,
        vsync_back_porch = vsync_back_porch,
        bits_x = 16, # Play around with the sizes because sometimes
        bits_y = 16  # a smaller/larger value will make it pass timing.
    )

    # Fetch histogram for display: advance one bin every 20 beam-x steps.
    old_x = Signal(10)
    m.d.sync += old_x.eq(vga.o_beam_x)

    with m.If(vga.o_beam_x == 0):
        m.d.sync += [
            hbin.eq(0),
            bin_cnt.eq(0)
        ]
    with m.Elif(vga.o_beam_x != old_x):
        m.d.sync += bin_cnt.eq(bin_cnt+1)
        with m.If(bin_cnt == 19):
            m.d.sync += [
                bin_cnt.eq(0),
                hbin.eq(hbin+1)
            ]

    # Switch between camera and histogram view
    with m.If(debhist.btn_down):
        m.d.sync += histo.eq(~histo)

    # Connect frame buffer, with optional x and y flip
    x = Signal(10)
    y = Signal(9)

    m.d.comb += [
        w.en.eq(imc.o_valid & ~frozen),
        w.addr.eq(imc.o_y * x_res + imc.o_x),
        w.data.eq(Cat(imc.o.b, imc.o.g, imc.o.r)),
        y.eq(Mux(flip[1], y_res - 1 - vga.o_beam_y, vga.o_beam_y)),
        x.eq(Mux(flip[0], x_res - 1 - vga.o_beam_x[1:], vga.o_beam_x[1:])),
        # During snapshot upload the read port is repurposed for the UART.
        r.addr.eq(Mux(writing, w_addr, y * x_res + x))
    ]

    # Apply the On-Screen Display (OSD)
    m.submodules.osd = osd = OSD()

    hist_col = Signal(8)
    m.d.comb += [
        osd.x.eq(vga.o_beam_x),
        osd.y.eq(vga.o_beam_y),
        hist_col.eq(Mux((479 - osd.y) < hist.o_val[8:], 0xff, 0x00)),
        # In histogram view, light the channel(s) being histogrammed;
        # otherwise expand the framebuffer RGB565 pixel to 8 bits/channel.
        osd.i_r.eq(Mux(histo, Mux((hist_chan == 0) | (hist_chan == 3), hist_col, 0),
                       Cat(Const(0, unsigned(3)), r.data[11:16]))),
        osd.i_g.eq(Mux(histo, Mux((hist_chan == 1) | (hist_chan == 3), hist_col, 0),
                       Cat(Const(0, unsigned(2)), r.data[5:11]))),
        osd.i_b.eq(Mux(histo, Mux((hist_chan == 2) | (hist_chan == 3), hist_col, 0),
                       Cat(Const(0, unsigned(3)), r.data[0:5]))),
        osd.on.eq(osd_on),
        osd.osd_val.eq(osd_val),
        osd.sel.eq(osd_sel),
        osd.grid.eq(grid),
        osd.border.eq(border),
        osd.roi.eq(roi.en & ~histo),
        osd.roi_x.eq(roi.x),
        osd.roi_y.eq(roi.y),
        osd.roi_w.eq(roi.w),
        osd.roi_h.eq(roi.h)
    ]

    # OSD control: the values adjustable via the up/down buttons.
    osd_vals = Array([ccc.brightness, ccc.redness, ccc.greenness, ccc.blueness,
                      mono, flip[0], flip[1], border,
                      sharpness, invert, grid, filt])

    with m.If(debosd.btn_down):
        m.d.sync += osd_on.eq(~osd_on)

    with m.If(osd_on):
        with m.If(debsel.btn_down):
            m.d.sync += osd_sel.eq(~osd_sel)

        # Up: either move the OSD cursor, or increment (set) the selected value.
        with m.If(debup.btn_down):
            with m.If(~osd_sel):
                m.d.sync += osd_val.eq(Mux(osd_val == 0, 11, osd_val-1))
            with m.Else():
                with m.Switch(osd_val):
                    for i in range(len(osd_vals)):
                        with m.Case(i):
                            if (len(osd_vals[i]) == 1):
                                m.d.sync += osd_vals[i].eq(1)
                            else:
                                m.d.sync += osd_vals[i].eq(osd_vals[i]+1)

        # Down: either move the OSD cursor, or decrement (clear) the selected value.
        with m.If(debdown.btn_down):
            with m.If(~osd_sel):
                m.d.sync += osd_val.eq(Mux(osd_val == 11, 0, osd_val+1))
            with m.Else():
                with m.Switch(osd_val):
                    for i in range(len(osd_vals)):
                        with m.Case(i):
                            if (len(osd_vals[i]) == 1):
                                m.d.sync += osd_vals[i].eq(0)
                            else:
                                m.d.sync += osd_vals[i].eq(osd_vals[i]-1)

    # Show configuration values on leds
    with m.Switch(osd_val):
        for i in range(len(osd_vals)):
            with m.Case(i):
                m.d.comb += leds.eq(osd_vals[i])

    # Generate VGA signals
    m.d.comb += [
        vga.i_clk_en.eq(1),
        vga.i_test_picture.eq(0),
        vga.i_r.eq(osd.o_r),
        vga.i_g.eq(osd.o_g),
        vga.i_b.eq(osd.o_b),
        vga_r.eq(vga.o_vga_r),
        vga_g.eq(vga.o_vga_g),
        vga_b.eq(vga.o_vga_b),
        vga_hsync.eq(vga.o_vga_hsync),
        vga_vsync.eq(vga.o_vga_vsync),
        vga_blank.eq(vga.o_vga_blank),
    ]

    # VGA to digital video converter.
    tmds = [Signal(2) for i in range(4)]
    m.submodules.vga2dvid = vga2dvid = VGA2DVID(ddr=self.ddr, shift_clock_synchronizer=False)
    m.d.comb += [
        vga2dvid.i_red.eq(vga_r),
        vga2dvid.i_green.eq(vga_g),
        vga2dvid.i_blue.eq(vga_b),
        vga2dvid.i_hsync.eq(vga_hsync),
        vga2dvid.i_vsync.eq(vga_vsync),
        vga2dvid.i_blank.eq(vga_blank),
        tmds[3].eq(vga2dvid.o_clk),
        tmds[2].eq(vga2dvid.o_red),
        tmds[1].eq(vga2dvid.o_green),
        tmds[0].eq(vga2dvid.o_blue),
    ]

    # GPDI pins
    if (self.ddr):
        # Vendor specific DDR modules.
        # Convert SDR 2-bit input to DDR clocked 1-bit output (single-ended)
        # onboard GPDI.
        m.submodules.ddr0_clock = Instance("ODDRX1F",
            i_SCLK = ClockSignal("shift"),
            i_RST  = 0b0,
            i_D0   = tmds[3][0],
            i_D1   = tmds[3][1],
            o_Q    = self.o_gpdi_dp[3])
        m.submodules.ddr0_red = Instance("ODDRX1F",
            i_SCLK = ClockSignal("shift"),
            i_RST  = 0b0,
            i_D0   = tmds[2][0],
            i_D1   = tmds[2][1],
            o_Q    = self.o_gpdi_dp[2])
        m.submodules.ddr0_green = Instance("ODDRX1F",
            i_SCLK = ClockSignal("shift"),
            i_RST  = 0b0,
            i_D0   = tmds[1][0],
            i_D1   = tmds[1][1],
            o_Q    = self.o_gpdi_dp[1])
        m.submodules.ddr0_blue = Instance("ODDRX1F",
            i_SCLK = ClockSignal("shift"),
            i_RST  = 0b0,
            i_D0   = tmds[0][0],
            i_D1   = tmds[0][1],
            o_Q    = self.o_gpdi_dp[0])
    else:
        m.d.comb += [
            self.o_gpdi_dp[3].eq(tmds[3][0]),
            self.o_gpdi_dp[2].eq(tmds[2][0]),
            self.o_gpdi_dp[1].eq(tmds[1][0]),
            self.o_gpdi_dp[0].eq(tmds[0][0]),
        ]

    return m