def __init__(self, nchan=3, depth=8):
    """Align nchan data channels (pix clock domain) by elastic-buffering each one.

    Each channel's read side is held back while it shows a control period
    (de low) until all channels do, so the channels leave the buffers aligned.
    chan_synced is exported to the sys domain through a CSR.
    """
    self.valid_i = Signal()
    self.chan_synced = Signal()
    self._r_channels_synced = CSRStatus()
    lst_control = []
    all_control = Signal()
    for i in range(nchan):
        # per-channel input/output records, exposed as data_in<i>/data_out<i>
        name = "data_in" + str(i)
        data_in = Record(channel_layout, name=name)
        setattr(self, name, data_in)
        name = "data_out" + str(i)
        data_out = Record(channel_layout, name=name)
        setattr(self, name, data_out)
        ###
        # elastic buffer between input and output of this channel
        syncbuffer = RenameClockDomains(_SyncBuffer(layout_len(channel_layout), depth), "pix")
        self.submodules += syncbuffer
        self.comb += [syncbuffer.din.eq(data_in.raw_bits()), data_out.raw_bits().eq(syncbuffer.dout)]
        # a control period is signalled by de being low; stall the buffer
        # read-out on control periods until every channel shows one
        is_control = Signal()
        self.comb += [is_control.eq(~data_out.de), syncbuffer.re.eq(~is_control | all_control)]
        lst_control.append(is_control)
    some_control = Signal()
    self.comb += [all_control.eq(optree("&", lst_control)), some_control.eq(optree("|", lst_control))]
    # synced once a control period is simultaneously visible on all channels;
    # lost when only some channels show one, or when valid_i drops
    self.sync.pix += If(~self.valid_i, self.chan_synced.eq(0)).Else(
        If(some_control, If(all_control, self.chan_synced.eq(1)).Else(self.chan_synced.eq(0)))
    )
    self.specials += MultiReg(self.chan_synced, self._r_channels_synced.status)
def __init__(self, master, slaves, register=False):
    """Address decoder connecting one bus master to several slaves.

    slaves is a list of (address_decoder_fn, slave_bus) pairs; each decoder
    function maps master.adr to a one-bit hit.  register=True inserts a
    register on the select lines used for the read-data return mux.
    """
    ns = len(slaves)
    slave_sel = Signal(ns)
    slave_sel_r = Signal(ns)
    # decode slave addresses
    self.comb += [slave_sel[i].eq(fun(master.adr)) for i, (fun, bus) in enumerate(slaves)]
    if register:
        self.sync += slave_sel_r.eq(slave_sel)
    else:
        self.comb += slave_sel_r.eq(slave_sel)
    # connect master->slaves signals except cyc
    for slave in slaves:
        for name, size, direction in _layout:
            if direction == DIR_M_TO_S and name != "cyc":
                self.comb += getattr(slave[1], name).eq(getattr(master, name))
    # combine cyc with slave selection signals
    self.comb += [slave[1].cyc.eq(master.cyc & slave_sel[i]) for i, slave in enumerate(slaves)]
    # generate master ack (resp. err) by ORing all slave acks (resp. errs)
    self.comb += [
        master.ack.eq(optree("|", [slave[1].ack for slave in slaves])),
        master.err.eq(optree("|", [slave[1].err for slave in slaves]))
    ]
    # mux (1-hot) slave data return
    masked = [Replicate(slave_sel_r[i], flen(master.dat_r)) & slaves[i][1].dat_r for i in range(ns)]
    self.comb += master.dat_r.eq(optree("|", masked))
def __init__(self, period_bits=24):
    """TMDS word error rate counter.

    Counts, per 2**period_bits pixel clocks, the TMDS characters that are
    neither control tokens nor valid pixel data, and exposes the count of the
    last completed period through a CSR in the sys clock domain.
    """
    self.data = Signal(10)
    self._update = CSR()
    self._value = CSRStatus(period_bits)
    ###
    # (pipeline stage 1)
    # We ignore the 10th (inversion) bit, as it is independent of the
    # transition minimization.
    data_r = Signal(9)
    self.sync.pix += data_r.eq(self.data[:9])
    # (pipeline stage 2)
    # Count the number of transitions in the TMDS word.
    transitions = Signal(8)
    self.comb += [transitions[i].eq(data_r[i] ^ data_r[i + 1]) for i in range(8)]
    transition_count = Signal(max=9)
    self.sync.pix += transition_count.eq(optree("+", [transitions[i] for i in range(8)]))
    # Control data characters are designed to have a large number (7) of
    # transitions to help the receiver synchronize its clock with the
    # transmitter clock.
    is_control = Signal()
    self.sync.pix += is_control.eq(optree("|", [data_r == ct for ct in control_tokens]))
    # (pipeline stage 3)
    # The TMDS characters selected to represent pixel data contain five or
    # fewer transitions.
    is_error = Signal()
    self.sync.pix += is_error.eq((transition_count > 4) & ~is_control)
    # counter: period_done is the carry out of the free-running period counter
    period_counter = Signal(period_bits)
    period_done = Signal()
    self.sync.pix += Cat(period_counter, period_done).eq(period_counter + 1)
    wer_counter = Signal(period_bits)
    wer_counter_r = Signal(period_bits)
    wer_counter_r_updated = Signal()
    self.sync.pix += [
        wer_counter_r_updated.eq(period_done),
        # latch and restart the error counter at the end of each period
        If(period_done,
            wer_counter_r.eq(wer_counter),
            wer_counter.eq(0)
        ).Elif(is_error,
            wer_counter.eq(wer_counter + 1)
        )
    ]
    # sync to system clock domain
    wer_counter_sys = Signal(period_bits)
    self.submodules.ps_counter = PulseSynchronizer("pix", "sys")
    self.comb += self.ps_counter.i.eq(wer_counter_r_updated)
    self.sync += If(self.ps_counter.o, wer_counter_sys.eq(wer_counter_r))
    # register interface: snapshot into the CSR on software request
    self.sync += If(self._update.re, self._value.status.eq(wer_counter_sys))
def complete_selector(self, slicer, bankn, slots):
    """Build the request selector feeding this bank machine's round-robin.

    Requests pending for bank ``bankn`` whose row matches the currently open
    row are preferred; otherwise any outstanding request is eligible.  When
    slots carry an anti-starvation timer, matured requests pre-empt both.

    Bug fixes vs. the previous version:
    - ``comb.append(...)`` referenced an undefined local ``comb`` (NameError);
      statements now go to ``self.comb``.
    - ``rr`` was rebound to a freshly constructed ``RoundRobin`` that was never
      registered as a submodule, so the requests driven here never reached the
      arbiter actually granting; we now keep driving ``self.rr``.
    """
    rr = self.rr
    # List outstanding requests for our bank
    outstandings = []
    for slot in slots:
        outstanding = Signal()
        self.comb += outstanding.eq(
            (slicer.bank(slot.adr) == bankn) & \
            (slot.state == SLOT_PENDING))
        outstandings.append(outstanding)
    # Row tracking: openrow follows the presented address while stb is high,
    # and the registered copy otherwise
    openrow_r = Signal(slicer.geom_settings.row_a)
    openrow_n = Signal(slicer.geom_settings.row_a)
    openrow = Signal(slicer.geom_settings.row_a)
    self.comb += [
        openrow_n.eq(slicer.row(self.adr)),
        If(self.stb,
            openrow.eq(openrow_n)
        ).Else(
            openrow.eq(openrow_r)
        )
    ]
    self.sync += If(self.stb & self.ack, openrow_r.eq(openrow_n))
    # A hit is an outstanding request targeting the open row (no precharge
    # or activate needed)
    hits = []
    for slot, os in zip(slots, outstandings):
        hit = Signal()
        self.comb += hit.eq((slicer.row(slot.adr) == openrow) & os)
        hits.append(hit)
    # Determine best request
    has_hit = Signal()
    self.comb += has_hit.eq(optree("|", hits))
    best_hit = [rr.request[i].eq(hit) for i, hit in enumerate(hits)]
    best_fallback = [rr.request[i].eq(os) for i, os in enumerate(outstandings)]
    select_stmt = If(has_hit,
        *best_hit
    ).Else(
        *best_fallback
    )
    if slots[0].time:
        # Implement anti-starvation timer: matured outstanding requests
        # override the hit/fallback selection
        matures = []
        for slot, os in zip(slots, outstandings):
            mature = Signal()
            self.comb += mature.eq(slot.mature & os)
            matures.append(mature)
        has_mature = Signal()
        self.comb += has_mature.eq(optree("|", matures))
        best_mature = [rr.request[i].eq(mature) for i, mature in enumerate(matures)]
        select_stmt = If(has_mature, *best_mature).Else(select_stmt)
    self.comb += select_stmt
def __init__(self, period_bits=24):
    """TMDS word error rate counter (duplicate variant).

    Per period of 2**period_bits pixel clocks, counts characters that are
    neither control tokens nor valid (<=5 transition) pixel characters, and
    publishes the last period's count via a sys-domain CSR.
    """
    self.data = Signal(10)
    self._update = CSR()
    self._value = CSRStatus(period_bits)
    ###
    # (pipeline stage 1)
    # We ignore the 10th (inversion) bit, as it is independent of the
    # transition minimization.
    data_r = Signal(9)
    self.sync.pix += data_r.eq(self.data[:9])
    # (pipeline stage 2)
    # Count the number of transitions in the TMDS word.
    transitions = Signal(8)
    self.comb += [transitions[i].eq(data_r[i] ^ data_r[i+1]) for i in range(8)]
    transition_count = Signal(max=9)
    self.sync.pix += transition_count.eq(optree("+", [transitions[i] for i in range(8)]))
    # Control data characters are designed to have a large number (7) of
    # transitions to help the receiver synchronize its clock with the
    # transmitter clock.
    is_control = Signal()
    self.sync.pix += is_control.eq(optree("|", [data_r == ct for ct in control_tokens]))
    # (pipeline stage 3)
    # The TMDS characters selected to represent pixel data contain five or
    # fewer transitions.
    is_error = Signal()
    self.sync.pix += is_error.eq((transition_count > 4) & ~is_control)
    # counter: period_done is the carry of the free-running period counter
    period_counter = Signal(period_bits)
    period_done = Signal()
    self.sync.pix += Cat(period_counter, period_done).eq(period_counter + 1)
    wer_counter = Signal(period_bits)
    wer_counter_r = Signal(period_bits)
    wer_counter_r_updated = Signal()
    self.sync.pix += [
        wer_counter_r_updated.eq(period_done),
        # latch the error count at period end, then restart from zero
        If(period_done,
            wer_counter_r.eq(wer_counter),
            wer_counter.eq(0)
        ).Elif(is_error,
            wer_counter.eq(wer_counter + 1)
        )
    ]
    # sync to system clock domain
    wer_counter_sys = Signal(period_bits)
    self.submodules.ps_counter = PulseSynchronizer("pix", "sys")
    self.comb += self.ps_counter.i.eq(wer_counter_r_updated)
    self.sync += If(self.ps_counter.o, wer_counter_sys.eq(wer_counter_r))
    # register interface
    self.sync += If(self._update.re, self._value.status.eq(wer_counter_sys))
def __init__(self, period_bits=24):
    """TMDS word error rate counter (compact-comment variant).

    Counts invalid TMDS characters per 2**period_bits pixel clocks and
    exposes the last completed count through a sys-domain CSR.
    """
    self.data = Signal(10)
    self._update = CSR()
    self._value = CSRStatus(period_bits)
    ###
    # pipeline stage 1
    # we ignore the 10th (inversion) bit, as it is independent of the transition minimization
    data_r = Signal(9)
    self.sync.pix += data_r.eq(self.data[:9])
    # pipeline stage 2: count transitions; control tokens have 7 of them
    transitions = Signal(8)
    self.comb += [transitions[i].eq(data_r[i] ^ data_r[i + 1]) for i in range(8)]
    transition_count = Signal(max=9)
    self.sync.pix += transition_count.eq(optree("+", [transitions[i] for i in range(8)]))
    is_control = Signal()
    self.sync.pix += is_control.eq(optree("|", [data_r == ct for ct in control_tokens]))
    # pipeline stage 3: valid pixel characters have at most 5 transitions
    is_error = Signal()
    self.sync.pix += is_error.eq((transition_count > 4) & ~is_control)
    # counter: period_done pulses on the counter's carry out
    period_counter = Signal(period_bits)
    period_done = Signal()
    self.sync.pix += Cat(period_counter, period_done).eq(period_counter + 1)
    wer_counter = Signal(period_bits)
    wer_counter_r = Signal(period_bits)
    wer_counter_r_updated = Signal()
    self.sync.pix += [
        wer_counter_r_updated.eq(period_done),
        # latch and clear the error counter once per period
        If(period_done,
            wer_counter_r.eq(wer_counter),
            wer_counter.eq(0)
        ).Elif(is_error,
            wer_counter.eq(wer_counter + 1)
        )
    ]
    # sync to system clock domain
    wer_counter_sys = Signal(period_bits)
    self.submodules.ps_counter = PulseSynchronizer("pix", "sys")
    self.comb += self.ps_counter.i.eq(wer_counter_r_updated)
    self.sync += If(self.ps_counter.o, wer_counter_sys.eq(wer_counter_r))
    # register interface
    self.sync += If(self._update.re, self._value.status.eq(wer_counter_sys))
def do_finalize(self):
    """Fan shared call/tag/read-data signals out to every port and OR the
    ports' write data and write masks back together."""
    self.tag_call = Signal(self.tagbits)
    for port in self.ports:
        # broadcast completion and read-data signals to all ports
        self.comb += [
            port.call.eq(self.call),
            port.tag_call.eq(self.tag_call),
            port.dat_r.eq(self.dat_r)
        ]
    # combine write data/mask; inactive ports are expected to drive zero
    self.comb += [
        self.dat_w.eq(optree("|", [port.dat_w for port in self.ports])),
        self.dat_wm.eq(optree("|", [port.dat_wm for port in self.ports]))
    ]
def __init__(self, period_bits=24):
    """TMDS word error rate counter (compact variant).

    Per 2**period_bits pixel clocks, counts characters that are neither
    control tokens nor valid pixel characters; the last period's count is
    readable through a sys-domain CSR after writing _update.
    """
    self.data = Signal(10)
    self._update = CSR()
    self._value = CSRStatus(period_bits)
    ###
    # pipeline stage 1
    # we ignore the 10th (inversion) bit, as it is independent of the transition minimization
    data_r = Signal(9)
    self.sync.pix += data_r.eq(self.data[:9])
    # pipeline stage 2: transition count; control tokens carry 7 transitions
    transitions = Signal(8)
    self.comb += [transitions[i].eq(data_r[i] ^ data_r[i+1]) for i in range(8)]
    transition_count = Signal(max=9)
    self.sync.pix += transition_count.eq(optree("+", [transitions[i] for i in range(8)]))
    is_control = Signal()
    self.sync.pix += is_control.eq(optree("|", [data_r == ct for ct in control_tokens]))
    # pipeline stage 3: pixel characters have at most 5 transitions
    is_error = Signal()
    self.sync.pix += is_error.eq((transition_count > 4) & ~is_control)
    # counter: period_done is the carry out of the period counter
    period_counter = Signal(period_bits)
    period_done = Signal()
    self.sync.pix += Cat(period_counter, period_done).eq(period_counter + 1)
    wer_counter = Signal(period_bits)
    wer_counter_r = Signal(period_bits)
    wer_counter_r_updated = Signal()
    self.sync.pix += [
        wer_counter_r_updated.eq(period_done),
        # latch error count at period end and restart
        If(period_done,
            wer_counter_r.eq(wer_counter),
            wer_counter.eq(0)
        ).Elif(is_error,
            wer_counter.eq(wer_counter + 1)
        )
    ]
    # sync to system clock domain
    wer_counter_sys = Signal(period_bits)
    self.submodules.ps_counter = PulseSynchronizer("pix", "sys")
    self.comb += self.ps_counter.i.eq(wer_counter_r_updated)
    self.sync += If(self.ps_counter.o, wer_counter_sys.eq(wer_counter_r))
    # register interface
    self.sync += If(self._update.re, self._value.status.eq(wer_counter_sys))
def get_fragment(self):
    """Build the fragment wiring allocation and completion logic to all slots.

    Raises FinalizeError when called before finalization.  New requests are
    allocated to the lowest-numbered empty slot (priority encoder built by
    folding from the highest slot downwards).
    """
    if not self.finalized:
        raise FinalizeError
    slots_fragment = sum([s.get_fragment() for s in self.slots], Fragment())
    comb = []
    sync = []
    # allocate: present write-enable and address to every slot
    for s in self.slots:
        comb += [
            s.allocate_we.eq(self.we),
            s.allocate_adr.eq(self.adr)
        ]
    # fold a priority encoder: iterating in reverse makes slot 0 the
    # outermost (highest-priority) condition
    choose_slot = None
    needs_tags = len(self.slots) > 1
    for n, s in reversed(list(enumerate(self.slots))):
        choose_slot = If(s.state == SLOT_EMPTY,
            s.allocate.eq(self.stb),
            self.tag_issue.eq(n) if needs_tags else None
        ).Else(choose_slot)
    comb.append(choose_slot)
    # a request is acknowledged as long as any slot is free
    comb.append(self.ack.eq(optree("|", [s.state == SLOT_EMPTY for s in self.slots])))
    # call
    comb += [s.call.eq(self.get_call_expression(n)) for n, s in enumerate(self.slots)]
    return slots_fragment + Fragment(comb, sync)
def get_fragment(self):
    """Combine several sink endpoints into one source: the source strobes
    only when every sink strobes, and all sinks are acked together."""
    source = self.endpoints["source"]
    # all endpoints except "source" are sinks named sink0, sink1, ...
    sinks = [self.endpoints["sink{0}".format(n)] for n in range(len(self.endpoints)-1)]
    comb = [source.stb.eq(optree("&", [sink.stb for sink in sinks]))]
    # ack every sink simultaneously once the downstream transaction completes
    comb += [sink.ack.eq(source.ack & source.stb) for sink in sinks]
    return Fragment(comb)
def do_finalize(self):
    """Wire allocation and completion logic to all slots (Module variant).

    New requests go to the lowest-numbered empty slot; with more than one
    slot, the issued tag is reported on tag_issue.
    """
    nslots = len(self.slots)
    if nslots > 1:
        self.tag_issue = Signal(max=nslots)
    self.tag_call = Signal(self.hub.tagbits)
    # allocate: present write-enable and address to every slot
    for s in self.slots:
        self.comb += [
            s.allocate_we.eq(self.we),
            s.allocate_adr.eq(self.adr)
        ]
    # fold a priority encoder; reversing makes slot 0 the outermost condition
    choose_slot = None
    needs_tags = len(self.slots) > 1
    for n, s in reversed(list(enumerate(self.slots))):
        choose_slot = If(s.state == SLOT_EMPTY,
            s.allocate.eq(self.stb),
            self.tag_issue.eq(n) if needs_tags else None
        ).Else(choose_slot)
    self.comb += choose_slot
    # acknowledge while any slot is free
    self.comb += self.ack.eq(optree("|", [s.state == SLOT_EMPTY for s in self.slots]))
    # call
    self.comb += [s.call.eq(self.get_call_expression(n)) for n, s in enumerate(self.slots)]
def __init__(self, dfg): dfg.elaborate() # expose unconnected endpoints uc_eps_by_node = dict((node, get_endpoints(node)) for node in dfg) for u, v, d in dfg.edges_iter(data=True): uc_eps_u = uc_eps_by_node[u] source = d["source"] try: del uc_eps_u[source] except KeyError: pass uc_eps_v = uc_eps_by_node[v] sink = d["sink"] try: del uc_eps_v[sink] except KeyError: pass for node, uc_eps in uc_eps_by_node.items(): for k, v in uc_eps.items(): assert(not hasattr(self, k)) setattr(self, k, v) # generate busy signal self.busy = Signal() self.comb += self.busy.eq(optree("|", [node.busy for node in dfg])) # claim ownership of sub-actors and establish connections for node in dfg: self.submodules += node for u, v, d in dfg.edges_iter(data=True): ep_src = getattr(u, d["source"]) ep_dst = getattr(v, d["sink"]) self.comb += ep_src.connect_flat(ep_dst)
def get_fragment(self):
    """Build the hub fragment: broadcast call/tag/read-data to every port
    and OR the ports' write data and write masks together.

    Raises FinalizeError when called before finalization.
    """
    if not self.finalized:
        raise FinalizeError
    ports = sum([port.get_fragment() for port in self.ports], Fragment())
    comb = []
    for port in self.ports:
        # broadcast completion and read-data signals
        comb += [
            port.call.eq(self.call),
            port.tag_call.eq(self.tag_call),
            port.dat_r.eq(self.dat_r)
        ]
    # combine write data/mask; inactive ports are expected to drive zero
    comb += [
        self.dat_w.eq(optree("|", [port.dat_w for port in self.ports])),
        self.dat_wm.eq(optree("|", [port.dat_wm for port in self.ports]))
    ]
    return ports + Fragment(comb)
def __init__(self):
    """OR every signal from the la and lb lists of three Foo instances into
    a single output signal."""
    a = [Foo() for _ in range(3)]
    output = Signal()
    # gather each object's la signals followed by its lb signals
    allsigs = [sig for obj in a for sig in obj.la + obj.lb]
    self.comb += output.eq(optree("|", allsigs))
def __init__(self, nchan=3, depth=8):
    """Align nchan data channels (pix clock domain) via per-channel elastic
    buffers (formatted variant).

    Buffer read-out is stalled on control periods (de low) until all
    channels show one, which aligns the streams; chan_synced is exported
    through a sys-domain CSR.
    """
    self.valid_i = Signal()
    self.chan_synced = Signal()
    self._channels_synced = CSRStatus()
    lst_control = []
    all_control = Signal()
    for i in range(nchan):
        # per-channel input/output records, exposed as data_in<i>/data_out<i>
        name = "data_in" + str(i)
        data_in = Record(channel_layout, name=name)
        setattr(self, name, data_in)
        name = "data_out" + str(i)
        data_out = Record(channel_layout, name=name)
        setattr(self, name, data_out)
        ###
        # elastic buffer between input and output of this channel
        syncbuffer = RenameClockDomains(
            _SyncBuffer(layout_len(channel_layout), depth), "pix")
        self.submodules += syncbuffer
        self.comb += [
            syncbuffer.din.eq(data_in.raw_bits()),
            data_out.raw_bits().eq(syncbuffer.dout)
        ]
        # control period = de low; hold this channel's read-out on control
        # periods until all channels show one
        is_control = Signal()
        self.comb += [
            is_control.eq(~data_out.de),
            syncbuffer.re.eq(~is_control | all_control)
        ]
        lst_control.append(is_control)
    some_control = Signal()
    self.comb += [
        all_control.eq(optree("&", lst_control)),
        some_control.eq(optree("|", lst_control))
    ]
    # synced when a control period appears on all channels simultaneously;
    # cleared when only some show one or when valid_i drops
    self.sync.pix += If(~self.valid_i, self.chan_synced.eq(0)).Else(
        If(
            some_control,
            If(all_control, self.chan_synced.eq(1)).Else(self.chan_synced.eq(0))))
    self.specials += MultiReg(self.chan_synced, self._channels_synced.status)
def simple_interconnect_stmts(desc, master, slaves):
    """Return the statements wiring *master* to every interface in *slaves*.

    Master-driven (M_TO_S) signals are fanned out to each slave; slave-driven
    (S_TO_M) signals are OR-combined back into the master.
    """
    s2m_names = desc.get_names(S_TO_M)
    m2s_names = desc.get_names(M_TO_S)
    statements = []
    # fan out each master-driven signal to every slave
    for name in m2s_names:
        for slave in slaves:
            statements.append(getattr(slave, name).eq(getattr(master, name)))
    # OR each slave-driven signal into the master
    for name in s2m_names:
        slave_signals = [getattr(slave, name) for slave in slaves]
        statements.append(getattr(master, name).eq(optree("|", slave_signals)))
    return statements
def __init__(self, base_port, nshares): self.shared_ports = [SharedPort(base_port) for i in range(nshares)] ### # request issuance self.submodules.rr = roundrobin.RoundRobin(nshares, roundrobin.SP_CE) self.comb += [ self.rr.request.eq(Cat(*[sp.stb for sp in self.shared_ports])), self.rr.ce.eq(base_port.ack) ] self.comb += [ base_port.adr.eq(Array(sp.adr for sp in self.shared_ports)[self.rr.grant]), base_port.we.eq(Array(sp.we for sp in self.shared_ports)[self.rr.grant]), base_port.stb.eq(Array(sp.stb for sp in self.shared_ports)[self.rr.grant]), ] if hasattr(base_port, "tag_issue"): self.comb += [sp.tag_issue.eq(base_port.tag_issue) for sp in self.shared_ports] self.comb += [sp.ack.eq(base_port.ack & (self.rr.grant == n)) for n, sp in enumerate(self.shared_ports)] # request completion self.comb += [sp.call.eq(base_port.call & Array(sp.slots)[base_port.tag_call-base_port.base]) for sp in self.shared_ports] self.comb += [sp.tag_call.eq(base_port.tag_call) for sp in self.shared_ports] self.comb += [sp.dat_r.eq(base_port.dat_r) for sp in self.shared_ports] self.comb += [ base_port.dat_w.eq(optree("|", [sp.dat_w for sp in self.shared_ports])), base_port.dat_wm.eq(optree("|", [sp.dat_wm for sp in self.shared_ports])), ] # request ownership tracking if hasattr(base_port, "tag_issue"): for sp in self.shared_ports: self.sync += If(sp.stb & sp.ack, Array(sp.slots)[sp.tag_issue].eq(1)) for n, slot in enumerate(sp.slots): self.sync += If(base_port.call & (base_port.tag_call == (base_port.base + n)), slot.eq(0)) else: for sp in self.shared_ports: self.sync += [ If(sp.stb & sp.ack, sp.slots[0].eq(1)), If(base_port.call & (base_port.tag_call == base_port.base), sp.slots[0].eq(0)) ]
def __init__(self, charge, sense, width=24):
    """Timed charge/discharge measurement on one or more *sense* lines.

    A CSR-triggered cycle drives *charge* against the configured polarity,
    then counts clock cycles until each synchronized sense input flips to
    the polarity; the per-channel count is latched into a res<i> CSR.
    Channels still busy when the width-bit counter overflows are flagged in
    the overflow CSR.  *sense* may be a single signal or an iterable.
    """
    # Bug fix: collections.Iterable was deprecated in Python 3.3 and removed
    # in Python 3.10; the ABC lives in collections.abc.
    from collections.abc import Iterable
    if not isinstance(sense, Iterable):
        sense = [sense]
    channels = len(sense)
    self._start_busy = CSR()
    self._overflow = CSRStatus(channels)
    self._polarity = CSRStorage()
    count = Signal(width)
    busy = Signal(channels)
    res = []
    for i in range(channels):
        # one result CSR per channel, exposed as _res<i>
        res.append(CSRStatus(width, name="res"+str(i)))
        setattr(self, "_res"+str(i), res[-1])
    any_busy = Signal()
    self.comb += [
        any_busy.eq(optree("|", [busy[i] for i in range(channels)])),
        # reading _start_busy returns whether a measurement is in progress
        self._start_busy.w.eq(any_busy)
    ]
    carry = Signal()
    self.sync += [
        If(self._start_busy.re,
            # start: reset counter, mark all channels busy, drive charge
            # against the polarity so the lines transition during the count
            count.eq(0),
            busy.eq((1 << channels)-1),
            self._overflow.status.eq(0),
            charge.eq(~self._polarity.storage)
        ).Elif(any_busy,
            Cat(count, carry).eq(count + 1),
            # counter overflow: abort remaining channels, record them
            If(carry,
                self._overflow.status.eq(busy),
                busy.eq(0)
            )
        ).Else(
            # idle: park charge at the polarity level
            charge.eq(self._polarity.storage)
        )
    ]
    for i in range(channels):
        # synchronize the asynchronous sense input, then latch the count
        # when it reaches the polarity level
        sense_synced = Signal()
        self.specials += MultiReg(sense[i], sense_synced)
        self.sync += If(busy[i],
            If(sense_synced != self._polarity.storage,
                res[i].status.eq(count),
                busy[i].eq(0)
            )
        )
def get_fragment(self):
    """Assemble the composite actor fragment: combined busy signal, each
    node's fragment, the edge connections, and the optional debugger."""
    comb = [self.busy.eq(optree("|", [node.busy for node in self.dfg]))]
    fragment = Fragment(comb)
    for node in self.dfg:
        fragment += node.get_fragment()
    # wire each graph edge's source endpoint to its sink endpoint
    for u, v, d in self.dfg.edges_iter(data=True):
        ep_src = u.endpoints[d["source"]]
        ep_dst = v.endpoints[d["sink"]]
        fragment += get_conn_fragment(ep_src, ep_dst)
    if hasattr(self, "debugger"):
        fragment += self.debugger.get_fragment()
    return fragment
def __init__(self):
    """OR together the sigs of three lists of objects into one output.

    Note: c aliases the first Foo list before b is rebound to a fresh list
    of two Bars, so the collection order is a (3 Bars), b (2 Bars), c (3 Foos).
    """
    a = [Bar() for _ in range(3)]
    b = [Foo() for _ in range(3)]
    c = b
    b = [Bar() for _ in range(2)]
    output = Signal()
    allsigs = [sig for lst in (a, b, c) for obj in lst for sig in obj.sigs]
    self.comb += output.eq(optree("|", allsigs))
def __init__(self, master, slaves, register=False):
    """Address decoder connecting one bus master to several slaves
    (formatted variant).

    slaves is a list of (address_decoder_fn, slave_bus) pairs; register=True
    adds a register on the select lines used by the read-data return mux.
    """
    ns = len(slaves)
    slave_sel = Signal(ns)
    slave_sel_r = Signal(ns)
    # decode slave addresses
    self.comb += [
        slave_sel[i].eq(fun(master.adr))
        for i, (fun, bus) in enumerate(slaves)
    ]
    if register:
        self.sync += slave_sel_r.eq(slave_sel)
    else:
        self.comb += slave_sel_r.eq(slave_sel)
    # connect master->slaves signals except cyc
    for slave in slaves:
        for name, size, direction in _layout:
            if direction == DIR_M_TO_S and name != "cyc":
                self.comb += getattr(slave[1], name).eq(getattr(master, name))
    # combine cyc with slave selection signals
    self.comb += [
        slave[1].cyc.eq(master.cyc & slave_sel[i])
        for i, slave in enumerate(slaves)
    ]
    # generate master ack (resp. err) by ORing all slave acks (resp. errs)
    self.comb += [
        master.ack.eq(optree("|", [slave[1].ack for slave in slaves])),
        master.err.eq(optree("|", [slave[1].err for slave in slaves]))
    ]
    # mux (1-hot) slave data return
    masked = [
        Replicate(slave_sel_r[i], flen(master.dat_r)) & slaves[i][1].dat_r
        for i in range(ns)
    ]
    self.comb += master.dat_r.eq(optree("|", masked))
def build_binary_control(self, stb_i, ack_o, stb_o, ack_i, latency):
    """Build the handshake control for a pipeline of fixed *latency*.

    A shift register of valid bits tracks in-flight data; the pipeline
    advances (pipe_ce) when downstream accepts or the last stage is empty.
    """
    valid = Signal(latency)
    if latency > 1:
        # shift the input strobe through the valid pipeline
        self.sync += If(self.pipe_ce, valid.eq(Cat(stb_i, valid[:latency-1])))
    else:
        self.sync += If(self.pipe_ce, valid.eq(stb_i))
    last_valid = valid[latency-1]
    self.comb += [
        # advance when downstream acks or there is nothing at the output
        self.pipe_ce.eq(ack_i | ~last_valid),
        ack_o.eq(self.pipe_ce),
        stb_o.eq(last_valid),
        # busy while any stage holds valid data
        self.busy.eq(optree("|", [valid[i] for i in range(latency)]))
    ]
def get_fragment(self):
    """Build a transposed-free FIR filter fragment.

    The input is shifted through a register chain; each tap is multiplied by
    its fixed-point coefficient and all products are summed, with the result
    truncated back to wsize bits.
    """
    muls = []
    sync = []
    src = self.i
    for c in self.coef:
        # delay line: each coefficient taps one register deeper
        sreg = Signal((self.wsize, True))
        sync.append(sreg.eq(src))
        src = sreg
        # convert the float coefficient to signed fixed point
        c_fp = int(c*2**(self.wsize - 1))
        muls.append(c_fp*sreg)
    sum_full = Signal((2*self.wsize-1, True))
    sync.append(sum_full.eq(optree("+", muls)))
    # drop the fractional bits introduced by the fixed-point coefficients
    comb = [self.o.eq(sum_full[self.wsize-1:])]
    return Fragment(comb, sync)
def __init__(self, n_out, n_state=31, taps=[27, 30]): self.o = Signal(n_out) ### state = Signal(n_state) curval = [state[i] for i in range(n_state)] curval += [0] * (n_out - n_state) for i in range(n_out): nv = ~optree("^", [curval[tap] for tap in taps]) curval.insert(0, nv) curval.pop() self.sync += [state.eq(Cat(*curval[:n_state])), self.o.eq(Cat(*curval))]
def __init__(self, layout, subrecords):
    """Combine several partial-layout sinks into one source.

    Each entry of subrecords selects the payload fields carried by the
    corresponding sink<n>; the source strobes when all sinks strobe, and all
    sinks are acked together.
    """
    self.source = Source(layout)
    sinks = []
    for n, r in enumerate(subrecords):
        s = Sink(layout_partial(layout, *r))
        setattr(self, "sink" + str(n), s)
        sinks.append(s)
    self.busy = Signal()
    ###
    # purely combinatorial: never busy; source valid when all sinks valid
    self.comb += [self.busy.eq(0), self.source.stb.eq(optree("&", [sink.stb for sink in sinks]))]
    # ack every sink simultaneously when the output transaction completes
    self.comb += [sink.ack.eq(self.source.ack & self.source.stb) for sink in sinks]
    # each sink drives its own (disjoint) subset of the source payload
    self.comb += [self.source.payload.eq(sink.payload) for sink in sinks]
def get_fragment(self):
    """Build the address decoder fragment (fragment-based variant).

    Decodes self.master.adr through per-slave decoder functions, fans out
    master-driven signals, gates cyc by selection, ORs ack/err, and muxes
    read data one-hot.
    """
    comb = []
    sync = []
    ns = len(self.slaves)
    slave_sel = Signal(ns)
    slave_sel_r = Signal(ns)
    # decode slave addresses
    comb += [slave_sel[i].eq(fun(self.master.adr)) for i, (fun, bus) in enumerate(self.slaves)]
    # optionally register the select used by the return-data mux
    if self.register:
        sync.append(slave_sel_r.eq(slave_sel))
    else:
        comb.append(slave_sel_r.eq(slave_sel))
    # connect master->slaves signals except cyc
    m2s_names = _desc.get_names(M_TO_S, "cyc")
    comb += [getattr(slave[1], name).eq(getattr(self.master, name)) for name in m2s_names for slave in self.slaves]
    # combine cyc with slave selection signals
    comb += [slave[1].cyc.eq(self.master.cyc & slave_sel[i]) for i, slave in enumerate(self.slaves)]
    # generate master ack (resp. err) by ORing all slave acks (resp. errs)
    comb += [
        self.master.ack.eq(optree("|", [slave[1].ack for slave in self.slaves])),
        self.master.err.eq(optree("|", [slave[1].err for slave in self.slaves]))
    ]
    # mux (1-hot) slave data return
    masked = [Replicate(slave_sel_r[i], len(self.master.dat_r)) & self.slaves[i][1].dat_r for i in range(len(self.slaves))]
    comb.append(self.master.dat_r.eq(optree("|", masked)))
    return Fragment(comb, sync)
def get_binary_control_fragment(self, stb_i, ack_o, stb_o, ack_i):
    """Return the handshake control fragment for a fixed-latency pipeline.

    A valid-bit shift register tracks in-flight data; the pipeline advances
    when downstream accepts or the output stage is empty.
    """
    valid = Signal(self.latency)
    if self.latency > 1:
        # shift the input strobe through the valid pipeline
        sync = [If(self.pipe_ce, valid.eq(Cat(stb_i, valid[:self.latency-1])))]
    else:
        sync = [If(self.pipe_ce, valid.eq(stb_i))]
    last_valid = valid[self.latency-1]
    comb = [
        # advance when downstream acks or there is nothing at the output
        self.pipe_ce.eq(ack_i | ~last_valid),
        ack_o.eq(self.pipe_ce),
        stb_o.eq(last_valid),
        # busy while any stage holds valid data
        self.busy.eq(optree("|", [valid[i] for i in range(self.latency)]))
    ]
    return Fragment(comb, sync)
def connect(self, *slaves):
    """Return statements connecting this master record to the given slaves.

    Per layout field: plain signals are fanned out (M_TO_S) or OR-combined
    into the master (S_TO_M); sub-records are connected recursively.
    Raises TypeError on an unknown direction.
    """
    statements = []
    for f in self.layout:
        field = f[0]
        element = getattr(self, field)
        if not isinstance(element, Signal):
            # nested record: recurse into the matching field of each slave
            for slave in slaves:
                statements += element.connect(getattr(slave, field))
            continue
        direction = f[2]
        if direction == DIR_M_TO_S:
            # fan the master-driven signal out to every slave
            for slave in slaves:
                statements.append(getattr(slave, field).eq(element))
        elif direction == DIR_S_TO_M:
            # OR all slave-driven signals back into the master
            slave_signals = [getattr(slave, field) for slave in slaves]
            statements.append(element.eq(optree("|", slave_signals)))
        else:
            raise TypeError
    return statements
def do_finalize(self):
    """Collect all _EventSource attributes and build the status/pending/
    enable CSR interface plus the combined irq output.

    Sources are ordered by huid so CSR bit positions are deterministic.
    """
    sources_u = [v for k, v in xdir(self, True) if isinstance(v, _EventSource)]
    sources = sorted(sources_u, key=lambda x: x.huid)
    n = len(sources)
    self.status = CSR(n)
    self.pending = CSR(n)
    self.enable = CSRStorage(n)
    for i, source in enumerate(sources):
        self.comb += [
            self.status.w[i].eq(source.status),
            # writing 1 to a pending bit clears the corresponding source
            If(self.pending.re & self.pending.r[i], source.clear.eq(1)),
            self.pending.w[i].eq(source.pending)
        ]
    # irq asserts while any enabled source is pending
    irqs = [self.pending.w[i] & self.enable.storage[i] for i in range(n)]
    self.comb += self.irq.eq(optree("|", irqs))
def do_finalize(self):
    """Build status/pending/enable CSRs and the combined irq output from all
    _EventSource attributes (instance-dict variant).

    Sources are ordered by huid for deterministic CSR bit positions.
    """
    sources_u = [v for v in self.__dict__.values() if isinstance(v, _EventSource)]
    sources = sorted(sources_u, key=lambda x: x.huid)
    n = len(sources)
    self.status = CSR(n)
    self.pending = CSR(n)
    self.enable = CSRStorage(n)
    for i, source in enumerate(sources):
        self.comb += [
            self.status.w[i].eq(source.status),
            # writing 1 to a pending bit clears the corresponding source
            If(self.pending.re & self.pending.r[i], source.clear.eq(1)),
            self.pending.w[i].eq(source.pending)
        ]
    # irq asserts while any enabled source is pending
    irqs = [self.pending.w[i] & self.enable.storage[i] for i in range(n)]
    self.comb += self.irq.eq(optree("|", irqs))
def __init__(self, n_out, n_state=31, taps=[27, 30]): self.o = Signal(n_out) ### state = Signal(n_state) curval = [state[i] for i in range(n_state)] curval += [0] * (n_out - n_state) for i in range(n_out): nv = ~optree("^", [curval[tap] for tap in taps]) curval.insert(0, nv) curval.pop() self.sync += [ state.eq(Cat(*curval[:n_state])), self.o.eq(Cat(*curval)) ]
def __init__(self, required_controls=8):
    """TMDS character boundary synchronizer (pix clock domain).

    Scans a 20-bit window of two consecutive raw words for control tokens;
    after required_controls consecutive detections at the same bit offset,
    locks word_sel to that offset and asserts synced.  Sync state and the
    chosen offset are exported to the sys domain through CSRs.
    """
    self.raw_data = Signal(10)
    self.synced = Signal()
    self.data = Signal(10)
    self._r_char_synced = CSRStatus()
    self._r_ctl_pos = CSRStatus(bits_for(9))
    ###
    # sliding 20-bit window over the previous and current raw words
    raw_data1 = Signal(10)
    self.sync.pix += raw_data1.eq(self.raw_data)
    raw = Signal(20)
    self.comb += raw.eq(Cat(raw_data1, self.raw_data))
    # search all 10 possible alignments for a control token
    found_control = Signal()
    control_position = Signal(max=10)
    self.sync.pix += found_control.eq(0)
    for i in range(10):
        self.sync.pix += If(optree("|", [raw[i:i+10] == t for t in control_tokens]),
            found_control.eq(1),
            control_position.eq(i)
        )
    # require several consecutive detections at a stable offset before
    # declaring sync and committing to that word alignment
    control_counter = Signal(max=required_controls)
    previous_control_position = Signal(max=10)
    word_sel = Signal(max=10)
    self.sync.pix += [
        If(found_control & (control_position == previous_control_position),
            If(control_counter == (required_controls - 1),
                control_counter.eq(0),
                self.synced.eq(1),
                word_sel.eq(control_position)
            ).Else(
                control_counter.eq(control_counter + 1)
            )
        ).Else(
            control_counter.eq(0)
        ),
        previous_control_position.eq(control_position)
    ]
    self.specials += MultiReg(self.synced, self._r_char_synced.status)
    self.specials += MultiReg(word_sel, self._r_ctl_pos.status)
    # realigned output word
    self.sync.pix += self.data.eq(raw >> word_sel)
def get_fragment(self):
    """Broadcast one sink to several sources, completing when all sources
    have accepted.

    already_acked remembers which sources accepted earlier in the current
    transaction, so each source sees the data exactly once.
    """
    sources = [self.endpoints[e] for e in self.sources()]
    sink = self.endpoints[self.sinks()[0]]
    already_acked = Signal(len(sources))
    sync = [
        If(sink.stb,
            # accumulate per-source acks; clear once the sink transaction
            # completes
            already_acked.eq(already_acked | Cat(*[s.ack for s in sources])),
            If(sink.ack, already_acked.eq(0))
        )
    ]
    comb = [
        # the sink completes when every source has acked (now or earlier)
        sink.ack.eq(optree("&", [s.ack | already_acked[n] for n, s in enumerate(sources)]))
    ]
    for n, s in enumerate(sources):
        # do not re-present data to sources that already accepted it
        comb.append(s.stb.eq(sink.stb & ~already_acked[n]))
    return Fragment(comb, sync)
def __init__(self, coef, wsize=16):
    """FIR filter module: signed wsize-bit samples, float coefficients
    converted to fixed point.

    The input shifts through a register chain; tap products are summed and
    the result truncated back to wsize bits on self.o.
    """
    self.coef = coef
    self.wsize = wsize
    self.i = Signal((self.wsize, True))
    self.o = Signal((self.wsize, True))
    ###
    muls = []
    src = self.i
    for c in self.coef:
        # delay line: each coefficient taps one register deeper
        sreg = Signal((self.wsize, True))
        self.sync += sreg.eq(src)
        src = sreg
        # convert the float coefficient to signed fixed point
        c_fp = int(c*2**(self.wsize - 1))
        muls.append(c_fp*sreg)
    sum_full = Signal((2*self.wsize-1, True))
    self.sync += sum_full.eq(optree("+", muls))
    # drop the fractional bits introduced by the fixed-point coefficients
    self.comb += self.o.eq(sum_full[self.wsize-1:])
def __init__(self, coef, wsize=16):
    """FIR filter module (spaced-operator variant): signed wsize-bit samples,
    float coefficients converted to fixed point.

    Input shifts through a register chain; tap products are summed and the
    result truncated back to wsize bits on self.o.
    """
    self.coef = coef
    self.wsize = wsize
    self.i = Signal((self.wsize, True))
    self.o = Signal((self.wsize, True))
    ###
    muls = []
    src = self.i
    for c in self.coef:
        # delay line: each coefficient taps one register deeper
        sreg = Signal((self.wsize, True))
        self.sync += sreg.eq(src)
        src = sreg
        # convert the float coefficient to signed fixed point
        c_fp = int(c * 2**(self.wsize - 1))
        muls.append(c_fp * sreg)
    sum_full = Signal((2 * self.wsize - 1, True))
    self.sync += sum_full.eq(optree("+", muls))
    # drop the fractional bits introduced by the fixed-point coefficients
    self.comb += self.o.eq(sum_full[self.wsize - 1:])
def __init__(self, dfg): dfg.elaborate() # expose unconnected endpoints uc_eps_by_node = dict((node, get_endpoints(node)) for node in dfg) for u, v, d in dfg.edges_iter(data=True): uc_eps_u = uc_eps_by_node[u] source = d["source"] try: del uc_eps_u[source] except KeyError: pass uc_eps_v = uc_eps_by_node[v] sink = d["sink"] try: del uc_eps_v[sink] except KeyError: pass for node, uc_eps in uc_eps_by_node.items(): for k, v in uc_eps.items(): assert(not hasattr(self, k)) setattr(self, k, v) # connect abstract busy signals for node in dfg: try: abstract_busy_signal = dfg.abstract_busy_signals[id(node)] except KeyError: pass else: self.comb += abstract_busy_signal.eq(node.busy) # generate busy signal self.busy = Signal() self.comb += self.busy.eq(optree("|", [node.busy for node in dfg])) # claim ownership of sub-actors and establish connections for node in dfg: self.submodules += node for u, v, d in dfg.edges_iter(data=True): ep_src = getattr(u, d["source"]) ep_dst = getattr(v, d["sink"]) self.comb += ep_src.connect_flat(ep_dst)
def __init__(self, layout, subrecords):
    """Combine several partial-layout sinks into one source (formatted
    variant).

    Each subrecords entry selects the payload fields of sink<n>; the source
    strobes when all sinks strobe and all sinks are acked together.
    """
    self.source = Source(layout)
    sinks = []
    for n, r in enumerate(subrecords):
        s = Sink(layout_partial(layout, *r))
        setattr(self, "sink" + str(n), s)
        sinks.append(s)
    self.busy = Signal()
    ###
    # purely combinatorial: never busy; source valid when all sinks valid
    self.comb += [
        self.busy.eq(0),
        self.source.stb.eq(optree("&", [sink.stb for sink in sinks]))
    ]
    # ack every sink simultaneously when the output transaction completes
    self.comb += [
        sink.ack.eq(self.source.ack & self.source.stb) for sink in sinks
    ]
    # each sink drives its own (disjoint) subset of the source payload
    self.comb += [self.source.payload.eq(sink.payload) for sink in sinks]
def __init__(self, dat_width, width, polynom):
    """Combinatorial next-state logic of an LFSR-based CRC.

    dat_width : number of input data bits folded in per clock.
    width     : CRC register width.
    polynom   : CRC polynomial, implicit x**width term omitted.
    Interface: data (input bits), last (current CRC), next (next CRC).
    """
    self.data = Signal(dat_width)
    self.last = Signal(width)
    self.next = Signal(width)

    # # #

    def _optimize_eq(l):
        """
        Replace even numbers of XORs in the equation
        with an equivalent XOR
        """
        # count occurrences of each term; pairs of identical XOR terms cancel
        d = OrderedDict()
        for e in l:
            if e in d:
                d[e] += 1
            else:
                d[e] = 1
        r = []
        for key, value in d.items():
            if value%2 != 0:
                r.append(key)
        return r

    # compute and optimize CRC's LFSR
    # curval[p] is the symbolic XOR equation of CRC bit p; each iteration
    # shifts in one data bit and injects the feedback where the polynomial
    # has a set bit
    curval = [[("state", i)] for i in range(width)]
    for i in range(dat_width):
        feedback = curval.pop() + [("din", i)]
        for j in range(width-1):
            if (polynom & (1<<(j+1))):
                curval[j] += feedback
                curval[j] = _optimize_eq(curval[j])
        curval.insert(0, feedback)

    # implement logic
    for i in range(width):
        xors = []
        for t, n in curval[i]:
            if t == "state":
                xors += [self.last[n]]
            elif t == "din":
                xors += [self.data[n]]
        self.comb += self.next[i].eq(optree("^", xors))
def __init__(self, dat_width, width, polynom):
    """Combinatorial next-state logic of an LFSR-based CRC.

    dat_width : number of input data bits folded in per clock.
    width     : CRC register width.
    polynom   : CRC polynomial, implicit x**width term omitted.
    Interface: d (input bits), last (current CRC), next (next CRC).

    Bug fix: the feedback-injection loop previously ran ``range(1, width-1)``
    and never XORed the feedback into bit width-1, producing wrong CRCs for
    polynomials with that bit set (e.g. CRC-16/IBM, 0x8005).  It now covers
    bits 1..width-1, matching the sibling implementation that injects
    feedback for every set polynomial bit above bit 0.
    """
    self.d = Signal(dat_width)
    self.last = Signal(width)
    self.next = Signal(width)
    ###
    def _optimize_eq(l):
        """
        Replace even numbers of XORs in the equation
        with an equivalent XOR
        """
        # count occurrences of each term; identical XOR pairs cancel out
        d = {}
        for e in l:
            if e in d:
                d[e] += 1
            else:
                d[e] = 1
        r = []
        for key, value in d.items():
            if value % 2 != 0:
                r.append(key)
        return r
    # compute and optimize CRC's LFSR
    # curval[p] is the symbolic XOR equation of CRC bit p; each iteration
    # shifts in one data bit and injects the feedback where the polynomial
    # has a set bit
    curval = [[("state", i)] for i in range(width)]
    for i in range(dat_width):
        feedback = curval.pop() + [("din", i)]
        curval.insert(0, feedback)
        for j in range(1, width):
            if (polynom & (1 << j)):
                curval[j] += feedback
                curval[j] = _optimize_eq(curval[j])
    # implement logic
    for i in range(width):
        xors = []
        for t, n in curval[i]:
            if t == "state":
                xors += [self.last[n]]
            elif t == "din":
                xors += [self.d[n]]
        self.comb += self.next[i].eq(optree("^", xors))
def connect_flat(self, *slaves):
    """Return statements connecting this record's flattened signals to the
    same-position signals of every slave record.

    Records are walked in lockstep.  M_TO_S signals fan out to every slave;
    S_TO_M signals are OR-combined back into the master.  Raises TypeError
    on any other direction.
    """
    statements = []
    slave_iters = [slave.iter_flat() for slave in slaves]
    for m_signal, m_direction in self.iter_flat():
        if m_direction != DIR_M_TO_S and m_direction != DIR_S_TO_M:
            raise TypeError
        # pull the matching signal from every slave, checking that its
        # direction agrees with the master's
        slave_signals = []
        for it in slave_iters:
            s_signal, s_direction = next(it)
            assert(s_direction == m_direction)
            slave_signals.append(s_signal)
        if m_direction == DIR_M_TO_S:
            statements += [s.eq(m_signal) for s in slave_signals]
        else:
            statements.append(m_signal.eq(optree("|", slave_signals)))
    return statements
def connect_flat(self, *slaves):
    """Connect this record's flattened signals to those of every slave
    record, walking all records in lockstep.

    M_TO_S signals fan out to each slave; S_TO_M signals are OR-combined
    into the master.  Raises TypeError on any other direction.
    """
    r = []
    iter_slaves = [slave.iter_flat() for slave in slaves]
    for m_signal, m_direction in self.iter_flat():
        if m_direction == DIR_M_TO_S:
            # fan the master-driven signal out to every slave
            for iter_slave in iter_slaves:
                s_signal, s_direction = next(iter_slave)
                assert (s_direction == DIR_M_TO_S)
                r.append(s_signal.eq(m_signal))
        elif m_direction == DIR_S_TO_M:
            # OR all slave-driven signals back into the master
            s_signals = []
            for iter_slave in iter_slaves:
                s_signal, s_direction = next(iter_slave)
                assert (s_direction == DIR_S_TO_M)
                s_signals.append(s_signal)
            r.append(m_signal.eq(optree("|", s_signals)))
        else:
            raise TypeError
    return r
def __init__(self, required_controls=8):
    """TMDS character boundary synchronizer (formatted variant).

    Searches a 20-bit window of two consecutive raw words for control tokens
    and, after required_controls consecutive hits at the same bit offset,
    locks the word alignment and asserts synced.  Sync state and offset are
    exported to the sys domain through CSRs.
    """
    self.raw_data = Signal(10)
    self.synced = Signal()
    self.data = Signal(10)
    self._char_synced = CSRStatus()
    self._ctl_pos = CSRStatus(bits_for(9))
    ###
    # sliding 20-bit window over the previous and current raw words
    raw_data1 = Signal(10)
    self.sync.pix += raw_data1.eq(self.raw_data)
    raw = Signal(20)
    self.comb += raw.eq(Cat(raw_data1, self.raw_data))
    # search all 10 possible alignments for a control token
    found_control = Signal()
    control_position = Signal(max=10)
    self.sync.pix += found_control.eq(0)
    for i in range(10):
        self.sync.pix += If(
            optree("|", [raw[i:i + 10] == t for t in control_tokens]),
            found_control.eq(1),
            control_position.eq(i))
    # require several consecutive detections at a stable offset before
    # declaring sync and committing to that word alignment
    control_counter = Signal(max=required_controls)
    previous_control_position = Signal(max=10)
    word_sel = Signal(max=10)
    self.sync.pix += [
        If(
            found_control & (control_position == previous_control_position),
            If(control_counter == (required_controls - 1),
               control_counter.eq(0),
               self.synced.eq(1),
               word_sel.eq(control_position)).Else(
                   control_counter.eq(control_counter + 1))).Else(
                       control_counter.eq(0)),
        previous_control_position.eq(control_position)
    ]
    self.specials += MultiReg(self.synced, self._char_synced.status)
    self.specials += MultiReg(word_sel, self._ctl_pos.status)
    # realigned output word
    self.sync.pix += self.data.eq(raw >> word_sel)
def connect(self, *slaves):
    """Return statements wiring this record (master side) to *slaves*.

    Plain signals are connected according to their layout direction:
    M->S fans out to every slave, S->M ORs the slave signals into the
    master. Sub-records are connected recursively.
    """
    stmts = []
    for f in self.layout:
        field = f[0]
        item = getattr(self, field)
        if isinstance(item, Signal):
            direction = f[2]
            if direction == DIR_M_TO_S:
                for slave in slaves:
                    stmts.append(getattr(slave, field).eq(item))
            elif direction == DIR_S_TO_M:
                sources = [getattr(slave, field) for slave in slaves]
                stmts.append(item.eq(optree("|", sources)))
            else:
                raise TypeError
        else:
            # Sub-record: recurse into the matching field of each slave.
            for slave in slaves:
                stmts += item.connect(getattr(slave, field))
    return stmts
def __init__(self, layout, subrecords):
    """Stream splitter: broadcasts one sink to several partial sources.

    Each entry of *subrecords* selects a subset of *layout* fields; a
    corresponding source endpoint is created as self.sourceN. A sink
    transaction completes only once every source has accepted it.
    """
    self.sink = Sink(layout)
    sources = []
    for n, r in enumerate(subrecords):
        s = Source(layout_partial(layout, *r))
        setattr(self, "source" + str(n), s)
        sources.append(s)
    self.busy = Signal()

    ###

    # The payload is broadcast combinatorially to every source.
    self.comb += [
        source.payload.eq(self.sink.payload) for source in sources
    ]
    # Remember which sources have already acked the current sink payload,
    # so a slow source does not force fast ones to re-accept the data.
    # The inner If comes later in the same body, so clearing on sink.ack
    # takes priority over accumulation (Migen last-assignment-wins).
    already_acked = Signal(len(sources))
    self.sync += If(
        self.sink.stb,
        already_acked.eq(already_acked | Cat(*[s.ack for s in sources])),
        If(self.sink.ack, already_acked.eq(0)))
    # Ack the sink only when every source has acked (now or previously).
    self.comb += self.sink.ack.eq(
        optree("&",
               [s.ack | already_acked[n] for n, s in enumerate(sources)]))
    # Strobe only the sources that have not yet taken the payload.
    for n, s in enumerate(sources):
        self.comb += s.stb.eq(self.sink.stb & ~already_acked[n])
def __init__(self, *event_managers):
    """Combine the irq outputs of several EventManagers into one line."""
    self.irq = Signal()
    irq_lines = [manager.irq for manager in event_managers]
    self.comb += self.irq.eq(optree("|", irq_lines))
def __init__(self):
    """Line sequencer: plays timed interpolation lines from a stream.

    Consumes line records from self.sink, dwells on each line for the
    programmed duration (dt lines scaled by 2**shift), and sums the data
    outputs of the interpolating sub-channels into self.data.
    """
    self.sink = Sink(line_layout)
    self.trigger = Signal()
    self.aux = Signal()
    self.silence = Signal()
    self.arm = Signal()
    self.data = Signal(16)

    ###

    line = Record(line_layout)      # currently-playing line
    dt_dec = Signal(16)             # prescaler counter (0..dt_end)
    dt_end = Signal(16)             # prescaler period, (1 << shift) - 1
    dt = Signal(16)                 # coarse duration counter
    adv = Signal()                  # allowed to advance to the next line
    tic = Signal()                  # prescaler rollover
    toc = Signal()                  # line duration elapsed
    stb = Signal()                  # load the next line this cycle
    toc0 = Signal()                 # toc delayed by one cycle
    inc = Signal()                  # advance the interpolators

    lp = self.sink.payload
    self.comb += [
        # Advance only when armed and data is available, and either a
        # trigger is present or neither line requests waiting for one.
        adv.eq(self.arm & self.sink.stb
               & (self.trigger | ~(line.header.wait | lp.header.trigger))),
        tic.eq(dt_dec == dt_end),
        toc.eq(dt == line.dt),
        stb.eq(tic & toc & adv),
        self.sink.ack.eq(stb),
        inc.eq(self.arm & tic & (~toc | (~toc0 & ~adv))),
    ]
    # One sub-channel per line type; the type field selects which one
    # latches the new line on stb.
    subs = [
        Volt(lp, stb & (lp.header.typ == 0), inc),
        Dds(lp, stb & (lp.header.typ == 1), inc),
    ]
    for i, sub in enumerate(subs):
        self.submodules += sub
    self.sync += [
        toc0.eq(toc),
        # Sum of all sub-channel outputs.
        self.data.eq(optree("+", [sub.data for sub in subs])),
        self.aux.eq(line.header.aux),
        self.silence.eq(line.header.silence),
        If(
            ~tic,               # still counting down the prescaler
            dt_dec.eq(dt_dec + 1),
        ).Elif(
            ~toc,               # prescaler rolled over, line not finished
            dt_dec.eq(0),
            dt.eq(dt + 1),
        ).Elif(
            stb,                # line finished and successor available
            line.header.eq(lp.header),
            line.dt.eq(lp.dt - 1),
            dt_end.eq((1 << lp.header.shift) - 1),
            dt_dec.eq(0),
            dt.eq(0),
        )
    ]
def __init__(self, slaves, depth=256, bus=None, with_wishbone=True):
    """Cycle-timed event router between a CPU and address-mapped slaves.

    Outgoing events queue in out_fifo and are dispatched to the matching
    slave when the cycle counter reaches their timestamp; incoming slave
    events are timestamped and collected in in_fifo. Both FIFOs are
    reachable via CSRs and, optionally, a faster Wishbone window.
    """
    time_width, addr_width, data_width = [_[1] for _ in ventilator_layout]
    self.submodules.ctrl = CycleControl()
    self.submodules.ev = ev = EventManager()
    ev.in_readable = EventSourceLevel()
    ev.out_overflow = EventSourceProcess()
    ev.in_overflow = EventSourceLevel()
    ev.out_readable = EventSourceProcess()
    ev.stopped = EventSourceProcess()
    ev.started = EventSourceProcess()
    ev.finalize()
    self._in_time = CSRStatus(time_width)
    self._in_addr = CSRStatus(addr_width)
    self._in_data = CSRStatus(data_width)
    self._in_next = CSR()
    self._in_flush = CSR()
    self._out_time = CSRStorage(time_width, write_from_dev=with_wishbone)
    self._out_addr = CSRStorage(addr_width, write_from_dev=with_wishbone)
    self._out_data = CSRStorage(data_width, write_from_dev=with_wishbone)
    self._out_next = CSR()
    self._out_flush = CSR()
    self.busy = Signal()

    ###

    if with_wishbone:
        if bus is None:
            bus = wishbone.Interface()
        self.bus = bus

    # The cycle controller itself is mapped as slave 0.
    slaves = [(self.ctrl, 0x00000000, 0xffffff00)] + slaves

    self.submodules.in_fifo = in_fifo = SyncFIFOBuffered(
        ventilator_layout, depth)
    self.submodules.out_fifo = out_fifo = SyncFIFOBuffered(
        ventilator_layout, depth)
    self.submodules.enc = PriorityEncoder(len(slaves))
    wb_in_next = Signal()
    wb_out_next = Signal()
    out_request = Signal()
    in_request = Signal()

    # CSRs and Events
    self.comb += [
        ev.in_readable.trigger.eq(in_fifo.readable),
        ev.out_overflow.trigger.eq(~out_fifo.writable),
        ev.in_overflow.trigger.eq(~in_fifo.writable),
        ev.out_readable.trigger.eq(out_fifo.readable),
        ev.started.trigger.eq(~self.ctrl.run),
        ev.stopped.trigger.eq(self.ctrl.run),
        self.ctrl.have_in.eq(~self.enc.n),
        self.ctrl.have_out.eq(out_fifo.readable),
        self._in_time.status.eq(in_fifo.dout.time),
        self._in_addr.status.eq(in_fifo.dout.addr),
        self._in_data.status.eq(in_fifo.dout.data),
        in_fifo.re.eq(self._in_next.re | wb_in_next),
        in_fifo.flush.eq(self._in_flush.re),
        out_fifo.din.time.eq(self._out_time.storage),
        out_fifo.din.addr.eq(self._out_addr.storage),
        out_fifo.din.data.eq(self._out_data.storage),
        out_fifo.we.eq(self._out_next.re | wb_out_next),
        out_fifo.flush.eq(self._out_flush.re),
    ]

    # din dout strobing
    self.comb += [
        # TODO: 0 <= diff <= plausibility range
        out_request.eq(out_fifo.readable & self.ctrl.run
                       & (self.ctrl.cycle == out_fifo.dout.time)),
        # ignore in_fifo.writable
        in_request.eq(~self.enc.n & self.ctrl.run),
        self.busy.eq(out_request | in_request),
    ]

    # to slaves
    addrs = []
    datas = []
    stbs = []
    acks = []
    for i, (slave, prefix, mask) in enumerate(slaves):
        prefix &= mask
        source = Source(slave_layout)
        sink = Sink(slave_layout)
        self.comb += [
            source.connect(slave.dout),
            sink.connect(slave.din),
        ]
        sel = Signal()
        acks.append(sel & source.ack)
        # Reconstruct the full address from the slave-local address and
        # the slave's prefix.
        addrs.append(prefix | (sink.payload.addr & (~mask & 0xffffffff)))
        datas.append(sink.payload.data)
        stbs.append(sink.stb)
        self.comb += [
            # Address decode: event targets this slave if the masked
            # address matches the prefix.
            sel.eq(out_fifo.dout.addr & mask == prefix),
            source.payload.addr.eq(out_fifo.dout.addr),
            source.payload.data.eq(out_fifo.dout.data),
            source.stb.eq(sel & out_request),
            sink.ack.eq((self.enc.o == i) & in_request),
        ]
    # Pop the outgoing event once the addressed slave has acked it.
    self.comb += out_fifo.re.eq(out_request & optree("|", acks))

    # from slaves: priority-encode pending slave strobes and timestamp
    # the selected event with the current cycle.
    self.comb += [
        self.enc.i.eq(Cat(stbs)),
        in_fifo.din.time.eq(self.ctrl.cycle),
        in_fifo.din.addr.eq(Array(addrs)[self.enc.o]),
        in_fifo.din.data.eq(Array(datas)[self.enc.o]),
        in_fifo.we.eq(in_request),
    ]

    # optional high throughput wishbone access
    if with_wishbone:
        self.comb += [
            self._out_time.dat_w.eq(bus.dat_w),
            self._out_addr.dat_w.eq(bus.dat_w),
            self._out_data.dat_w.eq(bus.dat_w),
            If(
                bus.cyc & bus.stb,
                If(
                    bus.we,
                    # Write map: 0x5 pops in_fifo, 0x6-0x8 stage the
                    # outgoing event fields, 0x9 pushes out_fifo.
                    Case(
                        bus.adr[:4], {
                            0x5: wb_in_next.eq(1),
                            0x6: self._out_time.we.eq(1),
                            0x7: self._out_addr.we.eq(1),
                            0x8: self._out_data.we.eq(1),
                            0x9: wb_out_next.eq(1),
                        }),
                ),
                # Read map: cycle counter, event status, incoming event.
                Case(
                    bus.adr[:4], {
                        0x0: bus.dat_r.eq(self.ctrl.cycle),
                        0x1: bus.dat_r.eq(self.ev.status.w),
                        0x2: bus.dat_r.eq(in_fifo.dout.time),
                        0x3: bus.dat_r.eq(in_fifo.dout.addr),
                        0x4: bus.dat_r.eq(in_fifo.dout.data),
                    }),
            )
        ]
        # Single-cycle ack: asserted one cycle per access, self-clearing.
        self.sync += bus.ack.eq(bus.cyc & bus.stb & ~bus.ack)
def __init__(self, phy_settings, geom_settings, timing_settings):
    """Minimal SDRAM controller with a Wishbone front-end and DFI back-end.

    One Wishbone access maps to one burst; a single FSM sequences
    precharge/activate/read/write/refresh commands on the DFI phases.
    """
    if phy_settings.memtype in ["SDR"]:
        burst_length = phy_settings.nphases * 1  # command multiplication*SDR
    elif phy_settings.memtype in ["DDR", "LPDDR", "DDR2", "DDR3"]:
        burst_length = phy_settings.nphases * 2  # command multiplication*DDR
    burst_width = phy_settings.dfi_databits * phy_settings.nphases
    address_align = log2_int(burst_length)

    # # #

    self.dfi = dfi = dfibus.Interface(geom_settings.addressbits,
                                      geom_settings.bankbits,
                                      phy_settings.dfi_databits,
                                      phy_settings.nphases)
    self.bus = bus = wishbone.Interface(burst_width)

    rdphase = phy_settings.rdphase
    wrphase = phy_settings.wrphase

    # One-hot command indicators driven by the FSM states below.
    precharge_all = Signal()
    activate = Signal()
    refresh = Signal()
    write = Signal()
    read = Signal()

    # Compute current column, bank and row from wishbone address
    slicer = _AddressSlicer(geom_settings.colbits, geom_settings.bankbits,
                            geom_settings.rowbits, address_align)

    # Manage banks
    # NOTE(review): bank_open is declared but never driven or read here.
    bank_open = Signal()
    bank_idle = Signal()
    bank_hit = Signal()

    banks = []
    for i in range(2**geom_settings.bankbits):
        bank = _Bank(geom_settings)
        self.comb += [
            bank.open.eq(activate),
            bank.reset.eq(precharge_all),
            bank.row.eq(slicer.row(bus.adr))
        ]
        banks.append(bank)
    self.submodules += banks

    # Chip-enable exactly the bank addressed by the current access.
    cases = {}
    for i, bank in enumerate(banks):
        cases[i] = [bank.ce.eq(1)]
    self.comb += Case(slicer.bank(bus.adr), cases)

    # Only the ce'd bank contributes, so the OR reduces to its status.
    self.comb += [
        bank_hit.eq(optree("|", [bank.hit & bank.ce for bank in banks])),
        bank_idle.eq(optree("|", [bank.idle & bank.ce for bank in banks])),
    ]

    # Timings
    write2precharge_timer = WaitTimer(2 + timing_settings.tWR - 1)
    self.submodules += write2precharge_timer
    self.comb += write2precharge_timer.wait.eq(~write)

    refresh_timer = WaitTimer(timing_settings.tREFI)
    self.submodules += refresh_timer
    self.comb += refresh_timer.wait.eq(~refresh)

    # Main FSM
    self.submodules.fsm = fsm = FSM()
    fsm.act(
        "IDLE",
        # Refresh takes priority; otherwise serve the pending access:
        # row hit -> read/write, row conflict -> precharge (after tWR),
        # bank idle -> activate the target row.
        If(refresh_timer.done, NextState("PRECHARGE-ALL")).Elif(
            bus.stb & bus.cyc,
            If(bank_hit,
               If(bus.we, NextState("WRITE")).Else(NextState("READ"))).Elif(
                   ~bank_idle,
                   If(write2precharge_timer.done,
                      NextState("PRECHARGE"))).Else(NextState("ACTIVATE"))))
    fsm.act(
        "READ",
        read.eq(1),
        dfi.phases[rdphase].ras_n.eq(1),
        dfi.phases[rdphase].cas_n.eq(0),
        dfi.phases[rdphase].we_n.eq(1),
        dfi.phases[rdphase].rddata_en.eq(1),
        NextState("WAIT-READ-DONE"),
    )
    fsm.act(
        "WAIT-READ-DONE",
        If(dfi.phases[rdphase].rddata_valid, bus.ack.eq(1),
           NextState("IDLE")))
    fsm.act("WRITE", write.eq(1), dfi.phases[wrphase].ras_n.eq(1),
            dfi.phases[wrphase].cas_n.eq(0), dfi.phases[wrphase].we_n.eq(0),
            dfi.phases[wrphase].wrdata_en.eq(1), NextState("WRITE-LATENCY"))
    fsm.act("WRITE-ACK", bus.ack.eq(1), NextState("IDLE"))
    fsm.act("PRECHARGE-ALL", precharge_all.eq(1),
            dfi.phases[rdphase].ras_n.eq(0), dfi.phases[rdphase].cas_n.eq(1),
            dfi.phases[rdphase].we_n.eq(0), NextState("PRE-REFRESH"))
    fsm.act(
        "PRECHARGE",
        # do no reset bank since we are going to re-open it
        dfi.phases[0].ras_n.eq(0),
        dfi.phases[0].cas_n.eq(1),
        dfi.phases[0].we_n.eq(0),
        NextState("TRP"))
    fsm.act(
        "ACTIVATE",
        activate.eq(1),
        dfi.phases[0].ras_n.eq(0),
        dfi.phases[0].cas_n.eq(1),
        dfi.phases[0].we_n.eq(1),
        NextState("TRCD"),
    )
    fsm.act("REFRESH", refresh.eq(1), dfi.phases[rdphase].ras_n.eq(0),
            dfi.phases[rdphase].cas_n.eq(0), dfi.phases[rdphase].we_n.eq(1),
            NextState("POST-REFRESH"))
    # Fixed-delay transitions implementing the DRAM timing parameters.
    fsm.delayed_enter("WRITE-LATENCY", "WRITE-ACK",
                      phy_settings.write_latency - 1)
    fsm.delayed_enter("TRP", "ACTIVATE", timing_settings.tRP - 1)
    fsm.delayed_enter("TRCD", "IDLE", timing_settings.tRCD - 1)
    fsm.delayed_enter("PRE-REFRESH", "REFRESH", timing_settings.tRP - 1)
    fsm.delayed_enter("POST-REFRESH", "IDLE", timing_settings.tRFC - 1)

    # DFI commands
    for phase in dfi.phases:
        if hasattr(phase, "reset_n"):
            self.comb += phase.reset_n.eq(1)
        if hasattr(phase, "odt"):
            self.comb += phase.odt.eq(1)
        self.comb += [
            phase.cke.eq(1),
            phase.cs_n.eq(0),
            phase.bank.eq(slicer.bank(bus.adr)),
            # Address bus: A10 for precharge-all, row for activate,
            # column for read/write.
            If(precharge_all, phase.address.eq(2**10)).Elif(
                activate, phase.address.eq(slicer.row(bus.adr))).Elif(
                    write | read, phase.address.eq(slicer.col(bus.adr)))
        ]

    # DFI datapath
    self.comb += [
        bus.dat_r.eq(Cat(phase.rddata for phase in dfi.phases)),
        Cat(phase.wrdata for phase in dfi.phases).eq(bus.dat_w),
        # DFI masks are active-low relative to Wishbone byte selects.
        Cat(phase.wrdata_mask for phase in dfi.phases).eq(~bus.sel),
    ]
def __init__(self, interface, counter, fifo_depth, guard_io_cycles):
    """Timestamped output event manager for one RTIO channel.

    Events written in the rsys domain pass through a single-entry buffer
    (enabling replacement and error detection) into an async FIFO, and
    are emitted on *interface* in the rio domain when the channel's time
    counter reaches their coarse timestamp.
    """
    data_width = rtlink.get_data_width(interface)
    address_width = rtlink.get_address_width(interface)
    fine_ts_width = rtlink.get_fine_ts_width(interface)

    # Event layout is channel-specific: optional data/address fields
    # plus a full-width timestamp.
    ev_layout = []
    if data_width:
        ev_layout.append(("data", data_width))
    if address_width:
        ev_layout.append(("address", address_width))
    ev_layout.append(("timestamp", counter.width + fine_ts_width))
    # ev must be valid 1 cycle before we to account for the latency in
    # generating replace, sequence_error and nop
    self.ev = Record(ev_layout)

    self.writable = Signal()
    self.we = Signal()  # maximum throughput 1/2
    self.underflow = Signal()  # valid 1 cycle after we, pulsed
    self.sequence_error = Signal()
    self.collision_error = Signal()

    # # #

    # FIFO crossing from the system (rsys) to the I/O (rio) domain.
    fifo = RenameClockDomains(AsyncFIFO(ev_layout, fifo_depth), {
        "write": "rsys",
        "read": "rio"
    })
    self.submodules += fifo

    # Buffer: the most recently written, not-yet-committed event.
    buf_pending = Signal()
    buf = Record(ev_layout)
    buf_just_written = Signal()

    # Special cases
    replace = Signal()
    sequence_error = Signal()
    collision_error = Signal()
    any_error = Signal()
    nop = Signal()
    self.sync.rsys += [
        # Note: replace does not perform any RTLink address checks,
        # i.e. a write to a different address will be silently replaced
        # as well.
        replace.eq(self.ev.timestamp == buf.timestamp),
        # Detect sequence errors on coarse timestamps only
        # so that they are mutually exclusive with collision errors.
        sequence_error.eq(self.ev.timestamp[fine_ts_width:] <
                          buf.timestamp[fine_ts_width:])
    ]
    if fine_ts_width:
        # Collision: same coarse timestamp, different fine timestamp.
        self.sync.rsys += collision_error.eq(
            (self.ev.timestamp[fine_ts_width:] ==
             buf.timestamp[fine_ts_width:])
            & (self.ev.timestamp[:fine_ts_width] !=
               buf.timestamp[:fine_ts_width]))
    self.comb += any_error.eq(sequence_error | collision_error)
    if interface.suppress_nop:
        # disable NOP at reset: do not suppress a first write with all 0s
        nop_en = Signal(reset=0)
        self.sync.rsys += [
            # NOP: new event carries the same data/address as the
            # buffered one.
            nop.eq(nop_en & optree("&", [
                getattr(self.ev, a) == getattr(buf, a)
                for a in ("data", "address") if hasattr(self.ev, a)
            ], default=0)),
            # buf now contains valid data. enable NOP.
            If(self.we & ~any_error, nop_en.eq(1)),
            # underflows cancel the write. allow it to be retried.
            If(self.underflow, nop_en.eq(0))
        ]
    self.comb += [
        self.sequence_error.eq(self.we & sequence_error),
        self.collision_error.eq(self.we & collision_error)
    ]

    # Buffer read and FIFO write
    self.comb += fifo.din.eq(buf)
    in_guard_time = Signal()
    # An event inside the guard window must be committed now (or flagged
    # as underflow) so the rio side can still emit it on time.
    self.comb += in_guard_time.eq(
        buf.timestamp[fine_ts_width:] < counter.value_sys + guard_io_cycles)
    self.sync.rsys += If(in_guard_time, buf_pending.eq(0))
    self.comb += \
        If(buf_pending,
            If(in_guard_time,
                If(buf_just_written,
                    self.underflow.eq(1)
                ).Else(
                    fifo.we.eq(1)
                )
            ),
            # A non-replacing, non-NOP write pushes the buffered event.
            If(self.we & ~replace & ~nop & ~any_error,
                fifo.we.eq(1)
            )
        )

    # Buffer write
    # Must come after read to handle concurrent read+write properly
    self.sync.rsys += [
        buf_just_written.eq(0),
        If(self.we & ~nop & ~any_error, buf_just_written.eq(1),
           buf_pending.eq(1), buf.eq(self.ev))
    ]
    self.comb += self.writable.eq(fifo.writable)

    # Buffer output of FIFO to improve timing
    dout_stb = Signal()
    dout_ack = Signal()
    dout = Record(ev_layout)
    self.sync.rio += \
        If(fifo.re,
            dout_stb.eq(1),
            dout.eq(fifo.dout)
        ).Elif(dout_ack,
            dout_stb.eq(0)
        )
    self.comb += fifo.re.eq(fifo.readable & (~dout_stb | dout_ack))

    # FIFO read through buffer
    # TODO: report error on stb & busy
    self.comb += [
        # Emit the event when the rio counter matches its coarse time.
        dout_ack.eq(dout.timestamp[fine_ts_width:] ==
                    counter.value_rio),
        interface.stb.eq(dout_stb & dout_ack)
    ]
    if data_width:
        self.comb += interface.data.eq(dout.data)
    if address_width:
        self.comb += interface.address.eq(dout.address)
    if fine_ts_width:
        self.comb += interface.fine_ts.eq(dout.timestamp[:fine_ts_width])
def do_finalize(self):
    """Build the LASMI crossbar connecting masters to controllers/banks.

    For every controller, a per-bank round-robin arbiter grants one
    master at a time; requests, write data (delayed by the write
    latency) and read data (selected by controller, delayed by the read
    latency) are routed accordingly.

    Bug fix: the read-routing loop iterated ``range(nbanks)`` where
    ``nbanks`` is undefined in this scope — it raised NameError whenever
    ``self._controller_bits`` was non-zero. It now uses ``self._nbanks``,
    consistent with the arbiter construction above.
    """
    nmasters = len(self._masters)
    # Split each master address into controller / bank / row-column parts.
    m_ca, m_ba, m_rca = self._split_master_addresses(self._controller_bits,
                                                     self._bank_bits,
                                                     self._rca_bits,
                                                     self._cba_shift)

    for nc, controller in enumerate(self._controllers):
        if self._controller_bits:
            # A master targets this controller only when its
            # controller-address field matches.
            controller_selected = [ca == nc for ca in m_ca]
        else:
            controller_selected = [1] * nmasters
        master_req_acks = [0] * nmasters
        master_dat_acks = [0] * nmasters

        rrs = [
            roundrobin.RoundRobin(nmasters, roundrobin.SP_CE)
            for n in range(self._nbanks)
        ]
        self.submodules += rrs
        for nb, rr in enumerate(rrs):
            bank = getattr(controller, "bank" + str(nb))

            # for each master, determine if another bank locks it
            master_locked = []
            for nm, master in enumerate(self._masters):
                locked = 0
                for other_nb, other_rr in enumerate(rrs):
                    if other_nb != nb:
                        other_bank = getattr(controller,
                                             "bank" + str(other_nb))
                        locked = locked | (other_bank.lock &
                                           (other_rr.grant == nm))
                master_locked.append(locked)

            # arbitrate among masters that target this controller and
            # bank and are not locked by another bank
            bank_selected = [
                cs & (ba == nb) & ~locked for cs, ba, locked in zip(
                    controller_selected, m_ba, master_locked)
            ]
            bank_requested = [
                bs & master.stb
                for bs, master in zip(bank_selected, self._masters)
            ]
            self.comb += [
                rr.request.eq(Cat(*bank_requested)),
                # Only re-arbitrate when the bank is idle and unlocked.
                rr.ce.eq(~bank.stb & ~bank.lock)
            ]

            # route requests from the granted master to the bank
            self.comb += [
                bank.adr.eq(Array(m_rca)[rr.grant]),
                bank.we.eq(Array(self._masters)[rr.grant].we),
                bank.stb.eq(Array(bank_requested)[rr.grant])
            ]
            # Accumulate per-master ack terms across banks.
            master_req_acks = [
                master_req_ack | ((rr.grant == nm) & bank_selected[nm]
                                  & bank.req_ack)
                for nm, master_req_ack in enumerate(master_req_acks)
            ]
            master_dat_acks = [
                master_dat_ack | ((rr.grant == nm) & bank.dat_ack)
                for nm, master_dat_ack in enumerate(master_dat_acks)
            ]

        self.comb += [
            master.req_ack.eq(master_req_ack) for master, master_req_ack in
            zip(self._masters, master_req_acks)
        ]
        self.comb += [
            master.dat_ack.eq(master_dat_ack) for master, master_dat_ack in
            zip(self._masters, master_dat_acks)
        ]

        # route data writes, delaying the controller selection by the
        # write latency so it lines up with the data
        controller_selected_wl = controller_selected
        for i in range(self._write_latency):
            n_controller_selected_wl = [Signal() for i in range(nmasters)]
            self.sync += [
                n.eq(o) for n, o in zip(n_controller_selected_wl,
                                        controller_selected_wl)
            ]
            controller_selected_wl = n_controller_selected_wl
        dat_w_maskselect = []
        dat_we_maskselect = []
        for master, selected in zip(self._masters, controller_selected_wl):
            o_dat_w = Signal(self._dw)
            o_dat_we = Signal(self._dw // 8)
            # Mask out masters not selecting this controller, then OR.
            self.comb += If(selected, o_dat_w.eq(master.dat_w),
                            o_dat_we.eq(master.dat_we))
            dat_w_maskselect.append(o_dat_w)
            dat_we_maskselect.append(o_dat_we)
        self.comb += [
            controller.dat_w.eq(optree("|", dat_w_maskselect)),
            controller.dat_we.eq(optree("|", dat_we_maskselect))
        ]

    # route data reads
    if self._controller_bits:
        for master in self._masters:
            # Record which controller accepted the request, then delay
            # that selection by the read latency to pick dat_r.
            controller_sel = Signal(self._controller_bits)
            for nc, controller in enumerate(self._controllers):
                for nb in range(self._nbanks):  # was: range(nbanks) (NameError)
                    bank = getattr(controller, "bank" + str(nb))
                    self.comb += If(bank.stb & bank.ack,
                                    controller_sel.eq(nc))
            for i in range(self._read_latency):
                n_controller_sel = Signal(self._controller_bits)
                self.sync += n_controller_sel.eq(controller_sel)
                controller_sel = n_controller_sel
            self.comb += master.dat_r.eq(
                Array(self._controllers)[controller_sel].dat_r)
    else:
        self.comb += [
            master.dat_r.eq(self._controllers[0].dat_r)
            for master in self._masters
        ]
def __init__(self, phy_settings, geom_settings, timing_settings,
             controller_settings, bank_machines, refresher, dfi, lasmic,
             with_bandwidth=False):
    """Multiplexes bank machine commands onto the shared DFI command bus.

    Two command choosers pick requests/commands among the bank machines;
    a steerer places them on the DFI phases, and an FSM arbitrates
    between read bursts, write bursts (with anti-starvation timers and
    turnaround delays) and refresh.
    """
    assert (phy_settings.nphases == len(dfi.phases))
    self.phy_settings = phy_settings

    # Command choosing
    requests = [bm.cmd for bm in bank_machines]
    self.submodules.choose_cmd = choose_cmd = _CommandChooser(requests)
    self.submodules.choose_req = choose_req = _CommandChooser(requests)
    self.comb += [
        choose_cmd.want_reads.eq(0),
        choose_cmd.want_writes.eq(0)
    ]
    if phy_settings.nphases == 1:
        # Single-phase PHY: both choosers may also issue bare commands.
        self.comb += [
            choose_cmd.want_cmds.eq(1),
            choose_req.want_cmds.eq(1)
        ]

    # Command steering
    nop = CommandRequest(geom_settings.addressbits, geom_settings.bankbits)
    commands = [nop, choose_cmd.cmd, choose_req.cmd,
                refresher.cmd]  # nop must be 1st
    (STEER_NOP, STEER_CMD, STEER_REQ, STEER_REFRESH) = range(4)
    steerer = _Steerer(commands, dfi)
    self.submodules += steerer

    # Read/write turnaround
    read_available = Signal()
    write_available = Signal()
    self.comb += [
        read_available.eq(
            optree("|", [req.stb & req.is_read for req in requests])),
        write_available.eq(
            optree("|", [req.stb & req.is_write for req in requests]))
    ]

    # Anti-starvation timer: forces a direction switch after *timeout*
    # cycles in the same direction; a timeout of 0 disables the limit.
    def anti_starvation(timeout):
        en = Signal()
        max_time = Signal()
        if timeout:
            t = timeout - 1
            time = Signal(max=t + 1)
            self.comb += max_time.eq(time == 0)
            self.sync += If(~en,
                            time.eq(t)).Elif(~max_time, time.eq(time - 1))
        else:
            self.comb += max_time.eq(0)
        return en, max_time

    read_time_en, max_read_time = anti_starvation(
        controller_settings.read_time)
    write_time_en, max_write_time = anti_starvation(
        controller_settings.write_time)

    # Refresh: performed only once every bank machine has granted it.
    self.comb += [bm.refresh_req.eq(refresher.req) for bm in bank_machines]
    go_to_refresh = Signal()
    self.comb += go_to_refresh.eq(
        optree("&", [bm.refresh_gnt for bm in bank_machines]))

    # Datapath: concatenate the per-phase DFI data onto the LASMI port.
    all_rddata = [p.rddata for p in dfi.phases]
    all_wrdata = [p.wrdata for p in dfi.phases]
    all_wrdata_mask = [p.wrdata_mask for p in dfi.phases]
    self.comb += [
        lasmic.dat_r.eq(Cat(*all_rddata)),
        Cat(*all_wrdata).eq(lasmic.dat_w),
        Cat(*all_wrdata_mask).eq(~lasmic.dat_we)
    ]

    # Control FSM
    fsm = FSM()
    self.submodules += fsm

    # Steerer selection per phase: REQ on the data phase, CMD on the
    # command phase, NOP elsewhere.
    def steerer_sel(steerer, phy_settings, r_w_n):
        r = []
        for i in range(phy_settings.nphases):
            s = steerer.sel[i].eq(STEER_NOP)
            if r_w_n == "read":
                if i == phy_settings.rdphase:
                    s = steerer.sel[i].eq(STEER_REQ)
                elif i == phy_settings.rdcmdphase:
                    s = steerer.sel[i].eq(STEER_CMD)
            elif r_w_n == "write":
                if i == phy_settings.wrphase:
                    s = steerer.sel[i].eq(STEER_REQ)
                elif i == phy_settings.wrcmdphase:
                    s = steerer.sel[i].eq(STEER_CMD)
            else:
                raise ValueError
            r.append(s)
        return r

    fsm.act(
        "READ", read_time_en.eq(1), choose_req.want_reads.eq(1),
        choose_cmd.cmd.ack.eq(1), choose_req.cmd.ack.eq(1),
        steerer_sel(steerer, phy_settings, "read"),
        If(
            write_available,
            # TODO: switch only after several cycles of ~read_available?
            If(~read_available | max_read_time, NextState("RTW"))),
        If(go_to_refresh, NextState("REFRESH")))
    fsm.act(
        "WRITE", write_time_en.eq(1), choose_req.want_writes.eq(1),
        choose_cmd.cmd.ack.eq(1), choose_req.cmd.ack.eq(1),
        steerer_sel(steerer, phy_settings, "write"),
        If(read_available,
           If(~write_available | max_write_time, NextState("WTR"))),
        If(go_to_refresh, NextState("REFRESH")))
    fsm.act("REFRESH", steerer.sel[0].eq(STEER_REFRESH),
            If(~refresher.req, NextState("READ")))
    fsm.delayed_enter(
        "RTW", "WRITE", phy_settings.read_latency - 1
    )  # FIXME: reduce this, actual limit is around (cl+1)/nphases
    fsm.delayed_enter("WTR", "READ", timing_settings.tWTR - 1)
    # FIXME: workaround for zero-delay loop simulation problem with Icarus Verilog
    fsm.finalize()
    self.comb += refresher.ack.eq(fsm.state == fsm.encoding["REFRESH"])

    self.with_bandwidth = with_bandwidth
def __init__(self):
    """TMDS channel encoder: 8-bit data + 2 control bits -> 10-bit symbol.

    Pipelined implementation of the DVI/HDMI encoding: transition-
    minimized intermediate code q_m, then DC-balance selection driven by
    a running disparity counter. Control periods (de low) emit one of the
    four fixed control tokens selected by c.
    """
    self.d = Signal(8)
    self.c = Signal(2)
    self.de = Signal()

    self.out = Signal(10)

    ###

    # stage 1 - count number of 1s in data
    d = Signal(8)
    n1d = Signal(max=9)
    self.sync += [
        n1d.eq(optree("+", [self.d[i] for i in range(8)])),
        d.eq(self.d)
    ]

    # stage 2 - add 9th bit
    # q_m8_n selects XNOR encoding when the input is ones-heavy, to
    # minimize transitions; bits are chained combinatorially then
    # registered.
    q_m = Signal(9)
    q_m8_n = Signal()
    self.comb += q_m8_n.eq((n1d > 4) | ((n1d == 4) & ~d[0]))
    for i in range(8):
        if i:
            curval = curval ^ d[i] ^ q_m8_n
        else:
            curval = d[0]
        self.sync += q_m[i].eq(curval)
    self.sync += q_m[8].eq(~q_m8_n)

    # stage 3 - count number of 1s and 0s in q_m[:8]
    q_m_r = Signal(9)
    n0q_m = Signal(max=9)
    n1q_m = Signal(max=9)
    self.sync += [
        n0q_m.eq(optree("+", [~q_m[i] for i in range(8)])),
        n1q_m.eq(optree("+", [q_m[i] for i in range(8)])),
        q_m_r.eq(q_m)
    ]

    # stage 4 - final encoding
    # cnt is the signed running disparity; c/de are delayed 3 cycles to
    # stay aligned with the data pipeline.
    cnt = Signal((6, True))

    s_c = self.c
    s_de = self.de
    for p in range(3):
        new_c = Signal(2)
        new_de = Signal()
        self.sync += new_c.eq(s_c), new_de.eq(s_de)
        s_c, s_de = new_c, new_de

    self.sync += If(
        s_de,
        # Balanced or zero-disparity word: output polarity from q_m[8].
        If((cnt == 0) | (n1q_m == n0q_m), self.out[9].eq(~q_m_r[8]),
           self.out[8].eq(q_m_r[8]),
           If(q_m_r[8], self.out[:8].eq(q_m_r[:8]),
              cnt.eq(cnt + n1q_m - n0q_m)).Else(self.out[:8].eq(~q_m_r[:8]),
                                                cnt.eq(cnt + n0q_m - n1q_m))
           ).Else(
               # Invert (or not) to drive the disparity back toward zero.
               If((~cnt[5] & (n1q_m > n0q_m)) | (cnt[5] & (n0q_m > n1q_m)),
                  self.out[9].eq(1), self.out[8].eq(q_m_r[8]),
                  self.out[:8].eq(~q_m_r[:8]),
                  cnt.eq(cnt + Cat(0, q_m_r[8]) + n0q_m - n1q_m)).Else(
                      self.out[9].eq(0), self.out[8].eq(q_m_r[8]),
                      self.out[:8].eq(q_m_r[:8]),
                      cnt.eq(cnt - Cat(0, ~q_m_r[8]) + n1q_m - n0q_m)))).Else(
                          # Blanking: emit a control token, reset disparity.
                          self.out.eq(Array(control_tokens)[s_c]), cnt.eq(0))