def lock(self, fsm):
    """Acquire the mutex from FSM `fsm`, spinning until the lock is held.

    Adds a try/verify state pair to `fsm`: the try state attempts to set
    the shared lock register, the verify state confirms this FSM's ID won
    the race and loops back to the try state otherwise.
    Returns 1 (constant success value once the FSM proceeds).
    """
    name = fsm.name
    new_lock_id = self._get_id(name)
    # the lock ID must be representable in the ID register width
    if new_lock_id > 2**self.width - 1:
        raise ValueError('too many lock IDs')
    # try
    try_state = fsm.current
    state_cond = fsm.state == fsm.current
    try_cond = vtypes.Not(self.lock_reg)
    # proceed if the lock was free, or if we already hold it
    fsm_cond = vtypes.Ors(try_cond, self.lock_id == new_lock_id)
    self.seq.If(state_cond, try_cond)(
        self.lock_reg(1),
        self.lock_id(new_lock_id))
    fsm.If(fsm_cond).goto_next()
    # verify
    cond = vtypes.Ands(self.lock_reg, self.lock_id == new_lock_id)
    fsm.If(vtypes.Not(cond)).goto(try_state)  # try again
    fsm.If(cond).goto_next()  # OK
    return 1
def enq(self, wdata, cond=None, delay=0):
    """ Enque operation

    Schedules a one-cycle enqueue pulse (`delay` cycles after the seq's
    current delay) and returns (ack, ready):
    ack   - enqueue actually accepted this cycle (enq asserted and not full)
    ready - safe-to-enqueue condition for the caller
    """
    if self._enq_disabled:
        raise TypeError('Enq disabled.')
    if cond is not None:
        self.seq.If(cond)
    current_delay = self.seq.current_delay
    not_full = vtypes.Not(self.wif.full)
    ack = vtypes.Ands(not_full, self.wif.enq)
    if current_delay + delay == 0:
        # immediate enqueue: almost_full guards the 1-deep pipeline margin
        ready = vtypes.Not(self.wif.almost_full)
    else:
        # delayed enqueue: account for in-flight writes in the occupancy count
        ready = self._count + (current_delay + delay + 1) < self._max_size
    self.seq.Delay(current_delay + delay).EagerVal().If(not_full)(
        self.wif.wdata(wdata))
    self.seq.Then().Delay(current_delay + delay)(self.wif.enq(1))
    # de-assert
    self.seq.Delay(current_delay + delay + 1)(self.wif.enq(0))
    return ack, ready
def push_read_dataflow(self, data, counter=None, cond=None):
    """ @return done

    Streams a dataflow variable `data` onto the AXI read-data channel,
    decrementing `counter` per beat and asserting rlast on the final beat.
    `done` is a register pulsed when the last beat is issued.
    """
    if self._read_disabled:
        raise TypeError('Read disabled.')
    if counter is not None and not isinstance(counter, vtypes.Reg):
        raise TypeError("counter must be Reg or None.")
    if counter is None:
        # default: the counter of the most recent read request
        counter = self.read_counters[-1]
    # accept a new beat when beats remain and the channel can take one
    ack = vtypes.Ands(counter > 0,
                      vtypes.Ors(self.rdata.rready,
                                 vtypes.Not(self.rdata.rvalid)))
    last = self.m.TmpReg(initval=0)
    if cond is None:
        cond = ack
    else:
        cond = (cond, ack)
    raw_data, raw_valid = data.read(cond=cond)
    # write condition
    self.seq.If(raw_valid)
    self.seq.If(vtypes.Ands(ack, counter > 0))(
        self.rdata.rdata(raw_data),
        self.rdata.rvalid(1),
        self.rdata.rlast(0),
        counter.dec()
    )
    self.seq.Then().If(counter == 1)(
        self.rdata.rlast(1),
        last(1)
    )
    # de-assert
    self.seq.Delay(1)(
        self.rdata.rvalid(0),
        self.rdata.rlast(0),
        last(0)
    )
    # retry: hold outputs while the sink is not ready (AXI VALID must hold)
    self.seq.If(vtypes.Ands(self.rdata.rvalid, vtypes.Not(self.rdata.rready)))(
        self.rdata.rvalid(self.rdata.rvalid),
        self.rdata.rlast(self.rdata.rlast),
        last(last)
    )
    done = last
    return done
def write_data(self, data, counter=None, cond=None):
    """ @return ack, last

    Issues one AXI write-data beat per accepted cycle, with full byte
    strobes, decrementing `counter` and asserting wlast on the final beat.
    """
    if self._write_disabled:
        raise TypeError('Write disabled.')
    if counter is not None and not isinstance(counter, vtypes.Reg):
        raise TypeError("counter must be Reg or None.")
    if counter is None:
        # default: the counter of the most recent write request
        counter = self.write_counters[-1]
    if cond is not None:
        self.seq.If(cond)
    # accept a new beat when beats remain and the channel can take one
    ack = vtypes.Ands(counter > 0,
                      vtypes.Ors(self.wdata.wready,
                                 vtypes.Not(self.wdata.wvalid)))
    last = self.m.TmpReg(initval=0)
    self.seq.If(vtypes.Ands(ack, counter > 0))(
        self.wdata.wdata(data),
        self.wdata.wvalid(1),
        self.wdata.wlast(0),
        # all byte lanes enabled
        self.wdata.wstrb(vtypes.Repeat(
            vtypes.Int(1, 1), (self.wdata.datawidth // 8))),
        counter.dec()
    )
    self.seq.Then().If(counter == 1)(
        self.wdata.wlast(1),
        last(1)
    )
    # de-assert
    self.seq.Delay(1)(
        self.wdata.wvalid(0),
        self.wdata.wlast(0),
        last(0)
    )
    # retry: hold outputs while the slave is not ready (AXI VALID must hold)
    self.seq.If(vtypes.Ands(self.wdata.wvalid, vtypes.Not(self.wdata.wready)))(
        self.wdata.wvalid(self.wdata.wvalid),
        self.wdata.wlast(self.wdata.wlast),
        last(last)
    )
    return ack, last
def write_request(self, addr, length=1, cond=None, counter=None):
    """ @return ack, counter

    Issues an AXI write-address (AW) request for `length` beats and loads
    `counter` (created here if None, then recorded in write_counters so
    write_data can find it).
    """
    if self._write_disabled:
        raise TypeError('Write disabled.')
    if isinstance(length, int) and length > 2 ** self.burst_size_width:
        raise ValueError("length must be less than 257.")
    if isinstance(length, int) and length < 1:
        raise ValueError("length must be more than 0.")
    if counter is not None and not isinstance(counter, vtypes.Reg):
        raise TypeError("counter must be Reg or None.")
    if cond is not None:
        self.seq.If(cond)
    ack = vtypes.Ors(self.waddr.awready, vtypes.Not(self.waddr.awvalid))
    if counter is None:
        counter = self.m.TmpReg(self.burst_size_width, initval=0)
    self.write_counters.append(counter)
    self.seq.If(vtypes.Ands(ack, counter == 0))(
        self.waddr.awaddr(addr),
        self.waddr.awlen(length - 1),  # AXI AWLEN is beats-1
        self.waddr.awvalid(1),
        counter(length)
    )
    # guard for a runtime-zero length (only reachable for non-int length)
    self.seq.Then().If(length == 0)(
        self.waddr.awvalid(0)
    )
    # de-assert
    self.seq.Delay(1)(
        self.waddr.awvalid(0)
    )
    # retry: hold awvalid until awready (AXI VALID must hold)
    self.seq.If(vtypes.Ands(self.waddr.awvalid, vtypes.Not(self.waddr.awready)))(
        self.waddr.awvalid(self.waddr.awvalid)
    )
    return ack, counter
def deq(self, cond=None, delay=0):
    """ Deque operation

    Schedules a one-cycle dequeue pulse (`delay` cycles after the seq's
    current delay). Returns (rdata, rvalid): the FIFO read-data signal and
    a register that is high for one cycle when the dequeued data is valid.
    """
    if self._deq_disabled:
        raise TypeError('Deq disabled.')
    if cond is not None:
        self.seq.If(cond)
    not_empty = vtypes.Not(self.rif.empty)
    current_delay = self.seq.current_delay
    self.seq.Delay(current_delay + delay)(self.rif.deq(1))
    rdata = self.rif.rdata
    rvalid = self.m.TmpReg(initval=0)
    # data becomes valid one cycle after deq, if the FIFO was not empty
    self.seq.Then().Delay(current_delay + delay + 1)(rvalid(
        vtypes.Ands(not_empty, self.rif.deq)))
    # de-assert
    self.seq.Delay(current_delay + delay + 1)(self.rif.deq(0))
    self.seq.Delay(current_delay + delay + 2)(rvalid(0))
    return rdata, rvalid
def Else(self, *statement, **kwargs):
    """Attach an else-branch to the previous If of this sequencer.

    The top of the condition stack is replaced by its negation; if the
    true-branch carried a delay or extra attributes, the else-branch is
    emitted as a separate conditional statement via Then() instead of
    extending the original vtypes.If node.
    """
    self._clear_elif_cond()
    if len(self.last_cond) == 0:
        raise ValueError("No previous condition for Else.")
    # replace the previous condition with its negation
    old = self.last_cond.pop()
    self.last_cond.append(vtypes.Not(old))
    # if the true-statement has delay attributes,
    # Else statement is separated.
    if 'delay' in self.last_kwargs and self.last_kwargs['delay'] > 0:
        prev_cond = self.last_cond
        ret = self.Then()(*statement)
        self.last_cond = prev_cond
        return ret
    # if there is additional attribute, Else statement is separated.
    # NOTE(review): the second test reads `kwargs`, not `next_kwargs` —
    # looks inconsistent; confirm against the sequencer's intent.
    has_args = not (len(self.next_kwargs) == 0 or  # has no args
                    (len(self.next_kwargs) == 1 and 'cond' in kwargs))  # has only 'cond'
    if has_args:
        prev_cond = self.last_cond
        ret = self.Then()(*statement)
        self.last_cond = prev_cond
        return ret
    if not isinstance(self.last_if_statement, vtypes.If):
        raise ValueError("Last if-statement is not If")
    self.last_if_statement.Else(*statement)
    self._add_dst_var(statement)
    return self
def Elif(self, *cond):
    """Append an elif-branch condition to the previous If of this sequencer.

    Pushes the negation of the previous condition followed by the new
    condition onto the condition stack, so subsequent statements are
    guarded by the combined elif condition. If the true-branch carried a
    delay attribute, the branch is separated via Then().

    Raises ValueError if there is no preceding condition or the last
    statement is not a vtypes.If.
    """
    if len(self.last_cond) == 0:
        # bugfix: the message previously said "Else" in Elif
        raise ValueError("No previous condition for Elif.")
    cond = make_condition(*cond)
    # replace the previous condition with its negation, then stack ours
    old = self.last_cond.pop()
    self.last_cond.append(vtypes.Not(old))
    self.last_cond.append(cond)
    # if the true-statement has delay attributes, Else statement is
    # separated.
    if 'delay' in self.last_kwargs and self.last_kwargs['delay'] > 0:
        prev_cond = self.last_cond
        ret = self.Then()
        self.last_cond = prev_cond
        return ret
    if not isinstance(self.last_if_statement, vtypes.If):
        raise ValueError("Last if-statement is not If")
    self.elif_cond = cond
    cond = self._make_cond(self.last_cond)
    self.next_kwargs['cond'] = cond
    return self
def try_lock(self, fsm):
    """Attempt to acquire the mutex once, without spinning.

    Adds a try state and a verify state to `fsm` and returns a register
    that holds 1 if this FSM won the lock, 0 otherwise.
    """
    name = fsm.name
    new_lock_id = self._get_id(name)
    # the lock ID must be representable in the ID register width
    if new_lock_id > 2**self.width - 1:
        raise ValueError('too many lock IDs')
    # try
    try_state = fsm.current
    state_cond = fsm.state == fsm.current
    try_cond = vtypes.Not(self.lock_reg)
    self.seq.If(state_cond, try_cond)(
        self.lock_reg(1),
        self.lock_id(new_lock_id))
    fsm.goto_next()
    # verify
    # NOTE(review): uses vtypes.And here while lock() uses vtypes.Ands —
    # presumably equivalent for 1-bit operands; confirm intended operator.
    cond = vtypes.And(self.lock_reg, self.lock_id == new_lock_id)
    result = self.m.TmpReg(initval=0)
    fsm(result(cond))
    fsm.goto_next()
    return result
def implement_control(self, seq):
    """Wire the valid/ready handshake for this pipeline stage.

    Four cases, by which handshake ports exist:
    - no ivalid and no oready: tie outputs high, no enable
    - no oready: plain valid chain, no enable
    - no ivalid: pass oready through as both iready and stage enable
    - both: enable = (not ovalid) or oready, driving chain and iready
    """
    self.valid_list = None
    if self.ivalid is None and self.oready is None:
        if self.ovalid is not None:
            self.ovalid.assign(1)
        if self.iready is not None:
            self.iready.assign(1)
        self.senable = None
        return
    if self.oready is None:
        self._make_valid_chain(seq)
        self.senable = None
        return
    if self.ivalid is None:
        self.iready.assign(self.oready)
        self.senable = self.oready
        return
    # stall unless downstream is ready or the output slot is empty
    cond = vtypes.OrList(vtypes.Not(self.ovalid), self.oready)
    self.senable = self.module.TmpWire()
    self.senable.assign(cond)
    self._make_valid_chain(seq, self.senable)
    self.iready.assign(self.senable)
def push_read_data(self, data, counter=None, cond=None):
    """ @return ack, last

    Pushes plain (non-dataflow) `data` onto the AXI read-data channel,
    one beat per accepted cycle, asserting rlast on the final beat.
    """
    if self._read_disabled:
        raise TypeError('Read disabled.')
    if counter is not None and not isinstance(counter, vtypes.Reg):
        raise TypeError("counter must be Reg or None.")
    if counter is None:
        # default: the counter of the most recent read request
        counter = self.read_counters[-1]
    if cond is not None:
        self.seq.If(cond)
    # accept a new beat when beats remain and the channel can take one
    ack = vtypes.Ands(counter > 0,
                      vtypes.Ors(self.rdata.rready,
                                 vtypes.Not(self.rdata.rvalid)))
    last = self.m.TmpReg(initval=0)
    self.seq.If(vtypes.Ands(ack, counter > 0))(
        self.rdata.rdata(data),
        self.rdata.rvalid(1),
        self.rdata.rlast(0),
        counter.dec()
    )
    self.seq.Then().If(counter == 1)(
        self.rdata.rlast(1),
        last(1)
    )
    # de-assert
    self.seq.Delay(1)(
        self.rdata.rvalid(0),
        self.rdata.rlast(0),
        last(0)
    )
    # retry: hold outputs while the sink is not ready (AXI VALID must hold)
    self.seq.If(vtypes.Ands(self.rdata.rvalid, vtypes.Not(self.rdata.rready)))(
        self.rdata.rvalid(self.rdata.rvalid),
        self.rdata.rlast(self.rdata.rlast),
        last(last)
    )
    return ack, last
def mkFifoDefinition(name, datawidth=32, addrwidth=4):
    """Build and return a Verilog module implementing a circular-buffer FIFO.

    Depth is 2**addrwidth entries; head/tail wrap via masking. full is
    reported one slot early (head+1 == tail), almost_full two slots early,
    so one entry is always sacrificed to distinguish full from empty.
    """
    m = module.Module(name)
    clk = m.Input('CLK')
    rst = m.Input('RST')
    wif = FifoWriteSlaveInterface(m, name, datawidth)
    rif = FifoReadSlaveInterface(m, name, datawidth)
    mem = m.Reg('mem', datawidth, 2**addrwidth)
    head = m.Reg('head', addrwidth, initval=0)
    tail = m.Reg('tail', addrwidth, initval=0)
    is_empty = m.Wire('is_empty')
    is_almost_empty = m.Wire('is_almost_empty')
    is_full = m.Wire('is_full')
    is_almost_full = m.Wire('is_almost_full')
    mask = (2**addrwidth) - 1  # wrap-around mask for pointer arithmetic
    is_empty.assign(head == tail)
    is_almost_empty.assign(head == ((tail + 1) & mask))
    is_full.assign(((head + 1) & mask) == tail)
    is_almost_full.assign(((head + 2) & mask) == tail)
    rdata = m.Reg('rdata_reg', datawidth, initval=0)
    wif.full.assign(is_full)
    wif.almost_full.assign(vtypes.Ors(is_almost_full, is_full))
    rif.empty.assign(is_empty)
    rif.almost_empty.assign(vtypes.Ors(is_almost_empty, is_empty))
    seq = Seq(m, '', clk, rst)
    # write side: store and advance head when enq fires and not full
    seq.If(vtypes.Ands(wif.enq, vtypes.Not(is_full)))(mem[head](wif.wdata),
                                                      head.inc())
    # read side: registered read, so rdata appears one cycle after deq
    seq.If(vtypes.Ands(rif.deq, vtypes.Not(is_empty)))(rdata(mem[tail]),
                                                       tail.inc())
    rif.rdata.assign(rdata)
    seq.make_always()
    return m
def pull_read_request(self, cond=None, counter=None):
    """ @return addr, counter, valid

    Slave-side AR-channel receiver: accepts an incoming read request and
    latches its address and burst length (+1, converting AXI beats-1
    encoding to a beat count). `valid` pulses for one cycle on accept.
    """
    if self._read_disabled:
        raise TypeError('Read disabled.')
    if counter is not None and not isinstance(counter, vtypes.Reg):
        raise TypeError("counter must be Reg or None.")
    if counter is None:
        counter = self.m.TmpReg(self.burst_size_width, initval=0)
    self.read_counters.append(counter)
    ready = make_condition(cond)
    ack = vtypes.Ands(self.raddr.arready, self.raddr.arvalid)
    addr = self.m.TmpReg(self.addrwidth, initval=0)
    valid = self.m.TmpReg(initval=0)
    # be ready only while a previous request is not still being reported
    val = (vtypes.Not(valid) if ready is None else
           vtypes.Ands(ready, vtypes.Not(valid)))
    # arready may already be driven by another requester; OR us in
    prev_subst = self.raddr.arready._get_subst()
    if not prev_subst:
        self.raddr.arready.assign(val)
    else:
        self.raddr.arready.subst[0].overwrite_right(
            vtypes.Ors(prev_subst[0].right, val))
    self.seq.If(ack)(
        addr(self.raddr.araddr),
        counter(self.raddr.arlen + 1)
    )
    self.seq(
        valid(ack)
    )
    return addr, counter, valid
def write_dataflow(self, port, addr, data, length=1, stride=1,
                   cond=None, when=None):
    """ @return done
    'data' and 'when' must be dataflow variables

    Writes `length` items from dataflow variable `data` into RAM `port`
    starting at `addr`, advancing the address by `stride` per item.
    `when`, if given, gates each write. `done` pulses after the last write.
    """
    if self._write_disabled[port]:
        raise TypeError('Write disabled.')
    counter = self.m.TmpReg(length.bit_length() + 1, initval=0)
    last = self.m.TmpReg(initval=0)
    ext_cond = make_condition(cond)
    data_cond = make_condition(counter > 0, vtypes.Not(last))
    if when is None or not isinstance(when, df_numeric):
        raw_data, raw_valid = data.read(cond=data_cond)
    else:
        # read data and the gating signal in lockstep
        data_list, raw_valid = read_multi(self.m, data, when, cond=data_cond)
        raw_data = data_list[0]
        when = data_list[1]
    when_cond = make_condition(when, ready=data_cond)
    if when_cond is not None:
        raw_valid = vtypes.Ands(when_cond, raw_valid)
    # start: preload addr one stride back so the first inc lands on `addr`
    self.seq.If(ext_cond, counter == 0)(
        self.interfaces[port].addr(addr - stride),
        counter(length),
    )
    self.seq.If(raw_valid, counter > 0)(
        self.interfaces[port].addr(self.interfaces[port].addr + stride),
        self.interfaces[port].wdata(raw_data),
        self.interfaces[port].wenable(1),
        counter.dec())
    self.seq.If(raw_valid, counter == 1)(last(1))
    # de-assert
    self.seq.Delay(1)(self.interfaces[port].wenable(0), last(0))
    done = last
    return done
def setup_clock(m, clk, hperiod=5):
    """Create an Initial block that drives `clk` as a free-running clock
    toggling every `hperiod` time units, and record the half-period on the
    module for Verilator support."""
    toggle_forever = vtypes.Forever(clk(vtypes.Not(clk), ldelay=hperiod))
    ret = m.Initial(clk(0), toggle_forever)
    # for verilator
    if not hasattr(m, 'verilator_clock'):
        m.verilator_clock = collections.OrderedDict()
    m.verilator_clock[clk] = hperiod
    return ret
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=4):
    """Instantiate a FIFO submodule plus master-side interfaces and an
    occupancy counter tracked in the parent module `m`."""
    self.m = m
    self.name = name
    self.clk = clk
    self.rst = rst
    self.datawidth = datawidth
    self.addrwidth = addrwidth
    self.wif = FifoWriteInterface(self.m, name, datawidth)
    self.rif = FifoReadInterface(self.m, name, datawidth)
    self.definition = mkFifoDefinition(name, datawidth, addrwidth)
    self.inst = self.m.Instance(self.definition, 'inst_' + name,
                                ports=m.connect_ports(self.definition))
    self.seq = Seq(m, name, clk, rst)
    # entry counter
    # capacity is 2**addrwidth - 1 (one slot sacrificed by the FIFO design)
    self._max_size = (2 ** self.addrwidth - 1
                      if isinstance(self.addrwidth, int) else
                      vtypes.Int(2) ** self.addrwidth - 1)
    self._count = self.m.Reg(
        'count_' + name, self.addrwidth + 1, initval=0)
    # simultaneous enq+deq keeps the count; otherwise inc/dec on one side
    self.seq.If(
        vtypes.Ands(vtypes.Ands(self.wif.enq, vtypes.Not(self.wif.full)),
                    vtypes.Ands(self.rif.deq, vtypes.Not(self.rif.empty))))(
        self._count(self._count)
    ).Elif(vtypes.Ands(self.wif.enq, vtypes.Not(self.wif.full)))(
        self._count.inc()
    ).Elif(vtypes.Ands(self.rif.deq, vtypes.Not(self.rif.empty)))(
        self._count.dec()
    )
    self._enq_disabled = False
    self._deq_disabled = False
    self.mutex = None
def _binary_op_div(self, op, r):
    """Signed-aware binary division helper for fixed-point values.

    Applies `op` to the absolute values of both operands and restores the
    sign afterwards (negative iff exactly one operand is negative), since
    applying the operator directly to two's-complement bit-vectors would
    not give the intended result. Returns a new Fixed value whose binary
    point is the max of the operands' points.
    """
    lvalue = self.value
    lpoint = self.point
    lsigned = self.signed
    if not isinstance(r, Fixed):
        rvalue = r
        rsigned = vtypes.get_signed(r)
        rpoint = 0
    else:
        rvalue = r.value
        rsigned = r.signed
        rpoint = r.point
    point = _max_mux(lpoint, rpoint)
    signed = lsigned and rsigned
    lwidth = lvalue.bit_length()
    rwidth = rvalue.bit_length()
    ldata, rdata = adjust(lvalue, rvalue, lpoint, rpoint, signed)
    # extract each operand's MSB (sign bit); fall back to shift-and-mask
    # when the value does not support bit-slicing
    try:
        lmsb = ldata[lwidth - 1]
    except Exception:  # narrowed from bare except
        lmsb = (ldata >> (lwidth - 1) & 0x1)
    try:
        # bugfix: index with the RIGHT operand's width (was lwidth - 1),
        # matching the shift-based fallback below
        rmsb = rdata[rwidth - 1]
    except Exception:  # narrowed from bare except
        rmsb = (rdata >> (rwidth - 1) & 0x1)
    abs_ldata = (ldata if not lsigned else
                 vtypes.Mux(lmsb == 0, ldata, vtypes.Unot(ldata) + 1))
    abs_rdata = (rdata if not rsigned else
                 vtypes.Mux(rmsb == 0, rdata, vtypes.Unot(rdata) + 1))
    abs_data = op(abs_ldata, abs_rdata)
    # same-sign operands -> positive result; otherwise negate
    data = (abs_data if not signed else
            vtypes.Mux(vtypes.Ors(vtypes.Ands(lmsb, rmsb),
                                  vtypes.Ands(vtypes.Not(lmsb),
                                              vtypes.Not(rmsb))),
                       abs_data, vtypes.Unot(abs_data) + 1))
    return Fixed(data, point, signed)
def deq(self, fsm):
    """FSM-driven dequeue: waits until the FIFO is non-empty, captures the
    dequeued word into a fresh signed register, and returns that register.
    Adds three FSM transitions (wait-not-empty, latency, wait-valid).
    """
    cond = fsm.state == fsm.current
    rdata, rvalid = fifo.Fifo.deq(self, cond=cond)
    fsm.If(vtypes.Not(self.empty)).goto_next()
    # one extra state to cover the FIFO's registered-read latency
    fsm.goto_next()
    rdata_reg = self.m.TmpReg(self.datawidth, initval=0, signed=True)
    fsm.If(rvalid)(rdata_reg(rdata))
    fsm.If(rvalid).goto_next()
    return rdata_reg
def write_dataflow(self, port, addr, data, length=1, cond=None, when=None):
    """ @return done

    Writes `length` items from dataflow variable `data` into RAM `port`
    at consecutive addresses starting from `addr`. `when`, if given, gates
    each write. `done` pulses after the final write.
    """
    if self._write_disabled[port]:
        raise TypeError('Write disabled.')
    counter = self.m.TmpReg(length.bit_length() + 1, initval=0)
    last = self.m.TmpReg(initval=0)
    ext_cond = make_condition(cond)
    data_cond = make_condition(counter > 0, vtypes.Not(last))
    all_cond = make_condition(data_cond, ext_cond)
    raw_data, raw_valid = data.read(cond=data_cond)
    when_cond = make_condition(when, ready=data_cond)
    if when_cond is not None:
        raw_valid = vtypes.Ands(when_cond, raw_valid)
    # start: preload addr one step back so the first inc lands on `addr`
    self.seq.If(make_condition(ext_cond, counter == 0))(
        self.interfaces[port].addr(addr - 1),
        counter(length),
    )
    self.seq.If(make_condition(raw_valid, counter > 0))(
        self.interfaces[port].addr.inc(),
        self.interfaces[port].wdata(raw_data),
        self.interfaces[port].wenable(1),
        counter.dec()
    )
    self.seq.If(make_condition(raw_valid, counter == 1))(
        last(1)
    )
    # de-assert
    self.seq.Delay(1)(
        self.interfaces[port].wenable(0),
        last(0)
    )
    done = last
    return done
def deq_rtl(self, cond=None):
    """ Deque

    Combinational dequeue hookup: deq fires whenever the FIFO is non-empty
    and the optional `cond` holds. Returns (data, valid, ready) where
    valid is the fire condition delayed by one cycle (registered read).
    """
    if self._deq_disabled:
        raise TypeError('Deq disabled.')
    user_cond = make_condition(cond)
    ready = vtypes.Not(self.rif.empty)
    deq_cond = ready if user_cond is None else vtypes.Ands(user_cond, ready)
    util.add_enable_cond(self.rif.deq, deq_cond, 1)
    data = self.rif.rdata
    valid = self.seq.Prev(deq_cond, 1)
    return data, valid, ready
def enq_rtl(self, wdata, cond=None):
    """ Enque

    Combinational enqueue hookup: enq fires whenever the FIFO has room
    (not almost_full) and the optional `cond` holds. Returns (ack, ready)
    where ack is the ready condition delayed by one cycle.
    """
    if self._enq_disabled:
        raise TypeError('Enq disabled.')
    user_cond = make_condition(cond)
    ready = vtypes.Not(self.wif.almost_full)
    if user_cond is None:
        enq_cond = ready
        enable = vtypes.Int(1, 1)
    else:
        enq_cond = vtypes.Ands(user_cond, ready)
        enable = user_cond
    util.add_mux(self.wif.wdata, enable, wdata)
    util.add_enable_cond(self.wif.enq, enable, enq_cond)
    return self.seq.Prev(ready, 1), ready
def setup_clock(m, clk, hperiod=5):
    """Create an Initial block driving `clk` as a free-running clock that
    toggles every `hperiod` time units."""
    init_low = clk(0)
    toggle_forever = vtypes.Forever(clk(vtypes.Not(clk), ldelay=hperiod))
    return m.Initial(init_low, toggle_forever)
def read_dataflow(self, port, addr, length=1, cond=None):
    """ @return data, last, done

    Streams `length` consecutive words from RAM `port` starting at `addr`
    as dataflow variables. Handles the RAM's 1-cycle read latency by
    muxing in the previous data word when the stream stalls. `done` is the
    register behind the `last` stream.
    """
    data_valid = self.m.TmpReg(initval=0)
    last_valid = self.m.TmpReg(initval=0)
    data_ready = self.m.TmpWire()
    last_ready = self.m.TmpWire()
    data_ready.assign(1)
    last_ready.assign(1)
    data_ack = vtypes.Ors(data_ready, vtypes.Not(data_valid))
    last_ack = vtypes.Ors(last_ready, vtypes.Not(last_valid))
    ext_cond = make_condition(cond)
    data_cond = make_condition(data_ack, last_ack)
    prev_data_cond = self.seq.Prev(data_cond, 1)
    all_cond = make_condition(data_cond, ext_cond)
    # hold the previous word when the consumer was stalled last cycle
    data = self.m.TmpWireLike(self.interfaces[port].rdata)
    prev_data = self.seq.Prev(data, 1)
    data.assign(
        vtypes.Mux(prev_data_cond, self.interfaces[port].rdata, prev_data))
    counter = self.m.TmpReg(length.bit_length() + 1, initval=0)
    next_valid_on = self.m.TmpReg(initval=0)
    next_valid_off = self.m.TmpReg(initval=0)
    next_last = self.m.TmpReg(initval=0)
    last = self.m.TmpReg(initval=0)
    # valid pipeline: on -> off -> idle, tracking the read latency
    self.seq.If(make_condition(data_cond, next_valid_off))(last(0),
                                                           data_valid(0),
                                                           last_valid(0),
                                                           next_valid_off(0))
    self.seq.If(make_condition(data_cond, next_valid_on))(data_valid(1),
                                                          last_valid(1),
                                                          last(next_last),
                                                          next_last(0),
                                                          next_valid_on(0),
                                                          next_valid_off(1))
    # start a new burst when idle
    self.seq.If(
        make_condition(ext_cond, counter == 0,
                       vtypes.Not(next_last), vtypes.Not(last)))(
        self.interfaces[port].addr(addr),
        counter(length - 1),
        next_valid_on(1),
    )
    self.seq.If(make_condition(data_cond, counter > 0))(
        self.interfaces[port].addr.inc(),
        counter.dec(),
        next_valid_on(1),
        next_last(0))
    self.seq.If(make_condition(data_cond, counter == 1))(next_last(1))
    df = self.df if self.df is not None else dataflow
    df_data = df.Variable(data, data_valid, data_ready)
    df_last = df.Variable(last, last_valid, last_ready, width=1)
    done = last
    return df_data, df_last, done
def done(self, fsm):
    """Return the completion condition: the negation of the busy flag."""
    busy_flag = self.busy
    return vtypes.Not(busy_flag)
def write_dataflow_pattern(self, port, addr, data, pattern,
                           cond=None, when=None):
    """ @return done
    'data' and 'when' must be dataflow variables

    Writes dataflow `data` into RAM `port` following a multi-dimensional
    access `pattern` of (size, stride) pairs. Each dimension keeps its own
    down-counter and offset register; the write address is `addr` plus the
    sum of all offsets. `done` pulses after the final element.
    """
    if self._write_disabled[port]:
        raise TypeError('Write disabled.')
    if not isinstance(pattern, (tuple, list)):
        raise TypeError('pattern must be list or tuple.')
    if not pattern:
        raise ValueError(
            'pattern must have one (size, stride) pair at least.')
    if not isinstance(pattern[0], (tuple, list)):
        # normalize a single (size, stride) pair into a 1-element pattern
        pattern = (pattern, )
    last = self.m.TmpReg(initval=0)
    running = self.m.TmpReg(initval=0)
    ext_cond = make_condition(cond)
    data_cond = make_condition(running, vtypes.Not(last))
    if when is None or not isinstance(when, df_numeric):
        raw_data, raw_valid = data.read(cond=data_cond)
    else:
        # read data and the gating signal in lockstep
        data_list, raw_valid = read_multi(self.m, data, when, cond=data_cond)
        raw_data = data_list[0]
        when = data_list[1]
    when_cond = make_condition(when, ready=data_cond)
    if when_cond is not None:
        raw_valid = vtypes.Ands(when_cond, raw_valid)
    # write address = base addr + sum of per-dimension offsets
    offset_addr = self.m.TmpWire(self.addrwidth)
    offsets = [self.m.TmpReg(self.addrwidth, initval=0) for _ in pattern]
    offset_addr_value = addr
    for offset in offsets:
        offset_addr_value = offset + offset_addr_value
    offset_addr.assign(offset_addr_value)
    count_list = [
        self.m.TmpReg(out_size.bit_length() + 1, initval=0)
        for (out_size, out_stride) in pattern
    ]
    self.seq.If(ext_cond, vtypes.Not(running))(running(1))
    self.seq.If(raw_valid, running)(self.interfaces[port].addr(offset_addr),
                                    self.interfaces[port].wdata(raw_data),
                                    self.interfaces[port].wenable(1))
    update_count = None
    last_one = None
    for offset, count, (out_size, out_stride) in zip(offsets, count_list,
                                                     pattern):
        self.seq.If(ext_cond, vtypes.Not(running))(count(out_size - 1),
                                                   offset(0))
        # a dimension advances only when all inner dimensions wrapped
        self.seq.If(raw_valid, running,
                    update_count)(count.dec(),
                                  offset(offset + out_stride))
        self.seq.If(raw_valid, running, update_count,
                    count == 0)(count(out_size - 1), offset(0))
        if update_count is None:
            update_count = count == 0
        else:
            update_count = vtypes.Ands(update_count, count == 0)
        if last_one is None:
            last_one = count == 0
        else:
            last_one = vtypes.Ands(last_one, count == 0)
    # finished when every dimension's counter hit zero simultaneously
    self.seq.If(raw_valid, last_one)(running(0), last(1))
    # de-assert
    self.seq.Delay(1)(self.interfaces[port].wenable(0), last(0))
    done = last
    return done
def read_dataflow_pattern(self, port, addr, pattern,
                          cond=None, point=0, signed=False):
    """ @return data, last, done

    Streams data from RAM `port` following a multi-dimensional access
    `pattern` of (size, stride) pairs as dataflow variables. The first
    dimension advances the address directly; outer dimensions contribute
    per-dimension offset registers summed into `offset_addr`.
    """
    if not isinstance(pattern, (tuple, list)):
        raise TypeError('pattern must be list or tuple.')
    if not pattern:
        raise ValueError(
            'pattern must have one (size, stride) pair at least.')
    if not isinstance(pattern[0], (tuple, list)):
        # normalize a single (size, stride) pair into a 1-element pattern
        pattern = (pattern, )
    data_valid = self.m.TmpReg(initval=0)
    last_valid = self.m.TmpReg(initval=0)
    data_ready = self.m.TmpWire()
    last_ready = self.m.TmpWire()
    data_ready.assign(1)
    last_ready.assign(1)
    data_ack = vtypes.Ors(data_ready, vtypes.Not(data_valid))
    last_ack = vtypes.Ors(last_ready, vtypes.Not(last_valid))
    ext_cond = make_condition(cond)
    data_cond = make_condition(data_ack, last_ack)
    prev_data_cond = self.seq.Prev(data_cond, 1)
    # hold the previous word when the consumer was stalled last cycle
    data = self.m.TmpWireLike(self.interfaces[port].rdata)
    prev_data = self.seq.Prev(data, 1)
    data.assign(
        vtypes.Mux(prev_data_cond, self.interfaces[port].rdata, prev_data))
    next_valid_on = self.m.TmpReg(initval=0)
    next_valid_off = self.m.TmpReg(initval=0)
    next_last = self.m.TmpReg(initval=0)
    last = self.m.TmpReg(initval=0)
    running = self.m.TmpReg(initval=0)
    next_addr = self.m.TmpWire(self.addrwidth)
    # base addr + offsets of the outer dimensions (innermost has none)
    offset_addr = self.m.TmpWire(self.addrwidth)
    offsets = [
        self.m.TmpReg(self.addrwidth, initval=0) for _ in pattern[1:]
    ]
    offset_addr_value = addr
    for offset in offsets:
        offset_addr_value = offset + offset_addr_value
    offset_addr.assign(offset_addr_value)
    # placeholder for the innermost dimension, which has no offset reg
    offsets.insert(0, None)
    count_list = [
        self.m.TmpReg(out_size.bit_length() + 1, initval=0)
        for (out_size, out_stride) in pattern
    ]
    # valid pipeline: on -> off -> idle, tracking the read latency
    self.seq.If(data_cond, next_valid_off)(last(0),
                                           data_valid(0),
                                           last_valid(0),
                                           next_valid_off(0))
    self.seq.If(data_cond, next_valid_on)(data_valid(1),
                                          last_valid(1),
                                          last(next_last),
                                          next_last(0),
                                          next_valid_on(0),
                                          next_valid_off(1))
    # start when idle
    self.seq.If(ext_cond, vtypes.Not(running),
                vtypes.Not(next_last),
                vtypes.Not(last))(self.interfaces[port].addr(addr),
                                  running(1),
                                  next_valid_on(1))
    self.seq.If(data_cond,
                running)(self.interfaces[port].addr(next_addr),
                         next_valid_on(1),
                         next_last(0))
    update_count = None
    update_offset = None
    update_addr = None
    last_one = None
    stride_value = None
    carry = None
    for offset, count, (out_size, out_stride) in zip(offsets, count_list,
                                                     pattern):
        self.seq.If(ext_cond, vtypes.Not(running),
                    vtypes.Not(next_last),
                    vtypes.Not(last))(count(out_size - 1))
        # a dimension advances only when all inner dimensions wrapped
        self.seq.If(data_cond, running, update_count)(count.dec())
        self.seq.If(data_cond, running, update_count,
                    count == 0)(count(out_size - 1))
        if offset is not None:
            self.seq.If(ext_cond, vtypes.Not(running),
                        vtypes.Not(next_last),
                        vtypes.Not(last))(offset(0))
            self.seq.If(data_cond, running, update_offset,
                        vtypes.Not(carry))(offset(offset + out_stride))
            self.seq.If(data_cond, running, update_offset,
                        count == 0)(offset(0))
        if update_count is None:
            update_count = count == 0
        else:
            update_count = vtypes.Ands(update_count, count == 0)
        if update_offset is None:
            update_offset = vtypes.Mux(out_size == 1, 1, count == 1)
        else:
            update_offset = vtypes.Ands(update_offset, count == carry)
        if update_addr is None:
            update_addr = count == 0
        else:
            update_addr = vtypes.Mux(carry, count == 0, update_addr)
        if last_one is None:
            last_one = count == 0
        else:
            last_one = vtypes.Ands(last_one, count == 0)
        if stride_value is None:
            stride_value = out_stride
        else:
            stride_value = vtypes.Mux(carry, out_stride, stride_value)
        if carry is None:
            carry = out_size == 1
        else:
            carry = vtypes.Ands(carry, out_size == 1)
    # next address: jump to the offset base on wrap, else step by stride
    next_addr.assign(
        vtypes.Mux(update_addr, offset_addr,
                   self.interfaces[port].addr + stride_value))
    self.seq.If(data_cond, running, last_one)(running(0), next_last(1))
    df = self.df if self.df is not None else dataflow
    df_data = df.Variable(data, data_valid, data_ready,
                          width=self.datawidth, point=point, signed=signed)
    df_last = df.Variable(last, last_valid, last_ready, width=1)
    done = last
    return df_data, df_last, done
def read_dataflow_reuse(self, port, addr, length=1, stride=1,
                        reuse_size=1, num_outputs=1,
                        cond=None, point=0, signed=False):
    """ @return data, last, done

    Streams RAM data with reuse: each fetched group of `num_outputs` words
    is emitted `reuse_size` times before the next group is prefetched.
    Uses a temporary FSM for the fetch pipeline and the shared seq for the
    output handshake. Returns num_outputs data streams plus (last, done).
    """
    if not isinstance(num_outputs, int):
        raise TypeError('num_outputs must be int')
    data_valid = [self.m.TmpReg(initval=0) for _ in range(num_outputs)]
    last_valid = self.m.TmpReg(initval=0)
    data_ready = [self.m.TmpWire() for _ in range(num_outputs)]
    last_ready = self.m.TmpWire()
    for r in data_ready:
        r.assign(1)
    last_ready.assign(1)
    # all outputs must be able to accept before any of them advances
    data_ack = vtypes.Ands(*[
        vtypes.Ors(r, vtypes.Not(v))
        for v, r in zip(data_valid, data_ready)
    ])
    last_ack = vtypes.Ors(last_ready, vtypes.Not(last_valid))
    ext_cond = make_condition(cond)
    data_cond = make_condition(data_ack, last_ack)
    counter = self.m.TmpReg(length.bit_length() + 1, initval=0)
    last = self.m.TmpReg(initval=0)
    # double-buffer: reuse_data is emitted while next_reuse_data prefetches
    reuse_data = [
        self.m.TmpReg(self.datawidth, initval=0)
        for _ in range(num_outputs)
    ]
    next_reuse_data = [
        self.m.TmpReg(self.datawidth, initval=0)
        for _ in range(num_outputs)
    ]
    reuse_count = self.m.TmpReg(reuse_size.bit_length() + 1, initval=0)
    fill_reuse_count = self.m.TmpReg(initval=0)
    fetch_done = self.m.TmpReg(initval=0)
    fsm = TmpFSM(self.m, self.clk, self.rst)
    # initial state
    fsm.If(ext_cond)(self.interfaces[port].addr(addr - stride),
                     fetch_done(0),
                     counter(length))
    fsm.If(ext_cond, length > 0).goto_next()
    # initial prefetch state
    for n in next_reuse_data:
        fsm(
            self.interfaces[port].addr(self.interfaces[port].addr + stride),
            counter(vtypes.Mux(counter > 0, counter - 1, counter)))
        # RAM read latency: data arrives two states later
        fsm.Delay(2)(n(self.interfaces[port].rdata))
        fsm.goto_next()
    fsm.goto_next()
    fsm.goto_next()
    # initial update state
    for n, r in zip(next_reuse_data, reuse_data):
        fsm(r(n))
    fsm(fill_reuse_count(1), fetch_done(counter == 0))
    fsm.Delay(1)(fill_reuse_count(0))
    fsm.goto_next()
    # prefetch state
    read_start_state = fsm.current
    for n in next_reuse_data:
        fsm(
            self.interfaces[port].addr(self.interfaces[port].addr + stride),
            counter(vtypes.Mux(counter > 0, counter - 1, counter)))
        fsm.Delay(2)(n(self.interfaces[port].rdata))
        fsm.goto_next()
    fsm.goto_next()
    fsm.goto_next()
    # update state
    for n, r in zip(next_reuse_data, reuse_data):
        fsm.If(data_cond, reuse_count == 0)(r(n))
    fsm.If(data_cond,
           reuse_count == 0)(fill_reuse_count(vtypes.Not(fetch_done)),
                             fetch_done(counter == 0))
    fsm.Delay(1)(fill_reuse_count(0))
    # next -> prefetch state or initial state
    fsm.If(data_cond, reuse_count == 0, counter == 0).goto_init()
    fsm.If(data_cond, reuse_count == 0, counter > 0).goto(read_start_state)
    # output signal control
    self.seq.If(data_cond, last_valid)(last(0),
                                       [d(0) for d in data_valid],
                                       last_valid(0))
    self.seq.If(fill_reuse_count)(reuse_count(reuse_size))
    self.seq.If(data_cond, reuse_count > 0)(reuse_count.dec(),
                                            [d(1) for d in data_valid],
                                            last_valid(1),
                                            last(0))
    self.seq.If(data_cond, reuse_count == 1, fetch_done)(last(1))
    df = self.df if self.df is not None else dataflow
    df_last = df.Variable(last, last_valid, last_ready, width=1)
    done = last
    df_reuse_data = [
        df.Variable(d, v, r, width=self.datawidth,
                    point=point, signed=signed)
        for d, v, r in zip(reuse_data, data_valid, data_ready)
    ]
    return tuple(df_reuse_data + [df_last, done])
def read_dataflow_reuse_pattern(self, port, addr, pattern,
                                reuse_size=1, num_outputs=1,
                                cond=None, point=0, signed=False):
    """ @return data, last, done

    Pattern-based variant of read_dataflow_reuse: prefetched addresses
    follow a multi-dimensional (size, stride) `pattern`, and each fetched
    group of `num_outputs` words is emitted `reuse_size` times.
    Returns num_outputs data streams plus (last, done).
    """
    if not isinstance(pattern, (tuple, list)):
        raise TypeError('pattern must be list or tuple.')
    if not pattern:
        raise ValueError(
            'pattern must have one (size, stride) pair at least.')
    if not isinstance(pattern[0], (tuple, list)):
        # normalize a single (size, stride) pair into a 1-element pattern
        pattern = (pattern, )
    if not isinstance(num_outputs, int):
        raise TypeError('num_outputs must be int')
    data_valid = [self.m.TmpReg(initval=0) for _ in range(num_outputs)]
    last_valid = self.m.TmpReg(initval=0)
    data_ready = [self.m.TmpWire() for _ in range(num_outputs)]
    last_ready = self.m.TmpWire()
    for r in data_ready:
        r.assign(1)
    last_ready.assign(1)
    # all outputs must be able to accept before any of them advances
    data_ack = vtypes.Ands(*[
        vtypes.Ors(r, vtypes.Not(v))
        for v, r in zip(data_valid, data_ready)
    ])
    last_ack = vtypes.Ors(last_ready, vtypes.Not(last_valid))
    ext_cond = make_condition(cond)
    data_cond = make_condition(data_ack, last_ack)
    next_addr = self.m.TmpWire(self.addrwidth)
    # base addr + offsets of the outer dimensions (innermost has none)
    offset_addr = self.m.TmpWire(self.addrwidth)
    offsets = [
        self.m.TmpReg(self.addrwidth, initval=0) for _ in pattern[1:]
    ]
    offset_addr_value = addr
    for offset in offsets:
        offset_addr_value = offset + offset_addr_value
    offset_addr.assign(offset_addr_value)
    # placeholder for the innermost dimension, which has no offset reg
    offsets.insert(0, None)
    count_list = [
        self.m.TmpReg(out_size.bit_length() + 1, initval=0)
        for (out_size, out_stride) in pattern
    ]
    last = self.m.TmpReg(initval=0)
    # double-buffer: reuse_data is emitted while next_reuse_data prefetches
    reuse_data = [
        self.m.TmpReg(self.datawidth, initval=0)
        for _ in range(num_outputs)
    ]
    next_reuse_data = [
        self.m.TmpReg(self.datawidth, initval=0)
        for _ in range(num_outputs)
    ]
    reuse_count = self.m.TmpReg(reuse_size.bit_length() + 1, initval=0)
    fill_reuse_count = self.m.TmpReg(initval=0)
    prefetch_done = self.m.TmpReg(initval=0)
    fetch_done = self.m.TmpReg(initval=0)
    update_addr = None
    stride_value = None
    carry = None
    for offset, count, (out_size, out_stride) in zip(offsets, count_list,
                                                     pattern):
        if update_addr is None:
            update_addr = count == 0
        else:
            update_addr = vtypes.Mux(carry, count == 0, update_addr)
        if stride_value is None:
            stride_value = out_stride
        else:
            stride_value = vtypes.Mux(carry, out_stride, stride_value)
        if carry is None:
            carry = out_size == 1
        else:
            carry = vtypes.Ands(carry, out_size == 1)
    # next address: jump to the offset base on wrap, else step by stride
    next_addr.assign(
        vtypes.Mux(update_addr, offset_addr,
                   self.interfaces[port].addr + stride_value))
    fsm = TmpFSM(self.m, self.clk, self.rst)
    # initial state
    fsm.If(ext_cond)(self.interfaces[port].addr(addr - stride_value),
                     prefetch_done(0),
                     fetch_done(0))
    first = True
    for offset, count, (out_size, out_stride) in zip(offsets, count_list,
                                                     pattern):
        # innermost counter starts one higher to absorb the addr preload
        fsm.If(ext_cond)(count(out_size) if first else count(out_size - 1), )
        if offset is not None:
            fsm.If(ext_cond)(offset(0))
        first = False
    fsm.If(ext_cond).goto_next()
    # initial prefetch state
    for n in next_reuse_data:
        update_count = None
        update_offset = None
        last_one = None
        carry = None
        for offset, count, (out_size, out_stride) in zip(offsets,
                                                         count_list,
                                                         pattern):
            fsm.If(update_count)(count.dec())
            fsm.If(update_count, count == 0)(count(out_size - 1))
            fsm(self.interfaces[port].addr(next_addr))
            # RAM read latency: data arrives two states later
            fsm.Delay(2)(n(self.interfaces[port].rdata))
            if offset is not None:
                fsm.If(update_offset,
                       vtypes.Not(carry))(offset(offset + out_stride))
                fsm.If(update_offset, count == 0)(offset(0))
            if update_count is None:
                update_count = count == 0
            else:
                update_count = vtypes.Ands(update_count, count == 0)
            if update_offset is None:
                update_offset = vtypes.Mux(out_size == 1, 1, count == 1)
            else:
                update_offset = vtypes.Ands(update_offset, count == carry)
            if last_one is None:
                last_one = count == 0
            else:
                last_one = vtypes.Ands(last_one, count == 0)
            if carry is None:
                carry = out_size == 1
            else:
                carry = vtypes.Ands(carry, out_size == 1)
        fsm.goto_next()
    fsm.If(last_one)(prefetch_done(1))
    fsm.goto_next()
    fsm.goto_next()
    # initial update state
    for r, n in zip(reuse_data, next_reuse_data):
        fsm(r(n))
    fsm(fetch_done(prefetch_done),
        fill_reuse_count(vtypes.Not(fetch_done)))
    fsm.Delay(1)(fill_reuse_count(0))
    fsm.goto_next()
    # prefetch state
    read_start_state = fsm.current
    for n in next_reuse_data:
        update_count = None
        update_offset = None
        last_one = None
        carry = None
        for offset, count, (out_size, out_stride) in zip(offsets,
                                                         count_list,
                                                         pattern):
            fsm.If(update_count)(count.dec())
            fsm.If(update_count, count == 0)(count(out_size - 1))
            fsm(self.interfaces[port].addr(next_addr))
            fsm.Delay(2)(n(self.interfaces[port].rdata))
            if offset is not None:
                fsm.If(update_offset,
                       vtypes.Not(carry))(offset(offset + out_stride))
                fsm.If(update_offset, count == 0)(offset(0))
            if update_count is None:
                update_count = count == 0
            else:
                update_count = vtypes.Ands(update_count, count == 0)
            if update_offset is None:
                update_offset = vtypes.Mux(out_size == 1, 1, count == 1)
            else:
                update_offset = vtypes.Ands(update_offset, count == carry)
            if last_one is None:
                last_one = count == 0
            else:
                last_one = vtypes.Ands(last_one, count == 0)
            if carry is None:
                carry = out_size == 1
            else:
                carry = vtypes.Ands(carry, out_size == 1)
        fsm.goto_next()
    fsm.If(last_one)(prefetch_done(1))
    fsm.goto_next()
    fsm.goto_next()
    # update state
    for r, n in zip(reuse_data, next_reuse_data):
        fsm.If(data_cond, reuse_count == 0)(r(n))
    fsm.If(data_cond,
           reuse_count == 0)(fetch_done(prefetch_done),
                             fill_reuse_count(vtypes.Not(fetch_done)))
    fsm.Delay(1)(fill_reuse_count(0))
    # next -> prefetch state or initial state
    fsm.If(data_cond, reuse_count == 0, fetch_done).goto_init()
    fsm.If(data_cond, reuse_count == 0,
           vtypes.Not(fetch_done)).goto(read_start_state)
    # output signal control
    self.seq.If(data_cond, last_valid)(last(0),
                                       [d(0) for d in data_valid],
                                       last_valid(0))
    self.seq.If(fill_reuse_count)(reuse_count(reuse_size))
    self.seq.If(data_cond, reuse_count > 0)(reuse_count.dec(),
                                            [d(1) for d in data_valid],
                                            last_valid(1),
                                            last(0))
    self.seq.If(data_cond, reuse_count == 1, fetch_done)(last(1))
    df = self.df if self.df is not None else dataflow
    df_last = df.Variable(last, last_valid, last_ready, width=1)
    done = last
    df_reuse_data = [
        df.Variable(d, v, r,
                    width=self.datawidth, point=point, signed=signed)
        for d, v, r in zip(reuse_data, data_valid, data_ready)
    ]
    return tuple(df_reuse_data + [df_last, done])
def _make_tmp(self, data, valid, ready, width=None, initval=0, acc_ops=()):
    """Allocate a registered temporary for a dataflow edge.

    Creates a new data register (and, when applicable, a valid register
    and a ready wire) for the current tmp slot, wires *data* into it
    under the proper valid/ready handshake condition, and propagates
    back-pressure to the upstream ready signals.

    :param data: source signal latched into the new data register.
    :param valid: source valid signal, or None when the edge carries no
        valid handshake.
    :param ready: iterable of upstream ready signals (entries may be None).
    :param width: bit width of the data register.
    :param initval: reset value of the data register.
    :param acc_ops: optional accumulation operators folded into the next
        register value (binary ops receive ``(tmp_data, data)``, unary
        ops receive ``(data)``), turning the register into an accumulator.
    :return: ``(tmp_data, tmp_valid, tmp_ready)`` — tmp_valid/tmp_ready
        are None when not created.
    """
    tmp_data = self._add_reg(
        'data', self.tmp_count, width=width, initval=initval)

    # A valid register exists only when the source edge has a valid signal.
    if valid is not None:
        tmp_valid = self._add_reg('valid', self.tmp_count, initval=0)
    else:
        tmp_valid = None

    # A ready wire exists only when there is at least one upstream ready.
    if ready:
        tmp_ready = self._add_wire('ready', self.tmp_count)
    else:
        tmp_ready = None

    self.tmp_count += 1

    # all ready: conjunction of every non-None upstream ready signal
    all_ready = None
    for r in ready:
        if r is None:
            continue
        if all_ready is None:
            all_ready = r
        else:
            all_ready = vtypes.AndList(all_ready, r)

    # data: update the register when the source is valid, all upstream
    # consumers are ready, and the downstream side can accept a value
    # (its ready asserted, or no value currently held in tmp_valid).
    data_cond_vars = []
    if valid is not None:
        data_cond_vars.append(valid)
    if tmp_ready is not None:
        data_cond_vars.append(all_ready)
        if tmp_valid is not None:
            data_cond_vars.append(vtypes.OrList(
                tmp_ready, vtypes.Not(tmp_valid)))
        else:
            data_cond_vars.append(tmp_ready)

    if len(data_cond_vars) == 0:
        # no handshake at all: unconditional update
        data_cond = None
    elif len(data_cond_vars) == 1:
        data_cond = data_cond_vars[0]
    else:
        data_cond = vtypes.AndList(*data_cond_vars)

    # Accumulator: fold each op into the value written to the register.
    for op in acc_ops:
        if not isinstance(op, type):
            # plain callable (not a vtypes operator class): op(acc, new)
            data = op(tmp_data, data)
        elif issubclass(op, vtypes._BinaryOperator):
            data = op(tmp_data, data)
        elif issubclass(op, vtypes._UnaryOperator):
            data = op(data)

        # Each op must yield a synthesizable numeric expression.
        if not isinstance(data, vtypes._Numeric):
            raise TypeError("Operator '%s' returns unsupported object type '%s'."
                            % (str(op), str(type(data))))

    self.seq.add(tmp_data(data), cond=data_cond)

    # valid: track the source valid under the same handshake as data.
    valid_cond_vars = []
    if tmp_ready is not None:
        valid_cond_vars.append(all_ready)
        # NOTE(review): vtypes.Not(tmp_valid) is built even though tmp_valid
        # may be None when valid is None — presumably callers always pass a
        # valid together with readys on this path; TODO confirm.
        ordy = vtypes.OrList(tmp_ready, vtypes.Not(tmp_valid))
        valid_cond_vars.append(ordy)

    if len(valid_cond_vars) == 0:
        valid_cond = None
    elif len(valid_cond_vars) == 1:
        valid_cond = valid_cond_vars[0]
    else:
        valid_cond = vtypes.AndList(*valid_cond_vars)

    if tmp_valid is not None:
        # Clear the held valid once the downstream consumer takes the value;
        # the subsequent assignment (same cycle, later priority) may re-set it.
        if tmp_ready is not None:
            self.seq.add(tmp_valid(0), cond=vtypes.AndList(
                tmp_valid, tmp_ready))
        self.seq.add(tmp_valid(valid), cond=valid_cond)

    # ready: propagate back-pressure to every upstream ready signal —
    # upstream may advance when we can accept and the source is valid.
    if tmp_ready is not None:
        ordy = vtypes.AndList(vtypes.OrList(
            tmp_ready, vtypes.Not(tmp_valid)), valid)
        for r in ready:
            _connect_ready(self.m, r, ordy)

    return tmp_data, tmp_valid, tmp_ready
def add_dump(self, m, seq, input_vars, output_vars, all_vars):
    """Insert simulation-time dump ($display) logic for the pipeline.

    For every variable selected by ``self.dump_mode``, computes aligned
    format strings and appends a Display statement to *seq* that fires
    when ``dump_enable`` (delayed to the variable's pipeline stage) is
    asserted and ``dump_mask`` is not.

    :param m: target module object (not referenced in this body;
        presumably kept for interface symmetry — TODO confirm).
    :param seq: Seq object receiving the dump statements.
    :param input_vars: pipeline input variables.
    :param output_vars: pipeline output variables.
    :param all_vars: every variable in the pipeline.
    """
    pipeline_depth = self.pipeline_depth()
    # decimal digit count needed to print a stage index (at least 1)
    log_pipeline_depth = max(
        int(math.ceil(math.log(max(pipeline_depth, 10), 10))), 1)

    seq(
        self.dump_step(1)
    )
    # advance the step counter while dump_enable is visible at any stage
    for i in range(pipeline_depth + 1):
        seq.If(seq.Prev(self.dump_enable, i))(
            self.dump_step.inc()
        )

    def get_name(obj):
        # display name of a signal: its .name, or the class name of a constant
        if hasattr(obj, 'name'):
            return obj.name
        if isinstance(obj, vtypes._Constant):
            return obj.__class__.__name__
        raise TypeError()

    # --- pass 1: longest displayed name, for column alignment ---
    # "+ 6" reserves room for the "(in) "/"(out) " label prefix.
    longest_name_len = 0
    for input_var in sorted(input_vars, key=lambda x: x.object_id):
        if not (self.dump_mode == 'all' or
                self.dump_mode == 'stream' or
                self.dump_mode == 'input' or
                self.dump_mode == 'inout' or
                (self.dump_mode == 'selective' and
                 hasattr(input_var, 'dump') and input_var.dump)):
            continue
        name = get_name(input_var.sig_data)
        length = len(name) + 6
        longest_name_len = max(longest_name_len, length)

    for var in sorted(all_vars,
                      key=lambda x: (-1, x.object_id)
                      if x.end_stage is None else (x.end_stage, x.object_id)):
        if not (self.dump_mode == 'all' or
                self.dump_mode == 'stream' or
                (self.dump_mode == 'selective' and
                 hasattr(var, 'dump') and var.dump)):
            continue
        name = get_name(var.sig_data)
        length = len(name) + 6
        longest_name_len = max(longest_name_len, length)

    for output_var in sorted(output_vars, key=lambda x: x.object_id):
        if not (self.dump_mode == 'all' or
                self.dump_mode == 'stream' or
                self.dump_mode == 'output' or
                self.dump_mode == 'inout' or
                (self.dump_mode == 'selective' and
                 hasattr(output_var, 'dump') and output_var.dump)):
            continue
        name = get_name(output_var.output_sig_data)
        length = len(name) + 6
        longest_name_len = max(longest_name_len, length)

    # --- pass 2: widest printed value, in digits of the dump base ---
    longest_var_len = 0
    for var in sorted(all_vars,
                      key=lambda x: (-1, x.object_id)
                      if x.start_stage is None else (x.start_stage, x.object_id)):
        bitwidth = vtypes.get_width(var.sig_data)
        if bitwidth is None:
            bitwidth = 1
        if bitwidth <= 0:
            bitwidth = 1
        base = (var.dump_base if hasattr(var, 'dump_base')
                else self.dump_base)
        # digits required to print `bitwidth` bits in `base`
        total_length = int(math.ceil(bitwidth / math.log(base, 2)))
        #point_length = int(math.ceil(var.point / math.log(base, 2)))
        #point_length = max(point_length, 8)
        #longest_var_len = max(longest_var_len, total_length, point_length)
        longest_var_len = max(longest_var_len, total_length)

    # --- pass 3: build a $display format string per variable ---
    # base_char/prefix follow Verilog format specifiers: %b/%o/%d/%x,
    # with %g for decimal fixed-point values.
    for input_var in sorted(input_vars, key=lambda x: x.object_id):
        base = (input_var.dump_base if hasattr(input_var, 'dump_base')
                else self.dump_base)
        base_char = ('b' if base == 2 else
                     'o' if base == 8 else
                     'd' if base == 10 and input_var.point <= 0 else
                     # 'f' if base == 10 and input_var.point > 0 else
                     'g' if base == 10 and input_var.point > 0 else
                     'x')
        prefix = ('0b' if base == 2 else
                  '0o' if base == 8 else
                  ' ' if base == 10 else
                  '0x')
        # if base_char == 'f':
        #     point_length = int(math.ceil(input_var.point / math.log(base, 2)))
        #     point_length = max(point_length, 8)
        #     fmt_list = [prefix, '%',
        #                 '%d.%d' % (longest_var_len + 1, point_length), base_char]
        # if base_char == 'g':
        #     fmt_list = [prefix, '%', base_char]
        # else:
        #     fmt_list = [prefix, '%', '%d' % (longest_var_len + 1), base_char]
        fmt_list = [prefix, '%', '%d' % (longest_var_len + 1), base_char]
        if input_var not in all_vars:
            fmt_list.append(' (unused)')
        input_var.dump_fmt = ''.join(fmt_list)

    for output_var in sorted(output_vars, key=lambda x: x.object_id):
        base = (output_var.dump_base if hasattr(output_var, 'dump_base')
                else self.dump_base)
        base_char = ('b' if base == 2 else
                     'o' if base == 8 else
                     'd' if base == 10 and output_var.point <= 0 else
                     # 'f' if base == 10 and output_var.point > 0 else
                     'g' if base == 10 and output_var.point > 0 else
                     'x')
        prefix = ('0b' if base == 2 else
                  '0o' if base == 8 else
                  ' ' if base == 10 else
                  '0x')
        # if base_char == 'f':
        #     point_length = int(math.ceil(output_var.point / math.log(base, 2)))
        #     point_length = max(point_length, 8)
        #     fmt_list = [prefix, '%',
        #                 '%d.%d' % (longest_var_len + 1, point_length), base_char]
        # if base_char == 'g':
        #     fmt_list = [prefix, '%', base_char]
        # else:
        #     fmt_list = [prefix, '%', '%d' % (longest_var_len + 1), base_char]
        fmt_list = [prefix, '%', '%d' % (longest_var_len + 1), base_char]
        if output_var not in all_vars:
            fmt_list.append(' (unused)')
        output_var.dump_fmt = ''.join(fmt_list)

    for var in sorted(all_vars,
                      key=lambda x: (-1, x.object_id)
                      if x.start_stage is None else (x.start_stage, x.object_id)):
        base = (var.dump_base if hasattr(var, 'dump_base')
                else self.dump_base)
        base_char = ('b' if base == 2 else
                     'o' if base == 8 else
                     'd' if base == 10 and var.point <= 0 else
                     # 'f' if base == 10 and var.point > 0 else
                     'g' if base == 10 and var.point > 0 else
                     'x')
        prefix = ('0b' if base == 2 else
                  '0o' if base == 8 else
                  ' ' if base == 10 else
                  '0x')
        # if base_char == 'f':
        #     point_length = int(math.ceil(var.point / math.log(base, 2)))
        #     point_length = max(point_length, 8)
        #     fmt_list = [prefix, '%',
        #                 '%d.%d' % (longest_var_len + 1, point_length), base_char]
        # if base_char == 'g':
        #     fmt_list = [prefix, '%', base_char]
        # else:
        #     fmt_list = [prefix, '%', '%d' % (longest_var_len + 1), base_char]
        fmt_list = [prefix, '%', '%d' % (longest_var_len + 1), base_char]
        var.dump_fmt = ''.join(fmt_list)

    # --- pass 4: emit a Display statement per selected variable ---
    # NOTE(review): `enables` is collected but not read within this body;
    # presumably consumed by logic outside this view — confirm before removal.
    enables = []
    for input_var in sorted(input_vars, key=lambda x: x.object_id):
        if not (self.dump_mode == 'all' or
                self.dump_mode == 'stream' or
                self.dump_mode == 'input' or
                self.dump_mode == 'inout' or
                (self.dump_mode == 'selective' and
                 hasattr(input_var, 'dump') and input_var.dump)):
            continue
        vfmt = input_var.dump_fmt
        name = get_name(input_var.sig_data)
        name_alignment = ' ' * (longest_name_len - len(name) - len('(in) '))
        fmt = ''.join(['<', self.name, ' step:%d, ', 'stage:%', str(
            log_pipeline_depth), 'd, age:%d> (in) ',
            name_alignment, name, ' = ', vfmt])
        stage = input_var.end_stage if input_var.end_stage is not None else 0
        # delay enable/step to the stage where this value is alive
        enable = seq.Prev(self.dump_enable, stage)
        enables.append(enable)
        age = seq.Prev(self.dump_step, stage) - 1
        # fixed-point: scale to a real for %g; negative point scales up
        if input_var.point > 0:
            sig_data = vtypes.Div(vtypes.SystemTask('itor', input_var.sig_data),
                                  1.0 * (2 ** input_var.point))
        elif input_var.point < 0:
            sig_data = vtypes.Times(input_var.sig_data, 2 ** -input_var.point)
        else:
            sig_data = input_var.sig_data
        seq.If(enable, vtypes.Not(self.dump_mask))(
            vtypes.Display(fmt, self.dump_step, stage, age, sig_data)
        )

    for var in sorted(all_vars,
                      key=lambda x: (-1, x.object_id)
                      if x.end_stage is None else (x.end_stage, x.object_id)):
        if not (self.dump_mode == 'all' or
                self.dump_mode == 'stream' or
                (self.dump_mode == 'selective' and
                 hasattr(var, 'dump') and var.dump)):
            continue
        vfmt = var.dump_fmt
        name = get_name(var.sig_data)
        name_alignment = ' ' * (longest_name_len - len(name))
        stage = var.end_stage if var.end_stage is not None else 0
        fmt = ''.join(['<', self.name, ' step:%d, ',
                       'stage:%', str(log_pipeline_depth), 'd, age:%d> ',
                       name_alignment, name, ' = ', vfmt])
        enable = seq.Prev(self.dump_enable, stage)
        enables.append(enable)
        age = seq.Prev(self.dump_step, stage) - 1
        if var.point > 0:
            sig_data = vtypes.Div(vtypes.SystemTask('itor', var.sig_data),
                                  1.0 * (2 ** var.point))
        elif var.point < 0:
            sig_data = vtypes.Times(var.sig_data, 2 ** -var.point)
        else:
            sig_data = var.sig_data
        seq.If(enable, vtypes.Not(self.dump_mask))(
            vtypes.Display(fmt, self.dump_step, stage, age, sig_data)
        )

    for output_var in sorted(output_vars, key=lambda x: x.object_id):
        if not (self.dump_mode == 'all' or
                self.dump_mode == 'stream' or
                self.dump_mode == 'output' or
                self.dump_mode == 'inout' or
                (self.dump_mode == 'selective' and
                 hasattr(output_var, 'dump') and output_var.dump)):
            continue
        vfmt = output_var.dump_fmt
        name = get_name(output_var.output_sig_data)
        name_alignment = ' ' * (longest_name_len - len(name) - len('(out) '))
        fmt = ''.join(['<', self.name, ' step:%d, ', 'stage:%', str(
            log_pipeline_depth), 'd, age:%d> (out) ',
            name_alignment, name, ' = ', vfmt])
        stage = output_var.end_stage if output_var.end_stage is not None else 0
        enable = seq.Prev(self.dump_enable, stage)
        enables.append(enable)
        age = seq.Prev(self.dump_step, stage) - 1
        if output_var.point > 0:
            sig_data = vtypes.Div(vtypes.SystemTask('itor',
                                                    output_var.output_sig_data),
                                  1.0 * (2 ** output_var.point))
        elif output_var.point < 0:
            sig_data = vtypes.Times(output_var.output_sig_data,
                                    2 ** -output_var.point)
        else:
            sig_data = output_var.output_sig_data
        seq.If(enable, vtypes.Not(self.dump_mask))(
            vtypes.Display(fmt, self.dump_step, stage, age, sig_data)
        )