def __init__(s, opaque_nbits, data_nbits):
    """Declare memory-response message fields sized from the given widths.

    Args:
        opaque_nbits: width of the opaque (tag) field in bits.
        data_nbits: width of the data payload in bits; assumed to be a
            multiple of 8, since the len field counts bytes.
    """
    s.type_ = BitField(MemMsgType.bits)
    s.opaque = BitField(opaque_nbits)
    s.test = BitField(2)
    s.stat = BitField(MemMsgStatus.bits)
    # Floor division: `data_nbits / 8` is float division under Python 3,
    # and clog2 needs an integer count. `//` also matches the project's
    # own convention (e.g. XLEN // 8 elsewhere in this codebase).
    s.len = BitField(clog2(data_nbits // 8))
    s.data = BitField(data_nbits)
def __init__(s, entry_type, num_entries, KillOpaqueType, KillArgType):
    """Declare the method interface of a reorder buffer.

    Exposes five methods: add, check_done, peek, free, and kill_notify.

    Args:
        entry_type: type of the values stored in each buffer entry.
        num_entries: buffer capacity; indices are clog2(num_entries) wide.
        KillOpaqueType: type of the opaque kill tag stored with each entry.
        KillArgType: type of the message delivered on kill_notify.
    """
    s.EntryType = entry_type
    s.EntryIdx = clog2(num_entries)
    s.NumEntries = num_entries
    s.KillOpaqueType = KillOpaqueType
    s.KillArgType = KillArgType

    # Build each MethodSpec into a named local so the list handed to the
    # parent constructor reads top-down.
    add = MethodSpec(
        'add',
        args={
            'idx': s.EntryIdx,
            'value': s.EntryType,
            'kill_opaque': s.KillOpaqueType,
        },
        rets=None,
        call=True,
        rdy=False,
    )
    check_done = MethodSpec(
        'check_done',
        args={'idx': s.EntryIdx},
        rets={'is_rdy': Bits(1)},
        call=False,
        rdy=False,
    )
    peek = MethodSpec(
        'peek',
        args={'idx': s.EntryIdx},
        rets={'value': s.EntryType},
        call=False,
        rdy=False,
    )
    free = MethodSpec(
        'free',
        args=None,
        rets=None,
        call=True,
        rdy=False,
    )
    kill_notify = MethodSpec(
        'kill_notify',
        args={'msg': s.KillArgType},
        rets=None,
        call=False,
        rdy=False,
    )

    super(ReorderBufferInterface, s).__init__(
        [add, check_done, peek, free, kill_notify])
def __init__(s, opaque_nbits, addr_nbits, data_nbytes):
    """Declare message fields: type, opaque tag, address, length, data.

    Args:
        opaque_nbits: width of the opaque (tag) field in bits.
        addr_nbits: width of the address field in bits.
        data_nbytes: size of the data payload in bytes; the len_ field
            is sized to count up to data_nbytes.
    """
    # Record the configuration so users of this message type can size
    # their own ports from it.
    len_nbits = clog2(data_nbytes)
    s.opaque_nbits = opaque_nbits
    s.addr_nbits = addr_nbits
    s.data_nbytes = data_nbytes
    s.len_nbits = len_nbits

    # Field declarations; the data field is sized in bits.
    s.type_ = BitField(MemMsgType.bits)
    s.opaque = BitField(opaque_nbits)
    s.addr = BitField(addr_nbits)
    s.len_ = BitField(len_nbits)
    s.data = BitField(8 * data_nbytes)
def __init__(s, opaque_nbits, test_nbits, data_nbytes):
    """Declare message fields: type, opaque tag, test, status, length, data.

    Args:
        opaque_nbits: width of the opaque (tag) field in bits.
        test_nbits: width of the test field in bits.
        data_nbytes: size of the data payload in bytes; the len_ field
            is sized to count up to data_nbytes.
    """
    # Record the configuration for downstream consumers.
    s.opaque_nbits = opaque_nbits
    s.test_nbits = test_nbits
    s.data_nbytes = data_nbytes

    # Field declarations; the data field is sized in bits. Note the
    # length width is computed locally (not stored on s, matching the
    # original interface of this struct).
    len_nbits = clog2(data_nbytes)
    s.type_ = BitField(MemMsgType.bits)
    s.opaque = BitField(opaque_nbits)
    s.test = BitField(test_nbits)
    s.stat = BitField(MemMsgStatus.bits)
    s.len_ = BitField(len_nbits)
    s.data = BitField(8 * data_nbytes)
def __init__(s, nbits, max_ops):
    """Declare the interface of a variable-step incrementer.

    Exposes one combinational method, inc(in, ops) -> out, operating on
    nbits-wide values and stepping by up to max_ops at once.
    """
    s.Data = Bits(nbits)
    # The ops argument ranges over [0, max_ops] inclusive, so it must
    # represent max_ops + 1 distinct values.
    s.Ops = Bits(clog2(max_ops + 1))

    inc_spec = MethodSpec(
        'inc',
        args={'in': s.Data, 'ops': s.Ops},
        rets={'out': s.Data},
        call=False,
        rdy=False,
    )
    super(WrapIncVarInterface, s).__init__([inc_spec])
def __init__(s, opaque_nbits, addr_nbits, data_nbits):
    """Declare memory-request message fields sized from the given widths.

    Args:
        opaque_nbits: width of the opaque (tag) field in bits.
        addr_nbits: width of the address field in bits.
        data_nbits: width of the data payload in bits; assumed to be a
            multiple of 8, since the len field counts bytes.
    """
    s.type_ = BitField(MemMsgType.bits)
    s.opaque = BitField(opaque_nbits)
    s.addr = BitField(addr_nbits)
    # Floor division: `data_nbits / 8` is float division under Python 3,
    # and clog2 needs an integer count. `//` also matches the project's
    # own convention (e.g. XLEN // 8 elsewhere in this codebase).
    s.len = BitField(clog2(data_nbits // 8))
    s.data = BitField(data_nbits)
from pymtl import *
from lizard.bitutil import clog2

# Machine word width in bits, and the same size in bytes.
XLEN = 64
XLEN_BYTES = XLEN // 8
# Instruction width in bits, and the same size in bytes.
ILEN = 32
ILEN_BYTES = ILEN // 8
# Width of a CSR specifier (12-bit CSR address space).
CSR_SPEC_NBITS = 12
# Capacity of the issue queues (general and memory).
NUM_ISSUE_SLOTS = 16
NUM_MEM_ISSUE_SLOTS = 8
# Reorder buffer capacity and the bits needed to index into it.
ROB_SIZE = 32
ROB_IDX_NBITS = clog2(ROB_SIZE)
# Width of the immediate produced by the decode stage.
DECODED_IMM_LEN = 21
# Address loaded into the PC on reset.
RESET_VECTOR = Bits(XLEN, 0x200)
# Architectural register file size and index width.
AREG_COUNT = 32
AREG_IDX_NBITS = clog2(AREG_COUNT)
# Physical register file size and index width (for renaming).
PREG_COUNT = 64
PREG_IDX_NBITS = clog2(PREG_COUNT)
# In-flight instructions are identified by their ROB index.
INST_IDX_NBITS = ROB_IDX_NBITS
# Maximum speculation depth; presumably the number of unresolved
# speculative branches allowed in flight -- confirm against users.
MAX_SPEC_DEPTH = 2
assert MAX_SPEC_DEPTH > 0
SPEC_IDX_NBITS = clog2(MAX_SPEC_DEPTH)
# One mask bit per speculation slot.
SPEC_MASK_NBITS = MAX_SPEC_DEPTH
def __init__(s, interface, ncycles):
    """Multi-cycle non-restoring divider built around a step unit.

    Performs a DataLen-bit division over `ncycles` cycles, running
    DataLen / ncycles non-restoring steps per cycle. Signed division is
    handled by dividing magnitudes and conditionally negating the
    quotient and remainder at the end.

    Args:
        interface: divider interface; s.interface.DataLen is the operand
            width in bits and must be a multiple of ncycles.
        ncycles: number of cycles a division takes.
    """
    UseInterface(s, interface)
    assert s.interface.DataLen % ncycles == 0
    # Number of divider steps performed per cycle.
    nsteps = s.interface.DataLen // ncycles
    # Index of the sign bit of an operand, and of the accumulator's
    # extra (sign) bit.
    END = s.interface.DataLen - 1
    AEND = s.interface.DataLen
    iface = NonRestoringDividerStepInterface(s.interface.DataLen)
    s.unit = NonRestoringDividerStep(iface, nsteps)
    # Accumulator holds the partial remainder; one bit wider than the
    # operands so it can go negative during non-restoring steps.
    s.acc = Register(RegisterInterface(s.interface.DataLen + 1, enable=True))
    s.divisor = Register(RegisterInterface(s.interface.DataLen, enable=True))
    # The dividend register doubles as the quotient being shifted in.
    s.dividend = Register(RegisterInterface(s.interface.DataLen, enable=True))
    # Set if we need to take two's complement at the end (quotient /
    # remainder respectively); latched only when a division starts.
    s.negate = Register(RegisterInterface(1, enable=True))
    s.negate_rem = Register(RegisterInterface(1, enable=True))
    s.connect(s.negate.write_call, s.div_call)
    s.connect(s.negate_rem.write_call, s.div_call)
    # The divisor is only written when a division starts.
    s.connect(s.divisor.write_call, s.div_call)
    # Connect up the step unit to the current state registers.
    s.connect(s.unit.div_acc, s.acc.read_data)
    s.connect(s.unit.div_divisor, s.divisor.read_data)
    s.connect(s.unit.div_dividend, s.dividend.read_data)
    # Down-counter of remaining cycles; needs to represent ncycles..0.
    s.counter = Register(RegisterInterface(clog2(ncycles + 1), enable=True))
    s.busy = Register(RegisterInterface(1, enable=True), reset_value=0)

    @s.combinational
    def handle_calls():
        # Ready to accept a new division when idle, or when the current
        # result is being taken this cycle.
        s.div_rdy.v = not s.busy.read_data or s.result_call
        # Result is ready once all cycles have elapsed.
        s.result_rdy.v = s.busy.read_data and s.counter.read_data == 0
        s.result_quotient.v = s.dividend.read_data
        s.result_rem.v = s.acc.read_data[:s.interface.DataLen]
        # Quotient is negative iff exactly one operand is negative;
        # remainder takes the sign of the dividend.
        s.negate.write_data.v = s.div_signed and (s.div_divisor[END] ^
                                                  s.div_dividend[END])
        s.negate_rem.write_data.v = s.div_signed and s.div_dividend[END]

    @s.combinational
    def handle_counter():
        # Count while running; reload to ncycles when a division starts.
        s.counter.write_call.v = s.counter.read_data != 0 or s.div_call
        s.counter.write_data.v = 0
        if s.div_call:
            s.counter.write_data.v = ncycles
        else:
            s.counter.write_data.v = s.counter.read_data - 1

    @s.combinational
    def set_div_regs():
        # Update state on the starting cycle and on every running cycle.
        s.acc.write_call.v = s.div_call or s.counter.read_data > 0
        s.dividend.write_call.v = s.div_call or s.counter.read_data > 0
        # Load the values: magnitudes of the operands (two's complement
        # if signed and negative), accumulator cleared.
        s.acc.write_data.v = 0
        s.divisor.write_data.v = 0
        s.divisor.write_data.v = ~s.div_divisor + 1 if (
            s.div_signed and s.div_divisor[END]) else s.div_divisor
        s.dividend.write_data.v = ~s.div_dividend + 1 if (
            s.div_signed and s.div_dividend[END]) else s.div_dividend
        if not s.div_call:
            # Running cycle: take the step unit's outputs instead.
            s.dividend.write_data.v = s.unit.div_dividend_next
            s.acc.write_data.v = s.unit.div_acc_next
            # Special case last cycle
            if s.counter.read_data == 1:
                # Non-restoring fixup: if the partial remainder ended
                # negative, restore it by adding back the divisor.
                if s.unit.div_acc_next[AEND]:
                    s.acc.write_data.v += s.divisor.read_data
                if s.negate_rem.read_data:
                    # Last cycle: two's complement the remainder.
                    s.acc.write_data.v = ~s.acc.write_data + 1
                # Negate the quotient -- but only if not dividing by
                # zero (divide-by-zero leaves the raw result).
                if s.negate.read_data and s.divisor.read_data != 0:
                    s.dividend.write_data.v = ~s.dividend.write_data + 1

    @s.combinational
    def handle_busy():
        # Busy toggles on a new call, and clears on result take/preempt.
        s.busy.write_call.v = s.div_call or s.result_call or s.preempt_call
        s.busy.write_data.v = s.div_call
def __init__(s, alu_interface):
    """Single-cycle ALU implementing the ALUFunc operations.

    Combinationally computes exec_res from exec_src0/exec_src1 according
    to exec_func, with exec_unsigned selecting unsigned semantics for
    the comparison. Always ready (exec_rdy tied to 1).
    """
    UseInterface(s, alu_interface)
    xlen = s.interface.Xlen
    CLOG2_XLEN = clog2(xlen)
    # PYMTL BROKEN:
    TWO_XLEN = 2 * xlen
    XLEN_M1 = xlen - 1
    # Input
    s.s0_ = Wire(xlen)
    s.s1_ = Wire(xlen)
    s.func_ = Wire(ALUFunc.bits)
    s.usign_ = Wire(1)
    # Output
    s.res_ = Wire(xlen)
    # Internals
    s.shamt_ = Wire(CLOG2_XLEN)
    # PYMTL_BROKEN: These are all work arrounds due to slicing
    s.cmp_u_ = Wire(1)
    # Double-width wire so arithmetic shift can be done as a sign-extended
    # logical shift, then truncated.
    s.sra_ = Wire(TWO_XLEN)
    # Since single cycle, always ready
    s.connect(s.exec_rdy, 1)

    # PYMTL_BROKEN: s.connect(s.exec_res, s.res_) translates as a continous
    # assign to a reg named s.res_
    @s.combinational
    def assign_res():
        s.exec_res.v = s.res_

    s.connect(s.s0_, s.exec_src0)
    s.connect(s.s1_, s.exec_src1)
    s.connect(s.func_, s.exec_func)
    s.connect(s.usign_, s.exec_unsigned)

    # All workarorunds due to slicing in concat() issues:
    s.s0_lower_ = Wire(XLEN_M1)
    s.s0_up_ = Wire(1)
    s.s1_lower_ = Wire(XLEN_M1)
    s.s1_up_ = Wire(1)

    @s.combinational
    def set_cmp():
        # Signed compare via unsigned compare: flip the sign bit of both
        # operands when signed, which biases the range so that unsigned
        # ordering matches signed ordering.
        s.s0_up_.v = s.s0_[XLEN_M1] if s.usign_ else not s.s0_[XLEN_M1]
        s.s1_up_.v = s.s1_[XLEN_M1] if s.usign_ else not s.s1_[XLEN_M1]
        s.s0_lower_.v = s.s0_[0:XLEN_M1]
        s.s1_lower_.v = s.s1_[0:XLEN_M1]
        # Now we can concat and compare
        s.cmp_u_.v = concat(s.s0_up_, s.s0_lower_) < concat(
            s.s1_up_, s.s1_lower_)

    @s.combinational
    def set_shamt():
        # Shift amount is the low clog2(xlen) bits of src1.
        s.shamt_.v = s.s1_[0:CLOG2_XLEN]
        # Arithmetic right shift: sign-extend to double width, shift,
        # result is taken from the low xlen bits in eval_comb.
        s.sra_.v = sext(s.s0_, TWO_XLEN) >> s.shamt_

    @s.combinational
    def eval_comb():
        # Operation mux; unknown funcs produce 0.
        s.res_.v = 0
        if s.func_ == ALUFunc.ALU_ADD:
            s.res_.v = s.s0_ + s.s1_
        elif s.func_ == ALUFunc.ALU_SUB:
            s.res_.v = s.s0_ - s.s1_
        elif s.func_ == ALUFunc.ALU_AND:
            s.res_.v = s.s0_ & s.s1_
        elif s.func_ == ALUFunc.ALU_OR:
            s.res_.v = s.s0_ | s.s1_
        elif s.func_ == ALUFunc.ALU_XOR:
            s.res_.v = s.s0_ ^ s.s1_
        elif s.func_ == ALUFunc.ALU_SLL:
            s.res_.v = s.s0_ << s.shamt_
        elif s.func_ == ALUFunc.ALU_SRL:
            s.res_.v = s.s0_ >> s.shamt_
        elif s.func_ == ALUFunc.ALU_SRA:
            s.res_.v = s.sra_[:xlen]
        elif s.func_ == ALUFunc.ALU_SLT:
            s.res_.v = zext(s.cmp_u_, xlen)