def execute_mulhu( s, inst ):
    # MULHU: write the upper XLEN bits of the unsigned product rs1 * rs2.
    op_a = s.rf[inst.rs1]
    op_b = s.rf[inst.rs2]
    s.rf[inst.rd] = sext_xlen( multhi64( op_a, op_b ) )
    s.pc += 4
def execute_rem( s, inst ):
    # REM: signed remainder; the result takes the sign of the dividend.
    dividend = signed( s.rf[inst.rs1], 64 )
    divisor  = signed( s.rf[inst.rs2], 64 )
    if divisor == 0:
        # Division by zero: REM returns the dividend unchanged (no trap).
        s.rf[inst.rd] = dividend
    else:
        # Compute on magnitudes, then reapply the dividend's sign so the
        # result truncates toward zero (Python's % floors instead).
        sign = 1 if dividend > 0 else -1
        s.rf[inst.rd] = sext_xlen( abs(dividend) % abs(divisor) * sign )
    s.pc += 4
def execute_divu( s, inst ):
    """DIVU: unsigned division of rs1 by rs2.

    Per the RISC-V spec, division by zero does not trap: it writes
    all-ones (-1, which sign-extends to the full register) to rd.
    """
    a = s.rf[inst.rs1]
    b = s.rf[inst.rs2]
    if b == 0:
        s.rf[ inst.rd ] = -1
    else:
        # Use floor division: the original `a / b` yields a float on
        # Python 3, corrupting the register value.
        s.rf[ inst.rd ] = sext_xlen( a // b )
    s.pc += 4
def execute_remu( s, inst ):
    # REMU: unsigned remainder; x % 0 returns the dividend per the spec.
    numer = s.rf[inst.rs1]
    denom = s.rf[inst.rs2]
    result = numer if denom == 0 else sext_xlen( numer % denom )
    s.rf[inst.rd] = result
    s.pc += 4
def execute_div( s, inst ):
    """DIV: signed division of rs1 by rs2, rounding toward zero.

    Division by zero writes -1 (all ones) per the RISC-V spec. Python's
    floor division rounds toward negative infinity, so divide the
    magnitudes and reapply the sign to get truncating semantics.
    """
    a = signed( s.rf[inst.rs1], 64 )
    b = signed( s.rf[inst.rs2], 64 )
    if b == 0:
        s.rf[ inst.rd ] = -1
    else:
        # `//` (floor division) replaces the original `/`, which on
        # Python 3 produces a float and corrupts the register file.
        sign = -1 if (a < 0) ^ (b < 0) else 1
        s.rf[ inst.rd ] = sext_xlen( abs(a) // abs(b) * sign )
    s.pc += 4
def execute_mulhsu( s, inst ):
    # MULHSU: upper 64 bits of signed(rs1) * unsigned(rs2).
    a, b = s.rf[inst.rs1], s.rf[inst.rs2]
    a_s = signed(a, 64)
    a = abs(a_s)
    # Low and high halves of the unsigned product |rs1| * rs2.
    multlo = trim_64( a * b )
    multhi = multhi64( a, b )
    # negate -- taken from
    # http://stackoverflow.com/questions/1541426/computing-high-64-bits-of-a-64x64-int-product-in-c
    # this requires us to do low multiplication as well, so it's probably
    # not very efficient
    # Two's-complement negation of the 128-bit product: invert the high
    # half, then add the carry out of (~lo + 1), which exists iff lo == 0.
    if a_s < 0:
        multhi = ~multhi
        if multlo == 0:
            multhi += 1
    s.rf[ inst.rd ] = sext_xlen( multhi )
    s.pc += 4
def execute_sub( s, inst ):
    # SUB: rd = rs1 - rs2, wrapped/sign-extended to XLEN.
    difference = s.rf[inst.rs1] - s.rf[inst.rs2]
    s.rf[inst.rd] = sext_xlen( difference )
    s.pc += 4
def execute_sll( s, inst ):
    # SLL: shift rs1 left by the low log2(XLEN) bits of rs2.
    shift_amount = s.rf[inst.rs2] & (s.xlen - 1)
    s.rf[inst.rd] = sext_xlen( s.rf[inst.rs1] << shift_amount )
    s.pc += 4
def execute_slli( s, inst ):
    """SLLI: shift rs1 left by the instruction's immediate shift amount.

    Raises TRAP_ILLEGAL_INSTRUCTION when the shift amount does not fit
    in the register width.
    """
    # Reject shamt == xlen as well: any shift >= xlen is illegal per the
    # spec (the original `>` comparison let shamt == xlen through).
    if SHAMT( s, inst ) >= s.xlen:
        raise TRAP_ILLEGAL_INSTRUCTION()
    s.rf[ inst.rd ] = sext_xlen( s.rf[inst.rs1] << SHAMT( s, inst ) )
    s.pc += 4
def execute_add( s, inst ):
    # ADD: rd = rs1 + rs2, wrapped to XLEN via sign extension.
    total = s.rf[inst.rs1] + s.rf[inst.rs2]
    s.rf[inst.rd] = sext_xlen( total )
    s.pc += 4
def execute_add(s, inst):
    """ADD: write the XLEN-truncated sum of rs1 and rs2 to rd."""
    s.rf[inst.rd] = sext_xlen(s.rf[inst.rs2] + s.rf[inst.rs1])
    s.pc += 4
def execute_addi( s, inst ):
    # ADDI: add the sign-extended I-type immediate to rs1.
    result = s.rf[inst.rs1] + inst.i_imm
    s.rf[inst.rd] = sext_xlen( result )
    s.pc += 4
def execute_sll(s, inst):
    """SLL: logical left shift of rs1 by the masked value of rs2."""
    mask = s.xlen - 1
    shamt = s.rf[inst.rs2] & mask
    s.rf[inst.rd] = sext_xlen(s.rf[inst.rs1] << shamt)
    s.pc += 4
def execute_sub(s, inst):
    """SUB: write rs1 minus rs2 (XLEN-wrapped) to rd."""
    minuend, subtrahend = s.rf[inst.rs1], s.rf[inst.rs2]
    s.rf[inst.rd] = sext_xlen(minuend - subtrahend)
    s.pc += 4
def execute_jal( s, inst ):
    # JAL: jump to the target, linking the return address into rd.
    # The link value is captured before pc changes, so rd == old pc + 4.
    return_addr = sext_xlen( s.pc + 4 )
    s.pc = JUMP_TARGET( s, inst )
    s.rf[ inst.rd ] = return_addr
def execute_slli(s, inst):
    """SLLI: shift rs1 left by the instruction's immediate shamt."""
    shamt = SHAMT(s, inst)
    # Shift amounts >= xlen are illegal per the spec; the original `>`
    # comparison wrongly accepted shamt == xlen.
    if shamt >= s.xlen:
        raise TRAP_ILLEGAL_INSTRUCTION()
    s.rf[inst.rd] = sext_xlen(s.rf[inst.rs1] << shamt)
    s.pc += 4
def execute_sra( s, inst ):
    # SRA: arithmetic right shift of rs1 (treated as signed) by rs2.
    shamt = s.rf[inst.rs2] & (s.xlen - 1)
    value = signed( s.rf[inst.rs1], 64 )
    s.rf[inst.rd] = sext_xlen( value >> shamt )
    s.pc += 4
def execute_sra(s, inst):
    """SRA: arithmetic (sign-propagating) right shift of rs1 by rs2."""
    rs1_signed = signed(s.rf[inst.rs1], 64)
    s.rf[inst.rd] = sext_xlen(rs1_signed >> (s.rf[inst.rs2] & (s.xlen - 1)))
    s.pc += 4
def execute_jalr(s, inst):
    """JALR: indirect jump to rs1 + imm with the low bit cleared;
    rd receives the address of the following instruction.
    """
    tmp = sext_xlen(s.pc + 4)
    # Clear only bit 0, as the spec requires. The original masked with
    # the 32-bit constant 0xFFFFFFFE, which silently discarded the upper
    # target bits on a 64-bit machine (this file computes with
    # signed(..., 64) elsewhere, so 64-bit targets are expected).
    s.pc = (s.rf[inst.rs1] + inst.i_imm) & ~1
    s.rf[inst.rd] = tmp
def execute_jal(s, inst):
    """JAL: jump to the computed target, linking pc + 4 into rd."""
    link = sext_xlen(s.pc + 4)
    s.pc = JUMP_TARGET(s, inst)
    s.rf[inst.rd] = link
def execute_auipc(s, inst):
    """AUIPC: write pc plus the U-type immediate to rd."""
    s.rf[inst.rd] = sext_xlen(s.pc + inst.u_imm)
    s.pc += 4
def execute_auipc( s, inst ):
    # AUIPC: rd = pc + U-immediate.
    # NOTE(review): assumes u_imm arrives pre-shifted from the decoder
    # -- confirm against the instruction-field definitions.
    target = inst.u_imm + s.pc
    s.rf[ inst.rd ] = sext_xlen( target )
    s.pc += 4
def execute_mul( s, inst ):
    # MUL: low XLEN bits of rs1 * rs2 (identical for signed/unsigned).
    product = s.rf[inst.rs1] * s.rf[inst.rs2]
    s.rf[ inst.rd ] = sext_xlen( product )
    s.pc += 4
def execute_jalr( s, inst ):
    """JALR: register-indirect jump; rd gets the return address."""
    return_addr = sext_xlen( s.pc + 4 )
    # Per the spec the target is (rs1 + imm) with bit 0 cleared.
    # `& ~1` replaces the 32-bit mask 0xFFFFFFFE, which truncated
    # 64-bit jump targets (the file uses signed(..., 64) elsewhere,
    # so 64-bit values are in play).
    s.pc = ( s.rf[inst.rs1] + inst.i_imm ) & ~1
    s.rf[ inst.rd ] = return_addr
def execute_addi(s, inst):
    """ADDI: rd = rs1 + sign-extended I-immediate, wrapped to XLEN."""
    s.rf[inst.rd] = sext_xlen(inst.i_imm + s.rf[inst.rs1])
    s.pc += 4