def i_add(self, op):
    src1 = self.getOperValue(op, 1)
    src2 = self.getOperValue(op, 2)

    # FIXME PDE and flags
    if src1 is None or src2 is None:
        self.undefFlags()
        self.setOperValue(op, 0, None)
        return

    dsize = op.opers[0].tsize
    ssize = op.opers[1].tsize
    s2size = op.opers[2].tsize

    usrc1 = e_bits.unsigned(src1, 4)
    usrc2 = e_bits.unsigned(src2, 4)
    ssrc1 = e_bits.signed(src1, 4)
    ssrc2 = e_bits.signed(src2, 4)

    ures = usrc1 + usrc2
    sres = ssrc1 + ssrc2

    self.setOperValue(op, 0, ures)

    curmode = self.getProcMode()
    if op.iflags & IF_S:
        if op.opers[0].reg == 15:
            # ADDS with rd == r15 restores CPSR from SPSR (illegal in usr/sys)
            if curmode != PM_sys and curmode != PM_usr:
                self.setCPSR(self.getSPSR(curmode))
            else:
                raise Exception("Messed up opcode... adding to r15 from PM_usr or PM_sys")
        self.setFlag(PSR_N_bit, e_bits.is_signed(ures, dsize))
        self.setFlag(PSR_Z_bit, not ures)
        self.setFlag(PSR_C_bit, e_bits.is_unsigned_carry(ures, dsize))
        self.setFlag(PSR_V_bit, e_bits.is_signed_overflow(sres, dsize))
def intSubBase(self, src1, src2, Sflag=0, rd=0):
    # So we can either do a BUNCH of craziness with xor and shifting to
    # get the necessary flags here, *or* we can just do both a signed and
    # unsigned sub and use the results.
    udst = e_bits.unsigned(src1, 4)
    usrc = e_bits.unsigned(src2, 4)
    sdst = e_bits.signed(src1, 4)
    ssrc = e_bits.signed(src2, 4)

    ures = udst - usrc
    sres = sdst - ssrc

    if Sflag:
        curmode = self.getProcMode()
        if rd == 15:
            if curmode != PM_sys and curmode != PM_usr:
                self.setCPSR(self.getSPSR(curmode))
            else:
                raise Exception("Messed up opcode... adding to r15 from PM_usr or PM_sys")
        self.setFlag(PSR_N_bit, e_bits.is_signed(ures, 4))
        self.setFlag(PSR_Z_bit, not ures)
        self.setFlag(PSR_C_bit, e_bits.is_unsigned_carry(ures, 4))
        self.setFlag(PSR_V_bit, e_bits.is_signed_overflow(sres, 4))

    return ures
def i_adc(self, op):
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)

    cf = 0
    if self.getFlag(EFLAGS_CF):
        cf = 1

    dsize = op.opers[0].tsize
    ssize = op.opers[1].tsize

    sdst = e_bits.signed(dst, dsize)
    ssrc = e_bits.signed(src, ssize)

    if isinstance(op.opers[1], i386ImmOper) and ssize < dsize:
        src = e_bits.sign_extend(src, ssize, dsize)
        ssize = dsize

    # FIXME perhaps unify the add/adc flags/arith code
    res = dst + src + cf
    sres = sdst + ssrc + cf

    tsize = op.opers[0].tsize

    self.setFlag(EFLAGS_CF, e_bits.is_unsigned_carry(res, tsize))
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(res))
    self.setFlag(EFLAGS_AF, e_bits.is_aux_carry(src, dst))
    self.setFlag(EFLAGS_ZF, not res)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(res, tsize))
    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(sres, dsize))

    self.setOperValue(op, 0, res)
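# A minimal standalone sketch of the carry/overflow tests that i_adc above
# relies on. The e_bits helpers are assumed to behave roughly like this;
# the helper names here are illustrative, not the real envi.bits code.
def is_unsigned_carry(res, size):
    # carry out when the raw sum no longer fits in `size` bytes
    return res > (2 ** (size * 8)) - 1

def is_signed_overflow(sres, size):
    # signed overflow when the signed sum leaves the representable range
    smax = (2 ** (size * 8 - 1)) - 1
    return sres > smax or sres < -smax - 1

# adc al, 0x01 with AL=0xff and CF=1: 0xff + 0x01 + 1 = 0x101
assert is_unsigned_carry(0xff + 0x01 + 1, 1)     # CF set
assert not is_signed_overflow(-1 + 1 + 1, 1)     # OF clear (-1 + 2 = 1)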
def intSubBase(self, src1, src2, Sflag=0, rd=0):
    # So we can either do a BUNCH of craziness with xor and shifting to
    # get the necessary flags here, *or* we can just do both a signed and
    # unsigned sub and use the results.
    udst = e_bits.unsigned(src1, 4)
    usrc = e_bits.unsigned(src2, 4)
    sdst = e_bits.signed(src1, 4)
    ssrc = e_bits.signed(src2, 4)

    ures = udst - usrc
    sres = sdst - ssrc

    if Sflag:
        curmode = self.getProcMode()
        if rd == 15:
            if curmode != PM_sys and curmode != PM_usr:
                self.setCPSR(self.getSPSR(curmode))
            else:
                raise Exception("Messed up opcode... adding to r15 from PM_usr or PM_sys")
        self.setFlag(PSR_N_bit, e_bits.is_signed(ures, 4))
        self.setFlag(PSR_Z_bit, not ures)
        self.setFlag(PSR_C_bit, not e_bits.is_unsigned_carry(ures, 4))
        self.setFlag(PSR_V_bit, e_bits.is_signed_overflow(sres, 4))

    return ures
def i_add(self, op):
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)

    dsize = op.opers[0].tsize
    ssize = op.opers[1].tsize

    if dsize > ssize:
        src = e_bits.sign_extend(src, ssize, dsize)
        ssize = dsize

    udst = e_bits.unsigned(dst, dsize)
    usrc = e_bits.unsigned(src, ssize)
    sdst = e_bits.signed(dst, dsize)
    ssrc = e_bits.signed(src, ssize)

    ures = udst + usrc
    sres = sdst + ssrc

    self.setFlag(EFLAGS_CF, e_bits.is_unsigned_carry(ures, dsize))
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(ures))
    self.setFlag(EFLAGS_AF, e_bits.is_aux_carry(src, dst))
    self.setFlag(EFLAGS_ZF, not ures)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(ures, dsize))
    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(sres, dsize))

    self.setOperValue(op, 0, ures)
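# Hedged sketch of the sign-extension step above: a smaller source operand
# is widened to the destination width before the add. This mirrors what
# e_bits.sign_extend is used for; the helper below is illustrative only.
def sign_extend(value, cursize, newsize):
    if value & (1 << (cursize * 8 - 1)):   # sign bit set?
        value |= ((2 ** (newsize * 8)) - 1) ^ ((2 ** (cursize * 8)) - 1)
    return value

assert sign_extend(0xfe, 1, 4) == 0xfffffffe   # -2 as a byte -> -2 as a dword
assert sign_extend(0x7e, 1, 4) == 0x0000007e   # positive values pass through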
def i_add(self, op):
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)

    dsize = op.opers[0].tsize
    ssize = op.opers[1].tsize

    # FIXME PDE and flags
    if dst is None or src is None:
        self.undefFlags()
        self.setOperValue(op, 0, None)
        return

    if dsize > ssize:
        src = e_bits.sign_extend(src, ssize, dsize)
        ssize = dsize

    udst = e_bits.unsigned(dst, dsize)
    usrc = e_bits.unsigned(src, ssize)
    sdst = e_bits.signed(dst, dsize)
    ssrc = e_bits.signed(src, ssize)

    ures = udst + usrc
    sres = sdst + ssrc

    self.setFlag(EFLAGS_CF, e_bits.is_unsigned_carry(ures, dsize))
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(ures))
    self.setFlag(EFLAGS_AF, e_bits.is_aux_carry(src, dst))
    self.setFlag(EFLAGS_ZF, not ures)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(ures, dsize))
    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(sres, dsize))

    self.setOperValue(op, 0, ures)
def integerAddition(self, op):
    """
    Do the core of integer addition but only *return* the resulting
    value rather than assigning it.

    Architectures shouldn't have to override this as operand order
    doesn't matter
    """
    src = self.getOperValue(op, 0)
    dst = self.getOperValue(op, 1)

    # FIXME PDE and flags
    if src is None:
        self.undefFlags()
        self.setOperValue(op, 1, None)
        return

    ssize = op.opers[0].tsize
    dsize = op.opers[1].tsize

    udst = e_bits.unsigned(dst, dsize)
    sdst = e_bits.signed(dst, dsize)
    usrc = e_bits.unsigned(src, dsize)
    ssrc = e_bits.signed(src, dsize)

    ures = usrc + udst
    sres = ssrc + sdst

    return (ssize, dsize, sres, ures, sdst, udst)
def integerAddition(self, op):
    """
    Do the core of integer addition but only *return* the resulting
    value rather than assigning it.

    Architectures shouldn't have to override this as operand order
    doesn't matter
    """
    src = self.getOperValue(op, 0)
    dst = self.getOperValue(op, 1)

    # FIXME PDE and flags
    if src is None:
        self.undefFlags()
        self.setOperValue(op, 1, None)
        return

    ssize = op.opers[0].tsize
    dsize = op.opers[1].tsize

    udst = e_bits.unsigned(dst, dsize)
    sdst = e_bits.signed(dst, dsize)
    usrc = e_bits.unsigned(src, dsize)
    ssrc = e_bits.signed(src, dsize)

    ures = usrc + udst
    sres = ssrc + sdst

    return (ssize, dsize, sres, ures, sdst, udst)
def i_dec(self, op):
    dstidx = len(op.opers) - 1

    if dstidx == 1:
        ssize = op.opers[0].tsize
        dsize = op.opers[1].tsize
        src = self.getOperValue(op, 0)
        dst = self.getOperValue(op, 1)

        udst = e_bits.unsigned(dst, dsize)
        sdst = e_bits.signed(dst, dsize)
        usrc = e_bits.unsigned(src, ssize)
        ssrc = e_bits.signed(src, ssize)
    else:
        dsize = op.opers[0].tsize
        dst = self.getOperValue(op, 0)

        udst = e_bits.unsigned(dst, dsize)
        sdst = e_bits.signed(dst, dsize)
        ssrc = usrc = 1

    ures = udst - usrc
    sres = sdst - ssrc

    self.setFlag(CCR_Z, not ures)
    self.setFlag(CCR_N, e_bits.is_signed(ures, dsize))
    self.setFlag(CCR_V, e_bits.is_signed_overflow(sres, dsize))
    # V must be set if previous value was 0x80 (per docs, page 73 of H8/300)

    self.setOperValue(op, dstidx, ures)
def i_dec(self, op):
    dstidx = len(op.opers) - 1

    if dstidx == 1:
        ssize = op.opers[0].tsize
        dsize = op.opers[1].tsize
        src = self.getOperValue(op, 0)
        dst = self.getOperValue(op, 1)

        udst = e_bits.unsigned(dst, dsize)
        sdst = e_bits.signed(dst, dsize)
        usrc = e_bits.unsigned(src, ssize)
        ssrc = e_bits.signed(src, ssize)
    else:
        dsize = op.opers[0].tsize
        dst = self.getOperValue(op, 0)

        udst = e_bits.unsigned(dst, dsize)
        sdst = e_bits.signed(dst, dsize)
        ssrc = usrc = 1

    ures = udst - usrc
    sres = sdst - ssrc

    self.setFlag(h8_regs.CCR_Z, not ures)
    self.setFlag(h8_regs.CCR_N, e_bits.is_signed(ures, dsize))
    self.setFlag(h8_regs.CCR_V, e_bits.is_signed_overflow(sres, dsize))
    # V must be set if previous value was 0x80 (per docs, page 73 of H8/300)

    self.setOperValue(op, dstidx, ures)
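# Worked check of the V-flag note in the i_dec variants above (a standalone
# sketch, not emulator code): decrementing 0x80 as a signed byte takes -128
# to -129, which no longer fits in 8 bits, so overflow (V) must be set.
sdst = -128              # e_bits.signed(0x80, 1)
sres = sdst - 1          # -129
assert sres < -(2 ** 7)  # outside [-128, 127] -> is_signed_overflow is true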
def AddWithCarry(self, src1, src2, carry=0, Sflag=0, rd=0):
    '''
    AddWithCarry()
    ==============
    (bits(N), bit, bit) AddWithCarry(bits(N) x, bits(N) y, bit carry_in)
        unsigned_sum = UInt(x) + UInt(y) + UInt(carry_in);
        signed_sum   = SInt(x) + SInt(y) + UInt(carry_in);
        result       = unsigned_sum<N-1:0>;  // same value as signed_sum<N-1:0>
        carry_out    = if UInt(result) == unsigned_sum then '0' else '1';
        overflow     = if SInt(result) == signed_sum then '0' else '1';
        return (result, carry_out, overflow);

    An important property of the AddWithCarry() function is that if:

        (result, carry_out, overflow) = AddWithCarry(x, NOT(y), carry_in)

    then:

    * if carry_in == '1', then result == x-y with:
        overflow  == '1' if signed overflow occurred during the subtraction
        carry_out == '1' if unsigned borrow did not occur during the
                          subtraction, that is, if x >= y

    * if carry_in == '0', then result == x-y-1 with:
        overflow  == '1' if signed overflow occurred during the subtraction
        carry_out == '1' if unsigned borrow did not occur during the
                          subtraction, that is, if x > y.

    Together, these mean that the carry_in and carry_out bits in
    AddWithCarry() calls can act as NOT borrow flags for subtractions as
    well as carry flags for additions.

    (@ we don't return carry-out and overflow, but set the flags here)
    '''
    udst = e_bits.unsigned(src1, 4)
    usrc = e_bits.unsigned(src2, 4)
    sdst = e_bits.signed(src1, 4)
    ssrc = e_bits.signed(src2, 4)

    unsigned_sum = udst + usrc + carry
    signed_sum = sdst + ssrc + carry

    ures = unsigned_sum & 0xffffffff
    # per the pseudocode above, carry_out/overflow compare the truncated
    # result against the full-width sums
    newcarry = (ures != unsigned_sum)
    overflow = (e_bits.signed(ures, 4) != signed_sum)

    if Sflag:
        curmode = self.getProcMode()
        if rd == 15:
            if curmode != PM_sys and curmode != PM_usr:
                self.setCPSR(self.getSPSR(curmode))
            else:
                raise Exception("Messed up opcode... adding to r15 from PM_usr or PM_sys")
        else:
            self.setFlag(PSR_N_bit, e_bits.is_signed(ures, 4))
            self.setFlag(PSR_Z_bit, not ures)
            self.setFlag(PSR_C_bit, newcarry)
            self.setFlag(PSR_V_bit, overflow)

    return ures
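# The docstring's key property, checked with plain 32-bit integer math
# (a standalone sketch; no emulator state involved): x - y can be computed
# as AddWithCarry(x, NOT(y), 1), with carry_out acting as NOT-borrow.
def add_with_carry32(x, y, carry_in):
    total = (x & 0xffffffff) + (y & 0xffffffff) + carry_in
    result = total & 0xffffffff
    carry_out = int(result != total)
    return result, carry_out

x, y = 7, 5
res, carry = add_with_carry32(x, ~y, 1)
assert res == x - y                    # 2
assert carry == 1                      # x >= y, so no borrow
res, carry = add_with_carry32(y, ~x, 1)
assert res == (y - x) & 0xffffffff     # 0xfffffffe, i.e. -2
assert carry == 0                      # y < x, so a borrow occurred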
def rd_pc_imm8(va, value):  # add
    rd = shmaskval(value, 8, 0x7)
    imm = e_bits.signed(shmaskval(value, 0, 0xff), 1) * 4
    oper0 = ArmRegOper(rd, va=va)
    # pre-compute PC relative addr
    oper1 = ArmImmOper((va & 0xfffffffc) + 4 + imm)
    return oper0, oper1
def rd_pc_imm8(va, value):  # add
    rd = shmaskval(value, 8, 0x7)
    imm = e_bits.signed(shmaskval(value, 0, 0xff), 1) * 4
    oper0 = ArmRegOper(rd, va=va)
    # pre-compute PC relative addr
    oper1 = ArmImmOper((va & 0xfffffffc) + 4 + imm)
    return oper0, oper1
def bl_imm23(va, val, val2):  # bl
    opcode = INS_BL
    flags = envi.IF_CALL

    # break down the components; the immediate spans this and the next halfword
    S = (val >> 10) & 1
    j1 = (val2 >> 13) & 1
    j2 = (val2 >> 11) & 1
    i1 = ~(j1 ^ S) & 0x1
    i2 = ~(j2 ^ S) & 0x1
    X = (val2 >> 12) & 1
    mnem = ('blx', 'bl')[X]

    imm = (S << 24) | (i1 << 23) | (i2 << 22) | ((val & 0x3ff) << 12) | ((val2 & 0x7ff) << 1)

    # sign extend the 25-bit immediate
    if S:
        imm |= 0xff000000

    oper0 = ArmPcOffsetOper(e_bits.signed(imm, 4), va=va)

    return ((oper0, ), mnem, opcode, flags)
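# Worked example of the J1/J2/S bit shuffle above (standalone sketch using
# only the bit math, not the operand classes): the Thumb-2 halfword pair
# 0xf000 0xf880 encodes bl with a +0x100 offset.
val, val2 = 0xf000, 0xf880
S = (val >> 10) & 1                      # 0
i1 = ~(((val2 >> 13) & 1) ^ S) & 1       # J1=1 -> i1=0
i2 = ~(((val2 >> 11) & 1) ^ S) & 1       # J2=1 -> i2=0
imm = (S << 24) | (i1 << 23) | (i2 << 22) | ((val & 0x3ff) << 12) | ((val2 & 0x7ff) << 1)
assert imm == 0x100
assert (val2 >> 12) & 1 == 1             # X=1 -> 'bl' (0 would be 'blx')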
def i_inc(self, op):
    dstidx = len(op.opers) - 1

    if dstidx == 1:
        ssize = op.opers[0].tsize
        dsize = op.opers[1].tsize
        src = self.getOperValue(op, 0)
        dst = self.getOperValue(op, 1)

        udst = e_bits.unsigned(dst, dsize)
        sdst = e_bits.signed(dst, dsize)   # signed view feeds the V-flag check below
        usrc = e_bits.unsigned(src, ssize)
        ssrc = e_bits.signed(src, ssize)
    else:
        dsize = op.opers[0].tsize
        dst = self.getOperValue(op, 0)

        udst = e_bits.unsigned(dst, dsize)
        sdst = e_bits.signed(dst, dsize)
        ssrc = usrc = 1

    ures = usrc + udst
    sres = ssrc + sdst

    self.setFlag(h8_regs.CCR_Z, not ures)
    self.setFlag(h8_regs.CCR_N, e_bits.is_signed(ures, dsize))
    self.setFlag(h8_regs.CCR_V, e_bits.is_signed_overflow(sres, dsize))
    # V must be set if previous value was 0x7f (per docs, page 78 of H8/300)

    self.setOperValue(op, dstidx, ures)
def doSubC(self, a, b, carry, size):
    ua = e_bits.unsigned(a, size)
    ub = e_bits.unsigned(b, size)
    sa = e_bits.signed(a, size)
    sb = e_bits.signed(b, size)

    ures = ua - ub - 1 + carry
    sres = sa - sb - 1 + carry
    res = e_bits.unsigned(ures, size)

    self.setFlag(SR_N, e_bits.msb(res, size))
    self.setFlag(SR_Z, res == 0)
    self.setFlag(SR_C, not e_bits.is_unsigned_carry(ures, size))
    self.setFlag(SR_V, e_bits.is_signed_overflow(sres, size))

    return res
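# Quick check of the inverted-carry convention above (a standalone sketch):
# MSP430 subtract-with-carry treats C as NOT-borrow. With the raw math,
# an unsigned borrow is visible as the difference dipping below zero.
ua, ub, carry = 3, 5, 1
ures = ua - ub - 1 + carry   # -2: a borrow occurred -> C should be 0
assert ures < 0
ures = 5 - 3 - 1 + 1         # 2: no borrow -> C should be 1
assert ures >= 0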
def p_disp8(va, val, buf, off, tsize):  # bcc, bsr
    iflags = 0
    op = val >> 8
    disp8 = e_bits.signed(val & 0xfe, 1)
    opers = (h8_operands.H8PcOffsetOper(disp8, va, 1), )
    return (op, None, opers, iflags, 2)
def i_divxs(self, op):
    ssize = op.opers[0].tsize
    dsize = op.opers[1].tsize
    divisor = self.getOperValue(op, 0)
    dividend = self.getOperValue(op, 1)

    sdivisor = e_bits.signed(divisor, ssize)
    sdividend = e_bits.signed(dividend, dsize)

    # NOTE: floor division; hardware signed divides typically truncate
    # toward zero, so negative operands may round differently here
    quotient = sdividend // sdivisor
    remainder = sdividend % sdivisor

    rdval = (remainder << 8) | quotient
    self.setOperValue(op, 1, rdval)

    self.setFlag(CCR_Z, not quotient)
    self.setFlag(CCR_N, e_bits.is_signed(quotient, 4))
def i_divxs(self, op):
    ssize = op.opers[0].tsize
    dsize = op.opers[1].tsize
    divisor = self.getOperValue(op, 0)
    dividend = self.getOperValue(op, 1)

    sdivisor = e_bits.signed(divisor, ssize)
    sdividend = e_bits.signed(dividend, dsize)

    # NOTE: floor division; hardware signed divides typically truncate
    # toward zero, so negative operands may round differently here
    quotient = sdividend // sdivisor
    remainder = sdividend % sdivisor

    rdval = (remainder << 8) | quotient
    self.setOperValue(op, 1, rdval)

    self.setFlag(h8_regs.CCR_Z, not quotient)
    self.setFlag(h8_regs.CCR_N, e_bits.is_signed(quotient, 4))
def p_disp8(va, val, buf, off, tsize):  # bcc, bsr
    iflags = 0
    op = val >> 8
    disp8 = e_bits.signed(val & 0xfe, 1)
    opers = ( H8PcOffsetOper(disp8, va, 1), )
    return (op, None, opers, iflags, 2)
def i_mulxs(self, op):
    '''
    mul, extend as signed
    rs is 8 bits; rd is 16 bits, but only the lower 8 bits are used
    for the multiplicand
    product is then stored in 16-bit rd
    flags are not updated
    '''
    ssize = op.opers[0].tsize
    dsize = op.opers[1].tsize
    src = self.getOperValue(op, 0)
    dst = self.getOperValue(op, 1)

    ssrc = e_bits.signed(src, ssize)
    sdst = e_bits.signed(dst, dsize)

    sres = sdst * ssrc
    val = sres & e_bits.u_maxes[dsize]

    self.setOperValue(op, 1, val)
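# Worked example of the signed widening multiply above (plain-Python
# sketch): 0xff as a signed byte is -1, so -1 * 2 = -2, which masks to
# 0xfffe in the 16-bit destination.
ssrc = 0xff - 0x100          # e_bits.signed(0xff, 1) == -1
sdst = 2
val = (sdst * ssrc) & 0xffff
assert val == 0xfffe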
def i_neg(self, op):
    dsize = op.opers[0].tsize
    oper = self.getOperValue(op, 0)
    oper = e_bits.signed(oper, dsize)
    oper = -oper
    self.setOperValue(op, 0, oper)

    self.setFlag(CCR_H, e_bits.is_signed_half_carry(oper, dsize, oper))
    self.setFlag(CCR_N, e_bits.is_signed(oper, dsize))
    self.setFlag(CCR_Z, not oper)
    self.setFlag(CCR_V, e_bits.is_signed_overflow(oper, dsize))
    self.setFlag(CCR_C, e_bits.is_unsigned_carry(oper, dsize))
def i_neg(self, op):
    dsize = op.opers[0].tsize
    oper = self.getOperValue(op, 0)
    oper = e_bits.signed(oper, dsize)
    oper = -oper
    self.setOperValue(op, 0, oper)

    self.setFlag(h8_regs.CCR_H, e_bits.is_signed_half_carry(oper, dsize, oper))
    self.setFlag(h8_regs.CCR_N, e_bits.is_signed(oper, dsize))
    self.setFlag(h8_regs.CCR_Z, not oper)
    self.setFlag(h8_regs.CCR_V, e_bits.is_signed_overflow(oper, dsize))
    self.setFlag(h8_regs.CCR_C, e_bits.is_unsigned_carry(oper, dsize))
def intSubBase(self, minuend, subtrahend, msize, ssize):
    '''
    Base for integer subtraction.  Segmented such that order of operands
    can easily be overridden by subclasses.  Does not set flags
    (arch-specific), and doesn't set the dest operand.  That's up to the
    instruction implementation.

    So we can either do a BUNCH of craziness with xor and shifting to
    get the necessary flags here, *or* we can just do both a signed and
    unsigned sub and use the results.

    Math vocab refresher:  Minuend - Subtrahend = Difference
    '''
    uminuend = e_bits.unsigned(minuend, msize)
    usubtra = e_bits.unsigned(subtrahend, ssize)
    sminuend = e_bits.signed(minuend, msize)
    ssubtra = e_bits.signed(subtrahend, ssize)

    ures = uminuend - usubtra
    sres = sminuend - ssubtra

    return (msize, ssize, sres, ures, sminuend, uminuend)
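# Worked example of the dual-sub trick the docstring describes (standalone
# sketch, sizes in bytes): compute 0x10 - 0x20 once as unsigned bytes to
# see the borrow, and once as signed bytes to see there is no overflow.
uminuend, usubtra = 0x10, 0x20
sminuend, ssubtra = 0x10, 0x20     # both positive as signed bytes
ures = uminuend - usubtra          # -16: dipping below zero == unsigned borrow
sres = sminuend - ssubtra          # -16: still within [-128, 127], no overflow
assert ures < 0
assert -128 <= sres <= 127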
def intSubBase(self, src, dst, ssize, dsize):
    usrc = e_bits.unsigned(src, ssize)
    udst = e_bits.unsigned(dst, dsize)
    ssrc = e_bits.signed(src, ssize)
    sdst = e_bits.signed(dst, dsize)

    ures = udst - usrc
    sres = sdst - ssrc

    #print "dsize/ssize: %d %d" % (dsize, ssize)
    #print "unsigned: %d %d %d" % (usrc, udst, ures)
    #print "signed: %d %d %d" % (ssrc, sdst, sres)

    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(sres, dsize))
    self.setFlag(EFLAGS_AF, e_bits.is_aux_carry(usrc, udst))
    self.setFlag(EFLAGS_CF, e_bits.is_unsigned_carry(ures, dsize))
    self.setFlag(EFLAGS_SF, e_bits.is_signed(ures, dsize))
    self.setFlag(EFLAGS_ZF, not sres)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(ures))

    return ures
def intSubBase(self, src, dst, ssize, dsize):
    usrc = e_bits.unsigned(src, ssize)
    udst = e_bits.unsigned(dst, dsize)
    ssrc = e_bits.signed(src, ssize)
    sdst = e_bits.signed(dst, dsize)

    ures = udst - usrc
    sres = sdst - ssrc

    #print "dsize/ssize: %d %d" % (dsize, ssize)
    #print "unsigned: %d %d %d" % (usrc, udst, ures)
    #print "signed: %d %d %d" % (ssrc, sdst, sres)

    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(sres, dsize))
    self.setFlag(EFLAGS_AF, e_bits.is_aux_carry_sub(usrc, udst))
    self.setFlag(EFLAGS_CF, e_bits.is_unsigned_carry(ures, dsize))
    self.setFlag(EFLAGS_SF, e_bits.is_signed(ures, dsize))
    self.setFlag(EFLAGS_ZF, not sres)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(ures))

    return ures
def intSubBase(self, src, dst, ssize, dsize):
    usrc = e_bits.unsigned(src, ssize)
    udst = e_bits.unsigned(dst, dsize)
    ssrc = e_bits.signed(src, ssize)
    sdst = e_bits.signed(dst, dsize)

    ures = udst - usrc
    sres = sdst - ssrc

    #print "dsize/ssize: %d %d" % (dsize, ssize)
    #print "unsigned: %d %d %d" % (usrc, udst, ures)
    #print "signed: %d %d %d" % (ssrc, sdst, sres)

    # http://cnx.org/content/m23497/latest/
    # Bit  Description
    #  8   V  Overflow bit. V = 1 -> result of an arithmetic operation
    #         overflows the signed-variable range.
    #  2   N  Negative flag. N = 1 -> result of a byte or word operation
    #         is negative.
    #  1   Z  Zero flag. Z = 1 -> result of a byte or word operation is 0.
    #  0   C  Carry flag. C = 1 -> result of a byte or word operation
    #         produced a carry.
    #
    # REG_SR_C = 1 << 0   # Carry bit
    # REG_SR_Z = 1 << 1
    # REG_SR_N = 1 << 2
    # REG_SR_V = 1 << 8

    #print "ures: %x udst: %x usrc: %x" % (ures, udst, usrc)
    self.setFlag(REG_SR_V, e_bits.is_signed_overflow(sres, dsize))
    self.setFlag(REG_SR_Z, not sres)
    self.setFlag(REG_SR_N, sres < 0)
    #self.setFlag(REG_SR_C, e_bits.is_aux_carry(usrc, udst))
    self.setFlag(REG_SR_C, e_bits.is_unsigned_carry(ures, dsize))

    return ures
def p_branch(opval, va):
    # primary branch encoding.  others were added later in the media section
    off = e_bits.signed(opval, 3)
    off <<= 2

    link = (opval >> 24) & 1

    #FIXME this assumes A1 branch encoding.
    olist = ( ArmOffsetOper(off, va), )

    if link:
        flags = envi.IF_CALL
    else:
        flags = envi.IF_BRANCH

    opcode = (IENC_BRANCH << 16) + link
    return (opcode, b_mnem[link], olist, flags)
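# Worked example of the branch-offset math above (standalone sketch): the
# low 24 bits hold a signed word offset. 0xfffffe as a 24-bit signed value
# is -2, and shifting left by 2 scales words to bytes, giving -8.
opval_low24 = 0xfffffe
off = opval_low24 - (1 << 24)   # what e_bits.signed(opval, 3) computes here
off <<= 2
assert off == -8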
def p_disp16(va, val, buf, off, tsize):  # bcc, bsr
    val2, = struct.unpack('>H', buf[off + 2:off + 4])
    iflags = 0
    op = val
    disp16 = e_bits.signed(val2 & 0xfffffe, 2)
    mnem = None

    if (op & 0xf00) == 0x800:
        opnibble = (val >> 4) & 0xf
        mnem, iflags = bcc[opnibble]

    opers = (h8_operands.H8PcOffsetOper(disp16, va, 2), )
    return (op, mnem, opers, iflags, 4)
def i_inc(self, op):
    size = op.opers[0].tsize
    val = self.getOperValue(op, 0)

    sval = e_bits.signed(val, size)
    sval += 1

    self.setOperValue(op, 0, sval)

    # Another arithmetic op where doing signed and unsigned is easier ;)
    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(sval, size))
    self.setFlag(EFLAGS_SF, e_bits.is_signed(sval, size))
    self.setFlag(EFLAGS_ZF, not sval)
    self.setFlag(EFLAGS_AF, (sval & 0xf) == 0)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(sval))
def p_disp16(va, val, buf, off, tsize):  # bcc, bsr
    val2, = struct.unpack('>H', buf[off + 2:off + 4])
    iflags = 0
    op = val
    disp16 = e_bits.signed(val2 & 0xfffffe, 2)
    mnem = None

    if (op & 0xf00) == 0x800:
        opnibble = (val >> 4) & 0xf
        mnem, iflags = bcc[opnibble]

    opers = ( H8PcOffsetOper(disp16, va, 2), )
    return (op, mnem, opers, iflags, 4)
def i_inc(self, op):
    if op.iflags & IF_BYTE:
        size = BYTE
    else:
        size = WORD
    #size = op.opers[0].tsize
    val = self.getOperValue(op, 0)

    sval = e_bits.signed(val, size)
    sval += 1

    self.setOperValue(op, 0, sval)

    # Another arithmetic op where doing signed and unsigned is easier ;)
    self.setFlag(REG_SR_V, e_bits.is_signed_overflow(sval, size))
    self.setFlag(REG_SR_Z, not sval)
    self.setFlag(REG_SR_N, sval < 0)
    self.setFlag(REG_SR_C, e_bits.is_aux_carry(val, 1))
def bl_imm23(va, val, val2):  # bl
    flags = 0

    # break down the components; the immediate spans this and the next halfword
    S = (val >> 10) & 1
    j1 = (val2 >> 13) & 1
    j2 = (val2 >> 11) & 1
    i1 = ~(j1 ^ S) & 0x1
    i2 = ~(j2 ^ S) & 0x1
    X = (val2 >> 12) & 1
    mnem = ('blx', 'bl')[X]

    imm = (S << 24) | (i1 << 23) | (i2 << 22) | ((val & 0x3ff) << 12) | ((val2 & 0x7ff) << 1)

    # sign extend the 25-bit immediate
    if S:
        imm |= 0xff000000

    oper0 = ArmPcOffsetOper(e_bits.signed(imm, 4), va=va)

    return ((oper0, ), mnem, flags)
def i_idiv(self, op):
    # FIXME this needs emulation testing!  (Python's floor division also
    # differs from x86 idiv truncation for negative operands.)
    tsize = op.opers[0].tsize
    if tsize == 1:
        ax = self.getRegister(REG_AX)
        ax = e_bits.signed(ax, 2)
        d = self.getOperValue(op, 0)
        d = e_bits.signed(d, 1)
        if d == 0:
            raise envi.DivideByZero(self)
        q = ax // d
        r = ax % d
        res = ((r & 0xff) << 8) | (q & 0xff)
        self.setRegister(REG_AX, res)

    elif tsize == 2:
        val = self.twoRegCompound(REG_DX, REG_AX, 2)
        val = e_bits.signed(val, 4)
        d = self.getOperValue(op, 0)
        d = e_bits.signed(d, 2)
        if d == 0:
            raise envi.DivideByZero(self)
        q = val // d
        r = val % d
        self.setRegister(REG_AX, q)
        self.setRegister(REG_DX, r)

    elif tsize == 4:
        val = self.twoRegCompound(REG_EDX, REG_EAX, 4)
        val = e_bits.signed(val, 8)
        d = self.getOperValue(op, 0)
        d = e_bits.signed(d, 4)
        if d == 0:
            raise envi.DivideByZero(self)
        q = val // d
        r = val % d
        self.setRegister(REG_EAX, q)
        self.setRegister(REG_EDX, r)

    else:
        raise envi.UnsupportedInstruction(self, op)
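# The FIXME above is warranted: x86 idiv truncates its quotient toward
# zero, while Python's integer division rounds toward negative infinity.
# A minimal sketch of a truncating signed divide (hypothetical helper,
# not part of the emulator):
def idiv_trunc(val, d):
    if d == 0:
        raise ZeroDivisionError()
    q = abs(val) // abs(d)
    if (val < 0) != (d < 0):
        q = -q
    r = val - q * d          # remainder takes the dividend's sign
    return q, r

assert idiv_trunc(-7, 2) == (-3, -1)   # floor division would give (-4, 1)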
def extended_parse_modrm(self, bytes, offset, opersize, regbase=0):
    """
    Return a tuple of (size, Operand)
    """
    mod, reg, rm = self.parse_modrm(ord(bytes[offset]))
    size = 1

    #print "EXTENDED MOD REG RM",mod,reg,rm

    if mod == 3:    # Easy one, just a reg
        # FIXME only use self.byteRegOffset in 32 bit mode, NOT 64 bit...
        if opersize == 1:
            rm = self.byteRegOffset(rm)
        elif opersize == 2:
            rm += RMETA_LOW16
        #print "OPERSIZE",opersize,rm
        return (size, i386RegOper(rm + regbase, opersize))

    elif mod == 0:
        # means we are [reg] unless rm == 4 (SIB) or rm == 5 ([imm32])
        if rm == 5:
            imm = e_bits.parsebytes(bytes, offset + size, 4)
            size += 4
            # NOTE: in 64 bit mode, *this* is where we differ, (This case is RIP relative)
            return (size, i386ImmMemOper(imm, opersize))

        elif rm == 4:
            sibsize, scale, index, base, imm = self.parse_sib(bytes, offset + size, mod)
            size += sibsize
            if base is not None:
                base += regbase    # Adjust for different register addressing modes
            if index is not None:
                index += regbase   # Adjust for different register addressing modes
            oper = i386SibOper(opersize, reg=base, imm=imm, index=index, scale=scale_lookup[scale])
            return (size, oper)

        else:
            return (size, i386RegMemOper(regbase + rm, opersize))

    elif mod == 1:
        # mod 1 means we are [ reg + disp8 ] (unless rm == 4 which means sib + disp8)
        if rm == 4:
            sibsize, scale, index, base, imm = self.parse_sib(bytes, offset + size, mod)
            size += sibsize
            disp = e_bits.parsebytes(bytes, offset + size, 1, sign=True)
            size += 1
            if base is not None:
                base += regbase    # Adjust for different register addressing modes
            if index is not None:
                index += regbase   # Adjust for different register addressing modes
            oper = i386SibOper(opersize, reg=base, index=index, scale=scale_lookup[scale], disp=disp)
            return (size, oper)
        else:
            x = e_bits.signed(ord(bytes[offset + size]), 1)
            size += 1
            return (size, i386RegMemOper(regbase + rm, opersize, disp=x))

    elif mod == 2:
        # Means we are [ reg + disp32 ] (unless rm == 4 which means SIB + disp32)
        if rm == 4:
            sibsize, scale, index, base, imm = self.parse_sib(bytes, offset + size, mod)
            size += sibsize
            disp = e_bits.parsebytes(bytes, offset + size, 4, sign=True)
            size += 4
            if base is not None:
                base += regbase    # Adjust for different register addressing modes
            if index is not None:
                index += regbase   # Adjust for different register addressing modes
            oper = i386SibOper(opersize, reg=base, imm=imm, index=index, scale=scale_lookup[scale], disp=disp)
            return (size, oper)
        else:
            # NOTE: Immediate displacements in SIB are still 4 bytes in 64 bit mode
            disp = e_bits.parsebytes(bytes, offset + size, 4, sign=True)
            size += 4
            return (size, i386RegMemOper(regbase + rm, opersize, disp=disp))

    else:
        raise Exception("How does mod == %d" % mod)
def extended_parse_modrm(self, bytez, offset, opersize, regbase=0, prefixes=0):
    """
    Return a tuple of (size, Operand)
    """
    if prefixes & PREFIX_ADDR_SIZE:
        if opersize == 4 and self.ptrsize == 4:
            return self.shortend_parse_modrm(bytez, offset, opersize, regbase=regbase, prefixes=prefixes)

    mod, reg, rm = self.parse_modrm(bytez[offset])
    size = 1

    if mod == 3:    # Easy one, just a reg
        # FIXME only use self.byteRegOffset in 32 bit mode, NOT 64 bit...
        if opersize == 1:
            rm = self.byteRegOffset(rm, prefixes=prefixes)
        elif opersize == 2:
            rm += RMETA_LOW16
        return (size, i386RegOper(rm + regbase, opersize))

    elif mod == 0:
        # means we are [reg] unless rm == 4 (SIB) or rm == 5 ([imm32])
        # BUT JOKES -- the table is totally different in 16 bit mode BECAUSE WHY
        if rm == 5:
            imm = e_bits.parsebytes(bytez, offset + size, 4)
            size += 4
            # NOTE: in 64 bit mode, *this* is where we differ, (This case is RIP relative)
            return (size, i386ImmMemOper(imm, opersize))

        elif rm == 4:
            sibsize, scale, index, base, imm = self.parse_sib(bytez, offset + size, mod, prefixes=prefixes)
            size += sibsize
            if base is not None:
                base += regbase    # Adjust for different register addressing modes
            if index is not None:
                index += regbase   # Adjust for different register addressing modes
            oper = i386SibOper(opersize, reg=base, imm=imm, index=index, scale=scale_lookup[scale])
            return (size, oper)

        else:
            return (size, i386RegMemOper(regbase + rm, opersize))

    elif mod == 1:
        # mod 1 means we are [ reg + disp8 ] (unless rm == 4 which means sib + disp8)
        if rm == 4:
            sibsize, scale, index, base, imm = self.parse_sib(bytez, offset + size, mod, prefixes=prefixes)
            size += sibsize
            disp = e_bits.parsebytes(bytez, offset + size, 1, sign=True)
            size += 1
            if base is not None:
                base += regbase    # Adjust for different register addressing modes
            if index is not None:
                index += regbase   # Adjust for different register addressing modes
            oper = i386SibOper(opersize, reg=base, index=index, scale=scale_lookup[scale], disp=disp)
            return (size, oper)
        else:
            x = e_bits.signed(bytez[offset + size], 1)
            size += 1
            return (size, i386RegMemOper(regbase + rm, opersize, disp=x))

    elif mod == 2:
        # Means we are [ reg + disp32 ] (unless rm == 4 which means SIB + disp32)
        if rm == 4:
            sibsize, scale, index, base, imm = self.parse_sib(bytez, offset + size, mod, prefixes=prefixes)
            size += sibsize
            disp = e_bits.parsebytes(bytez, offset + size, 4, sign=True)
            size += 4
            if base is not None:
                base += regbase    # Adjust for different register addressing modes
            if index is not None:
                index += regbase   # Adjust for different register addressing modes
            oper = i386SibOper(opersize, reg=base, imm=imm, index=index, scale=scale_lookup[scale], disp=disp)
            return (size, oper)
        else:
            # NOTE: Immediate displacements in SIB are still 4 bytes in 64 bit mode
            disp = e_bits.parsebytes(bytez, offset + size, 4, sign=True)
            size += 4
            return (size, i386RegMemOper(regbase + rm, opersize, disp=disp))

    else:
        raise Exception("How does mod == %d" % mod)
def extended_parse_modrm(self, bytez, offset, opersize, regbase=0, prefixes=0):
    """
    Return a tuple of (size, Operand)
    """
    mod, reg, rm = self.parse_modrm(ord(bytez[offset]))
    size = 1

    #print "EXTENDED MOD REG RM",mod,reg,rm

    if mod == 3:    # Easy one, just a reg
        # FIXME only use self.byteRegOffset in 32 bit mode, NOT 64 bit...
        if opersize == 1:
            rm = self.byteRegOffset(rm, prefixes=prefixes)
        elif opersize == 2:
            rm += RMETA_LOW16
        #print "OPERSIZE",opersize,rm
        return (size, i386RegOper(rm + regbase, opersize))

    elif mod == 0:
        # means we are [reg] unless rm == 4 (SIB) or rm == 5 ([imm32])
        if rm == 5:
            imm = e_bits.parsebytes(bytez, offset + size, 4)
            size += 4
            # NOTE: in 64 bit mode, *this* is where we differ, (This case is RIP relative)
            return (size, i386ImmMemOper(imm, opersize))

        elif rm == 4:
            sibsize, scale, index, base, imm = self.parse_sib(bytez, offset + size, mod, prefixes=prefixes)
            size += sibsize
            if base is not None:
                base += regbase    # Adjust for different register addressing modes
            if index is not None:
                index += regbase   # Adjust for different register addressing modes
            oper = i386SibOper(opersize, reg=base, imm=imm, index=index, scale=scale_lookup[scale])
            return (size, oper)

        else:
            return (size, i386RegMemOper(regbase + rm, opersize))

    elif mod == 1:
        # mod 1 means we are [ reg + disp8 ] (unless rm == 4 which means sib + disp8)
        if rm == 4:
            sibsize, scale, index, base, imm = self.parse_sib(bytez, offset + size, mod, prefixes=prefixes)
            size += sibsize
            disp = e_bits.parsebytes(bytez, offset + size, 1, sign=True)
            size += 1
            if base is not None:
                base += regbase    # Adjust for different register addressing modes
            if index is not None:
                index += regbase   # Adjust for different register addressing modes
            oper = i386SibOper(opersize, reg=base, index=index, scale=scale_lookup[scale], disp=disp)
            return (size, oper)
        else:
            x = e_bits.signed(ord(bytez[offset + size]), 1)
            size += 1
            return (size, i386RegMemOper(regbase + rm, opersize, disp=x))

    elif mod == 2:
        # Means we are [ reg + disp32 ] (unless rm == 4 which means SIB + disp32)
        if rm == 4:
            sibsize, scale, index, base, imm = self.parse_sib(bytez, offset + size, mod, prefixes=prefixes)
            size += sibsize
            disp = e_bits.parsebytes(bytez, offset + size, 4, sign=True)
            size += 4
            if base is not None:
                base += regbase    # Adjust for different register addressing modes
            if index is not None:
                index += regbase   # Adjust for different register addressing modes
            oper = i386SibOper(opersize, reg=base, imm=imm, index=index, scale=scale_lookup[scale], disp=disp)
            return (size, oper)
        else:
            # NOTE: Immediate displacements in SIB are still 4 bytes in 64 bit mode
            disp = e_bits.parsebytes(bytez, offset + size, 4, sign=True)
            size += 4
            return (size, i386RegMemOper(regbase + rm, opersize, disp=disp))

    else:
        raise Exception("How does mod == %d" % mod)
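# Tiny worked example of the mod/reg/rm split the parsers above rely on
# (standalone sketch; the 2/3/3-bit field layout is standard x86):
def split_modrm(byte):
    return (byte >> 6) & 3, (byte >> 3) & 7, byte & 7

mod, reg, rm = split_modrm(0x45)
assert (mod, reg, rm) == (1, 0, 5)   # mod=1, rm=5 -> [ebp + disp8] above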
def rt_pc_imm8(va, value):  # ldr
    rt = shmaskval(value, 8, 0x7)
    imm = e_bits.signed((value & 0xff), 1) << 2
    oper0 = ArmRegOper(rt, va=va)
    oper1 = ArmImmOffsetOper(REG_PC, imm, (va & 0xfffffffc))
    return oper0, oper1
def pc_imm11(va, value):  # b
    imm = e_bits.signed(((value & 0x7ff) << 1), 3)
    oper0 = ArmPcOffsetOper(imm, va=va)
    return oper0,
def pc_imm8(va, value):  # b
    imm = e_bits.signed(shmaskval(value, 0, 0xff), 1) * 2
    oper0 = ArmPcOffsetOper(imm, va=va)
    return oper0,
def rt_pc_imm8(va, value):  # ldr
    rt = shmaskval(value, 8, 0x7)
    imm = e_bits.signed(shmaskval(value, 0, 0xff), 1) * 4
    oper0 = ArmRegOper(rt, va=va)
    oper1 = ArmImmOffsetOper(REG_PC, imm, (va & 0xfffffffc) + 4)
    return oper0, oper1
def pc_imm11(va, value):  # b
    imm = e_bits.signed(((value & 0x7ff) << 1), 3)
    oper0 = ArmPcOffsetOper(imm, va=va)
    return oper0,