def i_add(self, op):
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)

    dsize = op.opers[0].tsize
    ssize = op.opers[1].tsize

    if dsize > ssize:
        src = e_bits.sign_extend(src, ssize, dsize)
        ssize = dsize

    udst = e_bits.unsigned(dst, dsize)
    usrc = e_bits.unsigned(src, ssize)
    sdst = e_bits.signed(dst, dsize)
    ssrc = e_bits.signed(src, ssize)

    ures = udst + usrc
    sres = sdst + ssrc

    self.setFlag(EFLAGS_CF, e_bits.is_unsigned_carry(ures, dsize))
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(ures))
    self.setFlag(EFLAGS_AF, e_bits.is_aux_carry(src, dst))
    self.setFlag(EFLAGS_ZF, not ures)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(ures, dsize))
    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(sres, dsize))

    self.setOperValue(op, 0, ures)
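# The ADD handlers above lean on e_bits helpers for every flag. As a point of
# reference, here is a tiny standalone sketch (illustrative stand-ins only,
# not the envi.bits API) of how those flag results can be derived with plain
# Python integers at an assumed 8-bit operand size.
def add_flags_sketch(dst, src, size=1):
    bits = size * 8
    umax = (1 << bits) - 1

    def tosigned(v):
        v &= umax
        return v - (1 << bits) if v & (1 << (bits - 1)) else v

    ures = (dst & umax) + (src & umax)          # unsigned sum, possibly > umax
    sres = tosigned(dst) + tosigned(src)        # ideal signed result
    return {
        'result': ures & umax,
        'CF': ures > umax,                                  # unsigned carry out
        'OF': not (-(1 << (bits - 1)) <= sres < (1 << (bits - 1))),
        'ZF': (ures & umax) == 0,
        'SF': bool((ures >> (bits - 1)) & 1),
        'AF': ((dst & 0xf) + (src & 0xf)) > 0xf,            # carry out of bit 3
        'PF': bin(ures & 0xff).count('1') % 2 == 0,         # even parity, low byte
    }

# 0x7f + 0x01 wraps the signed 8-bit range: OF set, CF clear.
# print(add_flags_sketch(0x7f, 0x01))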
def i_adc(self, op):
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)

    # PDE
    if dst == None or src == None:
        self.undefFlags()
        self.setOperValue(op, 0, None)
        return

    cf = 0
    if self.getFlag(EFLAGS_CF):
        cf = 1

    dstsize = op.opers[0].tsize
    srcsize = op.opers[1].tsize

    if (isinstance(op.opers[1], i386ImmOper) and srcsize < dstsize):
        src = e_bits.sign_extend(src, srcsize, dstsize)
        srcsize = dstsize

    #FIXME perhaps unify the add/adc flags/arith code
    res = dst + src + cf

    tsize = op.opers[0].tsize

    self.setFlag(EFLAGS_CF, e_bits.is_unsigned_carry(res, tsize))
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(res))
    self.setFlag(EFLAGS_AF, e_bits.is_aux_carry(src, dst))
    self.setFlag(EFLAGS_ZF, not res)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(res, tsize))
    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(res, tsize))

    self.setOperValue(op, 0, res)
def i_adc(self, op):
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)

    cf = 0
    if self.getFlag(EFLAGS_CF):
        cf = 1

    dsize = op.opers[0].tsize
    ssize = op.opers[1].tsize

    sdst = e_bits.signed(dst, dsize)
    ssrc = e_bits.signed(src, ssize)

    if (isinstance(op.opers[1], i386ImmOper) and ssize < dsize):
        src = e_bits.sign_extend(src, ssize, dsize)
        ssize = dsize

    #FIXME perhaps unify the add/adc flags/arith code
    res = dst + src + cf
    sres = sdst + ssrc + cf

    tsize = op.opers[0].tsize

    self.setFlag(EFLAGS_CF, e_bits.is_unsigned_carry(res, tsize))
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(res))
    self.setFlag(EFLAGS_AF, e_bits.is_aux_carry(src, dst))
    self.setFlag(EFLAGS_ZF, not res)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(res, tsize))
    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(sres, dsize))

    self.setOperValue(op, 0, res)
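# Minimal sketch (assumed values, not the emulator API) of why the only
# difference from i_add is the "+ cf" term: consuming the carry lets two
# 8-bit adds implement a 16-bit addition -- ADD for the low bytes, ADC for
# the high bytes.
def add8(a, b, carry_in=0):
    total = (a & 0xff) + (b & 0xff) + carry_in
    return total & 0xff, int(total > 0xff)      # (result, carry out)

lo_a, hi_a = 0xff, 0x01                         # 0x01ff
lo_b, hi_b = 0x01, 0x02                         # 0x0201
lo, carry = add8(lo_a, lo_b)                    # ADD: 0xff + 0x01 -> 0x00, CF=1
hi, carry = add8(hi_a, hi_b, carry)             # ADC: 0x01 + 0x02 + 1 -> 0x04
assert (hi << 8) | lo == (0x01ff + 0x0201) & 0xffff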
def i_shld(self, op):
    dsize = op.opers[0].tsize
    bsize = dsize * 8
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)
    cnt = self.getOperValue(op, 2)

    cnt &= 0x1f # Reg gets masked down

    if cnt == 0:
        return
    if cnt > bsize:
        return

    res = dst << cnt
    res |= src >> (bsize - cnt)
    ret = e_bits.unsigned(res, dsize)

    if cnt == 1: # Set OF on sign change
        dsign = e_bits.is_signed(dst, dsize)
        rsign = e_bits.is_signed(ret, dsize)
        self.setFlag(EFLAGS_OF, dsign != rsign)

    # set carry to last shifted bit (bit bsize-cnt of the original dst)
    self.setFlag(EFLAGS_CF, (dst >> (bsize - cnt)) & 1)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(ret, dsize))
    self.setFlag(EFLAGS_ZF, not ret)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(ret))

    self.setOperValue(op, 0, ret)
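# Standalone sketch of the SHLD data movement above at an assumed 8-bit width
# (hypothetical helper, not part of the emulator): bits vacated at the bottom
# of the destination are filled from the top of the second operand, and CF
# receives the last bit shifted out of the MSB end.
def shld8(dst, src, cnt):
    bsize = 8
    cnt &= 0x1f
    if cnt == 0 or cnt > bsize:
        return dst, None
    res = (dst << cnt) | (src >> (bsize - cnt))
    cf = (dst >> (bsize - cnt)) & 1             # last bit shifted out the top
    return res & 0xff, cf

# 0b10110000 shifted left by 4, refilled from the top of 0b11110000:
assert shld8(0b10110000, 0b11110000, 4) == (0b00001111, 1)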
def i_shr(self, op):
    dsize = op.opers[0].tsize
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)

    src = src & 0x1f

    # According to the Intel manual, if src == 0 eflags are not changed
    if src == 0:
        return

    res = dst >> src
    cf = (dst >> (src - 1)) & 1

    res = e_bits.unsigned(res, dsize)

    self.setFlag(EFLAGS_CF, cf)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(res, dsize))
    self.setFlag(EFLAGS_ZF, not res)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(res))

    if src == 1:
        # Defined for 1-bit SHR: OF is the MSB of the original operand
        self.setFlag(EFLAGS_OF, e_bits.msb(dst, dsize))
    else:
        self.setFlag(EFLAGS_OF, 0) # Undefined, but zero'd on core2 duo

    self.setOperValue(op, 0, res)
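# Quick sketch of the SHR carry rule used above (independent of the emulator,
# assumed 8-bit width): the last bit shifted out the low end is bit (cnt - 1)
# of the original value.
def shr8(dst, cnt):
    cnt &= 0x1f
    if cnt == 0:
        return dst, None                        # flags would be left untouched
    return (dst & 0xff) >> cnt, (dst >> (cnt - 1)) & 1

assert shr8(0b00000110, 2) == (0b00000001, 1)   # bit 1 of the original becomes CF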
def i_add(self, op):
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)

    dsize = op.opers[0].tsize
    ssize = op.opers[1].tsize

    #FIXME PDE and flags
    if dst == None or src == None:
        self.undefFlags()
        self.setOperValue(op, 0, None)
        return

    if dsize > ssize:
        src = e_bits.sign_extend(src, ssize, dsize)
        ssize = dsize

    udst = e_bits.unsigned(dst, dsize)
    usrc = e_bits.unsigned(src, ssize)
    sdst = e_bits.signed(dst, dsize)
    ssrc = e_bits.signed(src, ssize)

    ures = udst + usrc
    sres = sdst + ssrc

    self.setFlag(EFLAGS_CF, e_bits.is_unsigned_carry(ures, dsize))
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(ures))
    self.setFlag(EFLAGS_AF, e_bits.is_aux_carry(src, dst))
    self.setFlag(EFLAGS_ZF, not ures)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(ures, dsize))
    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(sres, dsize))

    self.setOperValue(op, 0, ures)
def logicalAnd(self, op):
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)

    # PDE
    if dst == None or src == None:
        self.undefFlags()
        self.setOperValue(op, 0, None)
        return

    dsize = op.opers[0].tsize
    ssize = op.opers[1].tsize

    # sign-extend an immediate if needed
    if dsize != ssize:
        src = e_bits.sign_extend(src, ssize, dsize)
        ssize = dsize

    # Make sure everybody's on the same bit page.
    dst = e_bits.unsigned(dst, dsize)
    src = e_bits.unsigned(src, ssize)

    res = src & dst

    self.setFlag(EFLAGS_AF, 0) # AF is undefined, but it seems like it is zeroed
    self.setFlag(EFLAGS_OF, 0)
    self.setFlag(EFLAGS_CF, 0)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(res, dsize))
    self.setFlag(EFLAGS_ZF, not res)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(res))

    return res
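# Small self-contained sketch (stand-in expressions, not envi.bits) of the
# AND flag behaviour implemented above: CF and OF are unconditionally
# cleared, while SF/ZF/PF are derived from the 8-bit result.
def and_flags8(dst, src):
    res = (dst & src) & 0xff
    return {
        'result': res,
        'CF': 0,
        'OF': 0,
        'SF': bool(res & 0x80),
        'ZF': res == 0,
        'PF': bin(res).count('1') % 2 == 0,
    }

# and_flags8(0xf0, 0x0f) -> result 0 with ZF set.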
def i_sar(self, op):
    dsize = op.opers[0].tsize
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)

    src = src & 0x1f

    # According to the Intel manual, if src == 0 eflags are not changed
    if src == 0:
        return

    signed = e_bits.msb(dst, dsize)

    res = dst >> src
    cf = (dst >> (src - 1)) & 1

    # If it was signed, we need to fill in all those bits we
    # shifted off with ones.
    if signed:
        x = (8 * dsize) - src
        umax = e_bits.u_maxes[dsize]
        res |= (umax >> x) << x

    res = e_bits.unsigned(res, dsize)

    self.setFlag(EFLAGS_CF, cf)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(res, dsize))
    self.setFlag(EFLAGS_ZF, not res)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(res))

    if src == 1:
        self.setFlag(EFLAGS_OF, False)
    else:
        self.setFlag(EFLAGS_OF, 0) # Undefined, but zero'd on core2 duo

    self.setOperValue(op, 0, res)
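# Standalone 8-bit sketch of the SAR sign-fill step above: when the original
# MSB is set, the vacated high bits are refilled with ones, which is what the
# (umax >> x) << x mask in i_sar accomplishes (illustrative code, not the
# emulator's).
def sar8(dst, cnt):
    cnt &= 0x1f
    if cnt == 0:
        return dst
    res = (dst & 0xff) >> cnt
    if dst & 0x80:                              # negative: refill the top cnt bits
        x = 8 - cnt
        res |= (0xff >> x) << x
    return res

assert sar8(0xf0, 2) == 0xfc                    # -16 >> 2 == -4 in two's complement
assert sar8(0x70, 2) == 0x1c                    # positive values behave like SHR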
def logicalAnd(self, op):
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)

    dsize = op.opers[0].tsize
    ssize = op.opers[1].tsize

    # sign-extend an immediate if needed
    if dsize != ssize:
        src = e_bits.sign_extend(src, ssize, dsize)
        ssize = dsize

    # Make sure everybody's on the same bit page.
    dst = e_bits.unsigned(dst, dsize)
    src = e_bits.unsigned(src, ssize)

    res = src & dst

    self.setFlag(EFLAGS_AF, 0) # AF is undefined, but it seems like it is zeroed
    self.setFlag(EFLAGS_OF, 0)
    self.setFlag(EFLAGS_CF, 0)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(res, dsize))
    self.setFlag(EFLAGS_ZF, not res)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(res))

    return res
def i_dec(self, op):
    val = self.getOperValue(op, 0)
    if val == None:
        self.undefFlags()
        return

    val -= 1

    self.setOperValue(op, 0, val)

    #FIXME change over to integer subtraction
    self.setFlag(EFLAGS_OF, 0) #FIXME OF
    self.setFlag(EFLAGS_SF, e_bits.is_signed(val, op.opers[0].tsize))
    self.setFlag(EFLAGS_ZF, not val)
    self.setFlag(EFLAGS_AF, 0) #FIXME AF...
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(val))
def i_inc(self, op):
    size = op.opers[0].tsize
    val = self.getOperValue(op, 0)

    sval = e_bits.signed(val, size)
    sval += 1

    self.setOperValue(op, 0, sval)

    # Another arithmetic op where doing signed and unsigned is easier ;)
    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(sval, size))
    self.setFlag(EFLAGS_SF, e_bits.is_signed(sval, size))
    self.setFlag(EFLAGS_ZF, not sval)
    self.setFlag(EFLAGS_AF, (sval & 0xf == 0))
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(sval))
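# A brute-force check (standalone, hypothetical) of the auxiliary-carry
# shortcut used in i_inc above: a carry out of bit 3 happens exactly when the
# incremented value's low nibble wraps to zero, which is what
# (sval & 0xf == 0) captures.
for val in range(256):
    inc = (val + 1) & 0xff
    af_from_nibbles = ((val & 0xf) + 1) > 0xf   # carry out of bit 3
    af_from_result = (inc & 0xf) == 0           # the shortcut used above
    assert af_from_nibbles == af_from_result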
def i_xor(self, op):
    dsize = op.opers[0].tsize
    ssize = op.opers[1].tsize

    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)

    ret = src ^ dst

    self.setOperValue(op, 0, ret)

    self.setFlag(EFLAGS_CF, 0)
    self.setFlag(EFLAGS_OF, 0)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(ret, dsize))
    self.setFlag(EFLAGS_ZF, not ret)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(ret))
    self.setFlag(EFLAGS_AF, False) # Undefined but actually cleared on amd64 X2
def i_or(self, op):
    dst = self.getOperValue(op, 0)
    dsize = op.opers[0].tsize
    src = self.getOperValue(op, 1)
    ssize = op.opers[1].tsize

    if dsize != ssize:
        src = e_bits.sign_extend(src, ssize, dsize)
        ssize = dsize

    res = dst | src

    self.setOperValue(op, 0, res)

    self.setFlag(EFLAGS_OF, 0)
    self.setFlag(EFLAGS_CF, 0)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(res, dsize))
    self.setFlag(EFLAGS_ZF, not res)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(res))
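# The OR, AND and XOR handlers all set PF via e_bits.is_parity_byte. As a
# reference, this is the rule it encodes, written out independently (my own
# expression, not the envi.bits source): PF looks only at the low byte and is
# set when that byte has an even number of 1 bits.
def parity_byte(res):
    return bin(res & 0xff).count('1') % 2 == 0

assert parity_byte(0b00000011) is True          # two bits set -> PF set
assert parity_byte(0b00000111) is False         # three bits set -> PF clear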
def i_xor(self, op):
    # NOTE: This is pre-emptive for partially defined emulation
    dsize = op.opers[0].tsize
    ssize = op.opers[1].tsize

    if op.opers[0] == op.opers[1]:
        ret = 0
    else:
        dst = self.getOperValue(op, 0)
        src = self.getOperValue(op, 1)

        if dsize != ssize:
            src = e_bits.sign_extend(src, ssize, dsize)
            ssize = dsize

        ret = src ^ dst

    self.setOperValue(op, 0, ret)

    self.setFlag(EFLAGS_CF, 0)
    self.setFlag(EFLAGS_OF, 0)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(ret, dsize))
    self.setFlag(EFLAGS_ZF, not ret)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(ret))
    self.setFlag(EFLAGS_AF, False) # Undefined but actually cleared on amd64 X2
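# Sketch of why the same-operand short circuit above is safe: xor'ing a value
# with itself is always zero, so the emulator can skip reading a possibly
# unknown register and still report ZF set, SF clear and PF set.
for val in (0, 1, 0xdeadbeef, 0xffffffff):
    assert val ^ val == 0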
def intSubBase(self, src, dst, ssize, dsize):

    usrc = e_bits.unsigned(src, ssize)
    udst = e_bits.unsigned(dst, dsize)

    ssrc = e_bits.signed(src, ssize)
    sdst = e_bits.signed(dst, dsize)

    ures = udst - usrc
    sres = sdst - ssrc

    #print "dsize/ssize: %d %d" % (dsize, ssize)
    #print "unsigned: %d %d %d" % (usrc, udst, ures)
    #print "signed: %d %d %d" % (ssrc, sdst, sres)

    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(sres, dsize))
    self.setFlag(EFLAGS_AF, e_bits.is_aux_carry(usrc, udst))
    self.setFlag(EFLAGS_CF, e_bits.is_unsigned_carry(ures, dsize))
    self.setFlag(EFLAGS_SF, e_bits.is_signed(ures, dsize))
    self.setFlag(EFLAGS_ZF, not sres)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(ures))

    return ures
def intSubBase(self, src, dst, ssize, dsize):

    usrc = e_bits.unsigned(src, ssize)
    udst = e_bits.unsigned(dst, dsize)

    ssrc = e_bits.signed(src, ssize)
    sdst = e_bits.signed(dst, dsize)

    ures = udst - usrc
    sres = sdst - ssrc

    #print "dsize/ssize: %d %d" % (dsize, ssize)
    #print "unsigned: %d %d %d" % (usrc, udst, ures)
    #print "signed: %d %d %d" % (ssrc, sdst, sres)

    self.setFlag(EFLAGS_OF, e_bits.is_signed_overflow(sres, dsize))
    self.setFlag(EFLAGS_AF, e_bits.is_aux_carry_sub(usrc, udst))
    self.setFlag(EFLAGS_CF, e_bits.is_unsigned_carry(ures, dsize))
    self.setFlag(EFLAGS_SF, e_bits.is_signed(ures, dsize))
    self.setFlag(EFLAGS_ZF, not sres)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(ures))

    return ures
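# Illustrative 8-bit sketch of the subtraction flag rules used by intSubBase
# (stand-in expressions, not the real e_bits helpers): CF is the unsigned
# borrow and OF is overflow of the ideal signed result.
def sub_flags8(dst, src):
    def tosigned(v):
        return v - 0x100 if v & 0x80 else v

    ures = (dst & 0xff) - (src & 0xff)          # may go negative
    sres = tosigned(dst) - tosigned(src)        # ideal signed result
    return {
        'result': ures & 0xff,
        'CF': ures < 0,                         # unsigned borrow
        'OF': not (-128 <= sres <= 127),        # signed overflow
        'ZF': (ures & 0xff) == 0,
        'SF': bool(ures & 0x80),
        'AF': (dst & 0xf) < (src & 0xf),        # borrow out of bit 3
    }

# 0x00 - 0x01 borrows: CF set, result 0xff.
# print(sub_flags8(0x00, 0x01))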
def i_shrd(self, op):
    dsize = op.opers[0].tsize
    bsize = dsize * 8
    dst = self.getOperValue(op, 0)
    src = self.getOperValue(op, 1)
    cnt = self.getOperValue(op, 2)

    cnt &= 0x1f # Reg gets masked down

    if cnt == 0:
        return
    if cnt > bsize: # result is "undefined"
        return

    res = dst >> cnt
    res |= src << (bsize - cnt)

    # We now have the bits masked into res, but it has become
    # wider than the original operand.  Ret is masked down to size.
    ret = e_bits.unsigned(res, dsize)

    if cnt == 1: # Set OF on sign change
        dsign = e_bits.is_signed(dst, dsize)
        rsign = e_bits.is_signed(ret, dsize)
        self.setFlag(EFLAGS_OF, dsign != rsign)

    # set carry to last shifted bit (bit cnt-1 of the original dst)
    self.setFlag(EFLAGS_CF, (dst >> (cnt - 1)) & 1)
    self.setFlag(EFLAGS_SF, e_bits.is_signed(ret, dsize))
    self.setFlag(EFLAGS_ZF, not ret)
    self.setFlag(EFLAGS_PF, e_bits.is_parity_byte(ret))

    self.setOperValue(op, 0, ret)
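# Standalone 8-bit sketch of the SHRD data movement above (hypothetical
# helper, not the emulator's): bits vacated at the top of the destination are
# filled from the low bits of the second operand, and CF receives the last
# bit shifted out of the low end.
def shrd8(dst, src, cnt):
    bsize = 8
    cnt &= 0x1f
    if cnt == 0 or cnt > bsize:
        return dst, None
    res = (dst >> cnt) | ((src << (bsize - cnt)) & 0xff)
    cf = (dst >> (cnt - 1)) & 1                 # last bit shifted out the bottom
    return res, cf

# 0b00001111 shifted right by 4, refilled from the low bits of 0b00001010:
assert shrd8(0b00001111, 0b00001010, 4) == (0b10100000, 1)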