Example 1
def x86_movs(ctx, i, size):
    # This is to handle the mnemonic overload (SSE movsd) for 'move scalar
    # double-precision floating-point value' since capstone doesn't
    # distinguish. That instruction is just a mov into/from the SSE
    # registers.
    if not operand.is_memory(ctx, i, 0) or not operand.is_memory(ctx, i, 1):
        # so basically, if one of the operands is not a memory address, then we
        # know that this is the SSE version, which x86_mov can handle.
        return x86_mov(ctx, i)

    value = ctx.tmp(size)

    if i.mnemonic.startswith('rep'):
        rep_prologue(ctx, i)

    ctx.emit(  ldm_  (ctx.source, value))
    ctx.emit(  stm_  (value, ctx.destination))
    ctx.emit(  jcc_  (r('df', 8), 'decrement'))
    ctx.emit('increment')
    ctx.emit(  add_  (ctx.destination, imm(value.size // 8, ctx.word_size), ctx.destination))
    ctx.emit(  add_  (ctx.source, imm(value.size // 8, ctx.word_size), ctx.source))
    ctx.emit(  jcc_  (imm(1, 8), 'done'))
    ctx.emit('decrement')
    ctx.emit(  sub_  (ctx.destination, imm(value.size // 8, ctx.word_size), ctx.destination))
    ctx.emit(  sub_  (ctx.source, imm(value.size // 8, ctx.word_size), ctx.source))
    ctx.emit('done')
    ctx.emit(  nop_  ())

    if i.mnemonic.startswith('rep'):
        rep_epilogue(ctx, i)
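
For reference, the REIL emitted above encodes the per-element semantics of x86 MOVS: copy one element from the source pointer to the destination pointer, then step both pointers forward or backward by the element width (the value.size // 8 above) depending on the direction flag df. A minimal pure-Python model of one such iteration; the movs_step helper is illustrative only and not part of the translator:

def movs_step(mem, src, dst, width, df):
    # Copy one element (the ldm_/stm_ pair), then step both pointers by the
    # element width: backward when DF is set, forward otherwise (the jcc_ on 'df').
    mem[dst:dst + width] = mem[src:src + width]
    step = -width if df else width
    return src + step, dst + step

mem = bytearray(b'ABCD\x00\x00\x00\x00')
src, dst = movs_step(mem, 0, 4, 4, df=False)   # mem[4:8] == b'ABCD'; src, dst == (4, 8)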
Example 2
def _write_bit(ctx, i, base_index, offset_index, bit):
    if operand.is_memory(ctx, i, base_index):
        # nasty case, indexing into in-memory bitstring; offset can be
        # > word_size

        base = operand.get_address(ctx, i, base_index)
        offset = operand.get(ctx, i, offset_index)
        offset_sign = ctx.tmp(8)
        byte_offset = ctx.tmp(base.size)
        tmp0 = ctx.tmp(offset.size)
        byte = ctx.tmp(8)
        bitmask = ctx.tmp(8)

        ctx.emit(and_(offset, imm(sign_bit(offset.size), offset.size), tmp0))
        ctx.emit(bisnz_(tmp0, offset_sign))
        ctx.emit(and_(offset, imm(~sign_bit(offset.size), offset.size),
                      offset))
        ctx.emit(div_(offset, imm(8, offset.size), byte_offset))
        ctx.emit(mod_(offset, imm(8, offset.size), offset))

        ctx.emit(jcc_(offset_sign, 'negative_offset'))
        ctx.emit(add_(base, byte_offset, base))
        ctx.emit(jcc_(imm(1, 8), 'base_calculated'))

        ctx.emit('negative_offset')
        ctx.emit(sub_(base, byte_offset, base))

        ctx.emit('base_calculated')
        ctx.emit(ldm_(base, byte))
        ctx.emit(lshl_(imm(1, 8), offset, bitmask))
        ctx.emit(xor_(bitmask, imm(mask(8), 8), bitmask))
        ctx.emit(and_(byte, bitmask, byte))
        ctx.emit(lshl_(bit, offset, bitmask))
        ctx.emit(or_(byte, bitmask, byte))  # bitmask holds the new bit shifted into position
        ctx.emit(stm_(byte, base))

    else:
        # simple case, it's a register
        a = operand.get(ctx, i, base_index)
        offset = operand.get(ctx, i, offset_index)
        bitmask = ctx.tmp(a.size)
        tmp0 = ctx.tmp(a.size)
        tmp1 = ctx.tmp(a.size)

        ctx.emit(lshl_(imm(1, a.size), offset, bitmask))
        ctx.emit(xor_(bitmask, imm(mask(a.size), a.size), bitmask))
        ctx.emit(and_(a, bitmask, tmp0))
        ctx.emit(str_(bit, tmp1))
        ctx.emit(lshl_(tmp1, offset, tmp1))
        ctx.emit(or_(tmp0, tmp1, tmp1))

        operand.set(ctx, i, base_index, tmp1)
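
Conceptually, the memory branch above splits the bit offset into a byte displacement (div_ by 8) and a bit position within that byte (mod_ by 8), clears the addressed bit with an inverted mask, and ORs the new bit in; the register branch performs the same mask-and-or directly on the register value. A rough pure-Python sketch of that idea follows; the write_bit helper is illustrative, not part of the library, and it lets Python's floor division absorb the sign handling that the REIL does explicitly via offset_sign and the jcc_:

def write_bit(mem, base, bit_offset, bit):
    # Split the (possibly negative) bit offset into a byte displacement and a
    # bit position inside that byte, then rewrite just that one bit.
    byte_offset, bit_pos = divmod(bit_offset, 8)
    addr = base + byte_offset
    cleared = mem[addr] & ~(1 << bit_pos) & 0xff   # inverted mask clears the target bit
    mem[addr] = cleared | ((bit & 1) << bit_pos)   # shift the new bit into place and OR it in

mem = bytearray(8)
write_bit(mem, 4, -3, 1)   # bit offset -3 from byte 4 lands in bit 5 of mem[3]
print(mem[3])              # 32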