Example #1
 def translate_uncond(self,ins,mapping):
   op = ins.operands[0] #Get operand
   if op.type == X86_OP_REG: # e.g. call eax or jmp ebx
     target = ins.reg_name(op.reg)
     return self.get_indirect_uncond_code(ins,mapping,target)
   elif op.type == X86_OP_MEM: # e.g. call [eax + ecx*4 + 0xcafebabe] or jmp [ebx+ecx]
     target = ins.op_str
     return self.get_indirect_uncond_code(ins,mapping,target)
   elif op.type == X86_OP_IMM: # e.g. call 0xdeadbeef or jmp 0xcafebada
     target = op.imm
     code = b''
     inserted = self.before_inst_callback(ins)
     if inserted is not None:
       code += inserted
     # Again, there is no thunk special case for 64-bit code
     if self.context.no_pic: # and target != self.context.get_pc_thunk:
       #push nothing if no_pic UNLESS it's the thunk
       #We only support DIRECT calls to the thunk
       if ins.mnemonic == 'call':
         self.context.stat['dircall']+=1
       else:
         self.context.stat['dirjmp']+=1
     elif ins.mnemonic == 'call': #If it's a call, push the original address of the next instruction
       self.context.stat['dircall']+=1
       exec_call = '''
       push %s
       '''
       so_call = '''
       push rbx
       lea rbx,[rip - 0x%x]
       xchg rbx,[rsp]
       '''
       if self.context.write_so:
         if mapping is not None:
           # 8 is the length of push rbx;lea rbx,[rip-%s]
           code += asm(so_call%( (self.context.newbase+(mapping[ins.address]+8)) - (ins.address+len(ins.bytes)) ) )
         else:
           code += asm(so_call%( (self.context.newbase) - (ins.address+len(ins.bytes)) ) )
       else:
         code += asm(exec_call%(ins.address+len(ins.bytes)))
     else:
       self.context.stat['dirjmp']+=1
     newtarget = self.remap_target(ins.address,mapping,target,len(code))
     #print "(pre)new length: %s"%len(callback_code)
     #print "target: %s"%hex(target)
     #print "newtarget: %s"%newtarget
     # Again, there is no thunk special case for 64-bit code
     if self.context.no_pic: # and target != self.context.get_pc_thunk:
       code += asm( '%s $+%s'%(ins.mnemonic,newtarget) )
     else:
       patched = asm('jmp $+%s'%newtarget)
       if len(patched) == 2: #Short encoding, which we do not want
         patched+=b'\x90\x90\x90' #Add padding of 3 NOPs
       code += patched
     #print "new length: %s"%len(callback_code+patched)
     return code
   return None
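A worked check of the displacement the write_so branch plugs into so_call above, using made-up numbers: push rbx is 1 byte and lea rbx,[rip-disp32] is 7, so rip inside the lea sits 8 bytes past the start of the rewritten sequence (the "8" in the comment), and subtracting the original return address from that rip value yields a displacement that makes the lea recover the original return address at run time. This is only an illustration; newbase and mapping stand in for the real context values.

# Sketch with made-up addresses: verify that so_call's displacement makes
# 'lea rbx,[rip - disp]' compute the ORIGINAL return address at run time.
newbase    = 0x7f0000000000   # hypothetical new base of the rewritten text
new_offset = 0x1234           # hypothetical mapping[ins.address]
orig_ret   = 0x400505         # hypothetical ins.address + len(ins.bytes)
disp = (newbase + new_offset + 8) - orig_ret   # value substituted into so_call
rip_at_lea = newbase + new_offset + 8          # rip when the lea runs (push rbx = 1 byte, lea = 7 bytes)
print(hex(rip_at_lea - disp))                  # -> 0x400505, the original return address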
Example #2
  def translate_one(self,ins,mapping):
    if ins.mnemonic in ['call','jmp']: #Unconditional jump
      return self.translate_uncond(ins,mapping)
    elif ins.mnemonic in self.JCC: #Conditional jump
      return self.translate_cond(ins,mapping)
    elif ins.mnemonic == 'ret':
      return self.translate_ret(ins,mapping)
    elif ins.mnemonic in ['retn','retf','repz']: #I think retn is not used in Capstone
      #print 'WARNING: unimplemented %s %s'%(ins.mnemonic,ins.op_str)
      return b'\xf4\xf4\xf4\xf4' #Create obvious cluster of hlt instructions
    else: #Any other instruction
      inserted = self.before_inst_callback(ins)
      #Even for non-control-flow instructions, we need to replace all references to rip
      #with the address pointing directly after the instruction.
      #TODO: This will NOT work for shared libraries or any PIC, because it depends on
      #knowing the static instruction address.  For all shared objects, we would need to
      #subtract off the offset between the original and new text; as long as the offset is
      #fixed, then we should be able to just precompute that offset, without it being affected
      #by the position of the .so code
      #TODO: abandon rewriting ljmp instructions for now because the assembler doesn't like them
      #and we haven't been rewriting their destinations anyway; if they *are* used, they were already
      #broken before this 
      #TODO: I have also abandoned rewriting the following instructions because I can't get it to
      #re-assemble with the current assembler:
      #  fstp
      #  fldenv
      #  fld
      #TODO: Since I am now doing a crazy optimization in which I use the original instruction's bytes
      #and only change the last 4 bytes (the offset), I should actually be able to support these incompatible
      #instructions by saving their original bytes in the assembler cache and therefore never actually sending
      #the disassembled instruction to the assembler at all.
      incompatible = ['ljmp', 'fstp', 'fldenv', 'fld', 'fbld']
      if 'rip' in ins.op_str:# and (ins.mnemonic not in incompatible):
        '''asm1 = asm( '%s %s' % (ins.mnemonic, self.replace_rip(ins,mapping) ) )
        asm2 = asm( '%s %s' % (ins.mnemonic, self.replace_rip(ins,None) ) )
        if len(asm1) != len(asm2):
          print '%s %s @ 0x%x LENGTH FAIL1: %s vs %s' % (ins.mnemonic, ins.op_str, ins.address, str(asm1).encode('hex'), str(asm2).encode('hex') )
          newone = len( asm( '%s %s' % (ins.mnemonic, self.replace_rip(ins,mapping) ) ) )
          oldone = len( asm( '%s %s' % (ins.mnemonic, self.replace_rip(ins,None) ) ) )
          print '%d vs %d, %d vs %d' % (newone,oldone,len(asm1),len(asm2))'''
        code = b''
        if inserted is not None:
          code = asm( '%s %s' % (ins.mnemonic, self.replace_rip(ins,mapping,len(inserted) + len(ins.bytes) ) ) )
          code = inserted + code
        else:
          code = asm( '%s %s' % (ins.mnemonic, self.replace_rip(ins,mapping,len(ins.bytes) ) ) )
        return code
      else:
        '''if 'rip' in ins.op_str and (ins.mnemonic in incompatible):
          print 'NOT rewriting %s instruction with rip: %s %s' % (ins.mnemonic,ins.mnemonic,ins.op_str) 
        if ins.mnemonic == 'ljmp':
          print 'WARNING: unhandled %s %s @ %x'%(ins.mnemonic,ins.op_str,ins.address)'''
        if inserted is not None:
          return inserted + bytes(ins.bytes)
      return None #No translation needs to be done
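The comments above describe replacing 'rip' in an operand string with the address immediately after the original instruction. The project's actual replace_rip(ins,mapping,length) also uses the mapping and a length argument, so the sketch below is only a hypothetical illustration of that substitution idea, with made-up values and a made-up helper name.

# Hypothetical sketch of the rip substitution described in the comments above;
# the real replace_rip is more involved.
import re

def replace_rip_sketch(op_str, ins_address, ins_length):
    next_addr = ins_address + ins_length             # rip points just past the instruction
    return re.sub(r'\brip\b', hex(next_addr), op_str)

print(replace_rip_sketch('qword ptr [rip + 0x2008]', 0x400730, 7))
# -> 'qword ptr [0x400737 + 0x2008]'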
Example #3
 def translate_ret(self,ins,mapping):
   '''
   (The commented assembly below sketches the scheme using the original 32-bit register
   names; the 64-bit templates that follow are the equivalents actually emitted.)
   mov [esp-28], eax	;save old eax value
   pop eax		;pop address from stack from which we will get destination
   call $+%s		;call lookup function
   mov [esp-4], eax	;save new eax value (destination mapping)
   mov eax, [esp-32]	;restore old eax value (the pop has shifted our stack so we must look at 28+4=32)
   jmp [esp-4]		;jmp/call to new address
   '''
   template_before = '''
   mov [rsp-56], rax
   pop rax
   '''
   template_after = '''
   call $+%s
   %s
   mov [rsp-8], rax
   mov rax, [rsp-%d]
   jmp [rsp-8]
   '''
   self.context.stat['ret']+=1
   code = b''
   inserted = self.before_inst_callback(ins)
   if inserted is not None:
     code += inserted
   # Since thunks do not need to be used for 64-bit code, there is no specific
   # place we need to treat as a special case.  It is unlikely that code will
   # try to use the pushed return address to obtain the instruction pointer 
   # (after all, it can just access it directly!), but should it TRY to do this,
   # the program will crash!  Thus the no_pic optimization is a heuristic that
   # won't work for some code (in this case only very unusual code?)
   if self.context.no_pic: # and ins.address != self.context.get_pc_thunk + 3:
     #Perform a normal return UNLESS this is the ret for the thunk.
     #Currently its position is hardcoded as three bytes after the thunk entry.
      code += asm( 'ret %s'%ins.op_str ) #Append so any callback code inserted above is kept
   else:
      code += asm(template_before) #Append so any callback code inserted above is kept
     size = len(code)
     lookup_target = b''
     if self.context.exec_only:
       #Special lookup for not rewriting arguments when going outside new main text address space
       lookup_target = self.remap_target(ins.address,mapping,self.context.secondary_lookup_function_offset,size)
     else:
       lookup_target = self.remap_target(ins.address,mapping,self.context.lookup_function_offset,size)
     if ins.op_str == '':
       code+=asm(template_after%(lookup_target,'',64)) #64 because of the value we popped
   else: #For ret instructions that pop imm16 bytes from the stack, add that many bytes to rsp
     pop_amt = int(ins.op_str,16) #We need to retrieve the right rax value from where we saved it
       code+=asm(template_after%(lookup_target,'add rsp,%d'%pop_amt,64+pop_amt))
   return code
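For a concrete sense of what the non-no_pic path emits, here is how template_after gets filled for a 'ret 8' (a ret that pops an extra 8 bytes), using a made-up lookup_target; the real code hands the resulting text to asm().

template_after = '''
call $+%s
%s
mov [rsp-8], rax
mov rax, [rsp-%d]
jmp [rsp-8]
'''
op_str = '8'                     # hypothetical capstone op_str for 'ret 8'
pop_amt = int(op_str, 16)
lookup_target = '0x4242'         # hypothetical result of remap_target
print(template_after % (lookup_target, 'add rsp,%d' % pop_amt, 64 + pop_amt))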
Example #4
 def get_popgm_code(self):
     #pushad and popad do NOT exist in x64,
     #so we must choose which registers must be preserved at program start
     #TODO: For now we skip actually calling popgm, because it will have to be
     #completely re-engineered, so we will need to change the offset to 0x11
     #once we have fixed popgm for x64
     call_popgm = '''
 push rax
 push rcx
 push rdx
 push rbx
 push rbp
 push rsi
 push rdi
 mov rdi, %s
 call $+0x0d
 pop rdi
 pop rsi
 pop rbp
 pop rbx
 pop rdx
 pop rcx
 pop rax
 ret
 '''
     popgmbytes = asm(call_popgm % (self.context.global_sysinfo + 8))
     with open('x64_%s' % self.context.popgm, 'rb') as f: #Read the stub as raw bytes
         popgmbytes += f.read()
     return popgmbytes
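A worked check of the 'call $+0x0d' displacement in the stub above, assuming NASM-style '$' (the address of the call itself) and the usual encodings: call rel32 is 5 bytes, each pop of a low 64-bit register is 1 byte, and ret is 1 byte, so the call skips itself, the seven pops, and the ret, landing just past the ret where the x64 popgm bytes are appended.

call_len = 5      # call rel32
pops_len = 7      # pop rdi ... pop rax, one byte each
ret_len  = 1      # ret
print(hex(call_len + pops_len + ret_len))   # -> 0xd, matching 'call $+0x0d'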
Example #5
 def translate_cond(self,ins,mapping):
   self.context.stat['jcc']+=1
   patched = b''
   inserted = self.before_inst_callback(ins)
   if inserted is not None:
     patched += inserted
   if ins.mnemonic in ['jrcxz','jecxz']: #These instructions have no long encoding (and jcxz is not allowed in 64-bit)
     jrcxz_template = '''
     test rcx,rcx
     '''
     jecxz_template = '''
     test ecx,ecx
     '''
     target = ins.operands[0].imm # int(ins.op_str,16) The destination of this instruction
     #newtarget = remap_target(ins.address,mapping,target,0)
     if ins.mnemonic == 'jrcxz':
       patched+=asm(jrcxz_template)
     else:
       patched+=asm(jecxz_template)
     newtarget = self.remap_target(ins.address,mapping,target,len(patched))
     #print 'want %s, but have %s instead'%(remap_target(ins.address,mapping,target,len(patched)), newtarget)
     #Apparently the offset for jcxz and jecxz instructions may have been wrong?  How did it work before?
     patched += asm('jz $+%s'%newtarget)
     #print 'code length: %d'%len(patched)
     
     #TODO: some instructions encode to 6 bytes, some to 5, some to 2.  How do we know which?
     #For example, for CALL, it seems to only be 5 or 2 depending on offset.
     #But for jg, it can be 2 or 6 depending on offset, I think because it has a 2-byte opcode.
     #while len(patched) < 6: #Short encoding, which we do not want
     #  patched+='\x90' #Add padding of NOPs
     #The previous commented out code wouldn't even WORK now, since we insert another instruction
     #at the MINIMUM.  I'm amazed the jcxz/jecxz code even worked at all before
   else:
     target = ins.operands[0].imm # int(ins.op_str,16) The destination of this instruction
     newtarget = self.remap_target(ins.address,mapping,target,len(patched))
     patched+=asm(ins.mnemonic + ' $+' + newtarget)
     #TODO: some instructions encode to 6 bytes, some to 5, some to 2.  How do we know which?
     #For example, for CALL, it seems to only be 5 or 2 depending on offset.
     #But for jg, it can be 2 or 6 depending on offset, I think because it has a 2-byte opcode.
     #while len(patched) < 6: #Short encoding, which we do not want
     #  patched+='\x90' #Add padding of NOPs
   return patched
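To make the two output shapes concrete, this is the text translate_cond builds (before asm()) for a jrcxz and for an ordinary conditional jump, using a made-up remapped target.

newtarget = '0x40'                           # hypothetical result of remap_target
print('test rcx,rcx\njz $+%s' % newtarget)   # jrcxz/jecxz case: no rel32 encoding exists
print('jne $+%s' % newtarget)                # ordinary jcc case: reuse the original mnemonic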
Example #6
 def get_callback_code(self,address,mapping,cbargs):
   '''Remaps each callback argument based on index.  cbargs is an array of argument indices
      that let us know which argument (a register in x64) we must rewrite.
      We insert code for each we must rewrite.'''
   arg_registers = ['rdi','rsi','rdx','rcx','r8','r9'] #Order of integer/pointer arguments in the x86-64 System V calling convention
   callback_template_before = '''
   mov rax, %s
   '''
   callback_template_after = '''
   call $+%s
   mov %s, rax
   '''
   code = asm('push rax') #Save rax, use to hold callback address
   for ind in cbargs:
     #Move value in register for that argument to rax
     cb_before = callback_template_before%( arg_registers[ind] )
     code += asm(cb_before) #Assemble this part first so we will know the offset to the lookup function
     size = len(code)
     #Use secondary lookup function so it won't try to rewrite arguments if the callback is outside the main binary
     lookup_target = self.remap_target( address, mapping, self.context.secondary_lookup_function_offset, size )
     cb_after = callback_template_after%( lookup_target, arg_registers[ind] )
     code += asm(cb_after) #Save the new address over the original
   code += asm('pop rax') #Restore rax
   return code
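A sketch of the per-argument text this builds for cbargs = [0, 2] (rewriting the rdi and rdx arguments), with a made-up lookup target; the real code assembles each piece and brackets the whole thing with push rax / pop rax.

arg_registers = ['rdi','rsi','rdx','rcx','r8','r9']
lookup_target = '0x100'                         # hypothetical result of remap_target
for ind in [0, 2]:
    print('mov rax, %s' % arg_registers[ind])   # move the callback pointer into rax
    print('call $+%s' % lookup_target)          # translate it via the secondary lookup
    print('mov %s, rax' % arg_registers[ind])   # write the translated pointer back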
Example #7
 def get_indirect_uncond_code(self,ins,mapping,target):
   #Commented assembly (kept with the original 32-bit register names; the 64-bit templates below are what is actually emitted)
   '''
   mov [esp-28], eax	;save old eax value (very far above the stack because of future push/call)
   mov eax, %s		;read location in memory from which we will get destination
   %s			;if a call, we push return address here
   call $+%s		;call lookup function
   mov [esp-4], eax	;save new eax value (destination mapping)
   mov eax, [esp-%s]	;restore old eax value (offset depends on whether return address pushed)
   jmp [esp-4]		;jmp to new address
   '''
   #If the argument is an offset from rip, then we must change the reference to rip.  Any rip-relative
   #addressing is destroyed because all the offsets are completely different; we need the 
   #original address that rip WOULD have pointed to, so we must replace any references to it.
   template_before = '''
   mov [rsp-64], rax
   mov rax, %s
   %s
   '''
   exec_call = '''
   push %s
   '''
   so_call_before = '''
   push rbx
   '''
   so_call_after = '''
   lea rbx,[rip - 0x%x]
   xchg rbx,[rsp]
   '''
   template_after = '''
   call $+%s
   mov [rsp-8], rax
   mov rax, [rsp-%s]
   jmp [rsp-8]
   '''
   template_nopic = '''
   call $+%s
   mov [rsp-8], rax
   mov rax, [rsp-%s]
   %s [rsp-8]
   '''
   #TODO: This is somehow still the bottleneck, so this needs to be optimized
   code = b''
   if self.context.exec_only:
     code += self.get_remap_callbacks_code(ins,mapping,target)
   #NOTE: user instrumentation code comes after callbacks code.  No particular reason to put it either way,
   #other than perhaps consistency, but for now this is easier.
   inserted = self.before_inst_callback(ins)
   if inserted is not None:
     code += inserted
   #Replace references to rip with the original address after this instruction so that we
   #can look up the new address using the original
   if 'rip' in target:
     '''if len( asm( '%s %s' % (ins.mnemonic, self.replace_rip(ins,mapping) ) ) ) != len( asm( '%s %s' % (ins.mnemonic, self.replace_rip(ins,None) ) ) ):
       print '%s %s @ 0x%x LENGTH FAIL2: %s vs %s' % (ins.mnemonic, ins.op_str, ins.address, str(asm('%s %s' % (ins.mnemonic, self.replace_rip(ins,mapping) ))).encode('hex'), str(asm('%s %s' % (ins.mnemonic, self.replace_rip(ins,None)) )).encode('hex') )
       newone = len( asm( '%s %s' % (ins.mnemonic, self.replace_rip(ins,mapping) ) ) )
       oldone = len( asm( '%s %s' % (ins.mnemonic, self.replace_rip(ins,None) ) ) )
       print '%d vs %d, %s' % (newone,oldone,newone == oldone)'''
     # The new "instruction length" is the length of all preceding code, plus the instructions up through the one referencing rip
     target = self.replace_rip(ins,mapping,len(code) + len(asm('mov [rsp-64],rax\nmov rax,[rip]')) )
   if self.context.no_pic:
     if ins.mnemonic == 'call':
       self.context.stat['indcall']+=1
     else:
       self.context.stat['indjmp']+=1
     code += asm( template_before%(target,'') )
   elif ins.mnemonic == 'call':
     self.context.stat['indcall']+=1
     if self.context.write_so:
       code += asm( template_before%(target,so_call_before) )
       if mapping is not None:
         # 7 is the length of the lea rbx,[rip-%s] instruction, which needs to be added to the length of the code preceding where we access RIP
         code += asm(so_call_after%( (mapping[ins.address]+len(code)+7+self.context.newbase) - (ins.address+len(ins.bytes)) ) )
       else:
         code += asm(so_call_after%( (0x8f+self.context.newbase) - (ins.address+len(ins.bytes)) ) )
     else:
       code += asm(template_before%(target,exec_call%(ins.address+len(ins.bytes)) ))
   else:
     self.context.stat['indjmp']+=1
     code += asm(template_before%(target,''))
   size = len(code)
   lookup_target = self.remap_target(ins.address,mapping,self.context.lookup_function_offset,size)
   #Always transform an unconditional control transfer to a jmp, but
   #for a call, insert a push instruction to push the original return address on the stack.
   #At runtime, our rewritten ret will look up the right address to return to and jmp there.
   #If we push a value on the stack, we have to store even FURTHER away from the stack.
   #Note that calling the lookup function can move the stack pointer temporarily up to
   #20 bytes, which will obliterate anything stored too close to the stack pointer.  That, plus
   #the return value we push on the stack, means we need to put it at least 28 bytes away.
   if self.context.no_pic:
     #Change target to secondary lookup function instead
     lookup_target = self.remap_target(ins.address,mapping,self.context.secondary_lookup_function_offset,size)
     code += asm( template_nopic%(lookup_target,64,ins.mnemonic) )
   elif ins.mnemonic == 'call':
     code += asm(template_after%(lookup_target,56))
   else:  
     code += asm(template_after%(lookup_target,64))
   return code
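For reference, the text built on the simplest path (an indirect 'jmp rax' that is neither no_pic, a call, nor write_so), with a made-up lookup target; the real code assembles each piece with asm().

template_before = '''
mov [rsp-64], rax
mov rax, %s
%s
'''
template_after = '''
call $+%s
mov [rsp-8], rax
mov rax, [rsp-%s]
jmp [rsp-8]
'''
target = 'rax'               # operand of the original 'jmp rax'
lookup_target = '0x7b'       # hypothetical result of remap_target
print(template_before % (target, '') + template_after % (lookup_target, 64))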