def get_graph(self, addr):
    """Build the control-flow graph of the function starting at ``addr``.

    Disassembles instruction by instruction, following jump targets via a
    worklist (``rest``); each visited instruction becomes a node of the
    returned ``Graph``.
    """
    from capstone import CS_OP_IMM
    ARCH_UTILS = self.load_arch_module().utils
    curr = self.lazy_disasm(addr)
    gph = Graph(self, addr)
    rest = []  # worklist: addresses of instructions still to visit

    # NOTE(review): time.clock() was removed in Python 3.8 -- switch to
    # time.perf_counter() if this must run on modern interpreters.
    start = time.clock()

    while True:
        if not gph.exists(curr):
            if ARCH_UTILS.is_uncond_jump(curr) and len(curr.operands) > 0:
                if curr.operands[0].type == CS_OP_IMM:
                    addr = curr.operands[0].value.imm
                    nxt = self.lazy_disasm(addr)
                    gph.set_next(curr, nxt)
                    rest.append(nxt.address)
                else:
                    # Can't interpret jmp ADDR|reg
                    gph.add_node(curr)
                gph.uncond_jumps_set.add(curr.address)

            elif ARCH_UTILS.is_cond_jump(curr) and len(curr.operands) > 0:
                if curr.operands[0].type == CS_OP_IMM:
                    nxt_jump = self.lazy_disasm(curr.operands[0].value.imm)
                    direct_nxt = self.lazy_disasm(curr.address + curr.size)
                    gph.set_cond_next(curr, nxt_jump, direct_nxt)
                    rest.append(nxt_jump.address)
                    rest.append(direct_nxt.address)
                else:
                    # Can't interpret jmp ADDR|reg
                    gph.add_node(curr)
                gph.cond_jumps_set.add(curr.address)

            elif ARCH_UTILS.is_ret(curr):
                gph.add_node(curr)

            else:
                # Sequential instruction: fall through to the next one.
                # Was a bare "except: ... pass"; narrowed to Exception so
                # KeyboardInterrupt/SystemExit still propagate.
                try:
                    nxt = self.lazy_disasm(curr.address + curr.size)
                    gph.set_next(curr, nxt)
                    rest.append(nxt.address)
                except Exception:
                    gph.add_node(curr)

        try:
            curr = self.lazy_disasm(rest.pop())
        except IndexError:
            # Worklist exhausted: graph is complete.
            break

    if self.binary.type == T_BIN_PE:
        self.binary.pe_reverse_stripped_symbols(self)

    elapsed = time.clock() - start
    debug__("Graph built in %fs" % elapsed)
    return gph
def __extract_func(self, addr):
    """Build the graph of the function at ``addr`` from pre-disassembled code.

    ``self.code`` maps an address to its instruction; a missing address
    raises (caught below to terminate a fall-through path).
    """
    curr = self.code[addr]
    gph = Graph(self, addr)
    rest = []  # worklist: addresses of instructions still to visit

    while True:
        if not gph.exists(curr):
            if is_uncond_jump(curr) and len(curr.operands) > 0:
                if curr.operands[0].type == X86_OP_IMM:
                    addr = curr.operands[0].value.imm
                    nxt = self.code[addr]
                    gph.set_next(curr, nxt)
                    rest.append(nxt.address)
                else:
                    # jmp through a register/memory: target unknown.
                    # NOTE(review): forcejmp is presumably a module-level
                    # flag -- confirm it is defined where this runs.
                    if not forcejmp:
                        self.__error_jmp_reg(curr)
                    gph.add_node(curr)

            elif is_cond_jump(curr) and len(curr.operands) > 0:
                if curr.operands[0].type == X86_OP_IMM:
                    nxt_jump = self.code[curr.operands[0].value.imm]
                    direct_nxt = self.code[curr.address + curr.size]
                    gph.set_cond_next(curr, nxt_jump, direct_nxt)
                    rest.append(nxt_jump.address)
                    rest.append(direct_nxt.address)
                else:
                    if not forcejmp:
                        self.__error_jmp_reg(curr)
                    gph.add_node(curr)

            elif is_ret(curr):
                gph.add_node(curr)

            else:
                # Sequential instruction: fall through to the next one.
                # Was a bare "except: ... pass"; narrowed to Exception so
                # KeyboardInterrupt/SystemExit still propagate.
                try:
                    nxt = self.code[curr.address + curr.size]
                    gph.set_next(curr, nxt)
                    rest.append(nxt.address)
                except Exception:
                    gph.add_node(curr)

        try:
            curr = self.code[rest.pop()]
        except IndexError:
            # Worklist exhausted: graph is complete.
            break

    return gph
def get_graph(self, entry_addr):
    """Build the control-flow graph of the function at ``entry_addr``.

    Each instruction is a node; blocks are created later in __simplify.
    Returns ``(gph, nb_new_syms)`` where ``nb_new_syms`` is the number of
    symbols recovered for stripped PE binaries, or ``(None, 0)`` when no
    instruction could be disassembled.
    """
    from capstone import CS_OP_IMM, CS_ARCH_MIPS
    ARCH_UTILS = self.load_arch_module().utils
    gph = Graph(self, entry_addr)
    stack = [entry_addr]
    start = time()
    prefetch = None          # MIPS branch-delay-slot instruction
    addresses = set()        # every address visited (fed to symbol recovery)

    # WARNING: this assume that on every architectures the jump
    # address is the last operand (operands[-1])

    # Here each instruction is a node. Blocks will be created in the
    # function __simplify.

    while stack:
        ad = stack.pop()
        inst = self.lazy_disasm(ad)

        if inst is None:
            # Address is not decodable: unlink every predecessor that
            # pointed at it (single pass: remove the edge, then drop the
            # predecessor's entry once its out-list is empty).
            if ad in gph.link_in:
                for i in gph.link_in[ad]:
                    gph.link_out[i].remove(ad)
                    if not gph.link_out[i]:
                        del gph.link_out[i]
                del gph.link_in[ad]
            continue

        if gph.exists(inst):
            continue

        addresses.add(ad)

        if ARCH_UTILS.is_ret(inst):
            if self.arch == CS_ARCH_MIPS:
                prefetch = self.__prefetch_inst(inst)
                addresses.add(prefetch.address)
            gph.new_node(inst, prefetch, None)

        elif ARCH_UTILS.is_uncond_jump(inst):
            if self.arch == CS_ARCH_MIPS:
                prefetch = self.__prefetch_inst(inst)
                addresses.add(prefetch.address)
            gph.uncond_jumps_set.add(ad)
            op = inst.operands[-1]
            if op.type == CS_OP_IMM:
                nxt = op.value.imm
                stack.append(nxt)
                gph.new_node(inst, prefetch, [nxt])
            else:
                if inst.address in self.jmptables:
                    # Indirect jump resolved earlier as a jump table:
                    # every table entry is a successor.
                    table = self.jmptables[inst.address].table
                    stack += table
                    gph.new_node(inst, prefetch, table)
                else:
                    # Can't interpret jmp ADDR|reg
                    gph.new_node(inst, prefetch, None)

        elif ARCH_UTILS.is_cond_jump(inst):
            if self.arch == CS_ARCH_MIPS:
                prefetch = self.__prefetch_inst(inst)
                addresses.add(prefetch.address)
            gph.cond_jumps_set.add(ad)
            op = inst.operands[-1]
            if op.type == CS_OP_IMM:
                # On MIPS the fall-through address is after the delay slot.
                if self.arch == CS_ARCH_MIPS:
                    direct_nxt = prefetch.address + prefetch.size
                else:
                    direct_nxt = inst.address + inst.size
                nxt_jmp = op.value.imm
                stack.append(direct_nxt)
                stack.append(nxt_jmp)
                gph.new_node(inst, prefetch, [direct_nxt, nxt_jmp])
            else:
                # Can't interpret jmp ADDR|reg
                gph.new_node(inst, prefetch, None)

        else:
            # Sequential instruction: single successor.
            nxt = inst.address + inst.size
            stack.append(nxt)
            gph.new_node(inst, None, [nxt])

    if not gph.nodes:
        return None, 0

    if self.binary.type == T_BIN_PE:
        nb_new_syms = self.binary.pe_reverse_stripped_symbols(self, addresses)
    else:
        nb_new_syms = 0

    elapsed = time() - start
    debug__("Graph built in %fs (%d instructions)" % (elapsed, len(gph.nodes)))

    return gph, nb_new_syms
def get_graph(self, addr):
    """Build the control-flow graph of the function starting at ``addr``.

    Like the worklist variant but MIPS-aware: the branch-delay-slot
    instruction (``prefetch``) is attached to jump/ret nodes.  Returns the
    ``Graph``, or ``None`` when ``addr`` cannot be disassembled.
    """
    from capstone import CS_OP_IMM, CS_ARCH_MIPS
    ARCH_UTILS = self.load_arch_module().utils
    curr = self.lazy_disasm(addr)
    if curr is None:
        return None
    gph = Graph(self, addr)
    rest = []  # worklist: addresses of instructions still to visit

    # NOTE(review): time.clock() was removed in Python 3.8 -- switch to
    # time.perf_counter() if this must run on modern interpreters.
    start = time.clock()
    prefetch = None

    # WARNING: this assume that on every architectures the jump
    # address is the last operand (operands[-1])

    while True:
        if not gph.exists(curr):
            if self.arch == CS_ARCH_MIPS:
                prefetch = self.__prefetch_inst(curr)

            if ARCH_UTILS.is_uncond_jump(curr) and len(curr.operands) > 0:
                if curr.operands[-1].type == CS_OP_IMM:
                    addr = curr.operands[-1].value.imm
                    nxt = self.lazy_disasm(addr)
                    gph.set_next(curr, nxt, prefetch)
                    rest.append(nxt.address)
                else:
                    # Can't interpret jmp ADDR|reg
                    gph.add_node(curr, prefetch)
                gph.uncond_jumps_set.add(curr.address)

            elif ARCH_UTILS.is_cond_jump(curr) and len(curr.operands) > 0:
                if curr.operands[-1].type == CS_OP_IMM:
                    nxt_jump = self.lazy_disasm(curr.operands[-1].value.imm)
                    # On MIPS the fall-through address is after the delay
                    # slot, not directly after the branch.
                    if self.arch == CS_ARCH_MIPS:
                        direct_nxt = \
                            self.lazy_disasm(prefetch.address + prefetch.size)
                    else:
                        direct_nxt = \
                            self.lazy_disasm(curr.address + curr.size)
                    gph.set_cond_next(curr, nxt_jump, direct_nxt, prefetch)
                    rest.append(nxt_jump.address)
                    rest.append(direct_nxt.address)
                else:
                    # Can't interpret jmp ADDR|reg
                    gph.add_node(curr, prefetch)
                gph.cond_jumps_set.add(curr.address)

            elif ARCH_UTILS.is_ret(curr):
                gph.add_node(curr, prefetch)

            else:
                # Sequential instruction: no delay slot, so no prefetch is
                # passed here -- presumably intentional; confirm against
                # Graph.set_next's signature.
                # Was a bare "except: ... pass"; narrowed to Exception so
                # KeyboardInterrupt/SystemExit still propagate.
                try:
                    nxt = self.lazy_disasm(curr.address + curr.size)
                    gph.set_next(curr, nxt)
                    rest.append(nxt.address)
                except Exception:
                    gph.add_node(curr)

        try:
            curr = self.lazy_disasm(rest.pop())
        except IndexError:
            # Worklist exhausted: graph is complete.
            break

    if self.binary.type == T_BIN_PE:
        self.binary.pe_reverse_stripped_symbols(self)

    elapsed = time.clock() - start
    debug__("Graph built in %fs" % elapsed)
    return gph
def get_graph(self, entry_addr):
    """Build the control-flow graph of the function at ``entry_addr``.

    Each instruction is a node; blocks are created later in __simplify.
    Returns ``(gph, nb_new_syms)`` where ``nb_new_syms`` is the number of
    symbols recovered for stripped PE binaries, or ``(None, 0)`` when no
    instruction could be disassembled.
    """
    from capstone import CS_OP_IMM, CS_ARCH_MIPS
    ARCH_UTILS = self.load_arch_module().utils
    gph = Graph(self, entry_addr)
    stack = [entry_addr]
    start = time()
    prefetch = None  # MIPS branch-delay-slot instruction

    # WARNING: this assume that on every architectures the jump
    # address is the last operand (operands[-1])

    # Here each instruction is a node. Blocks will be created in the
    # function __simplify.

    while stack:
        ad = stack.pop()
        inst = self.lazy_disasm(ad)

        if inst is None:
            # Address is not decodable: unlink every predecessor that
            # pointed at it (single pass: remove the edge, then drop the
            # predecessor's entry once its out-list is empty).
            if ad in gph.link_in:
                for i in gph.link_in[ad]:
                    gph.link_out[i].remove(ad)
                    if not gph.link_out[i]:
                        del gph.link_out[i]
                del gph.link_in[ad]
            continue

        if gph.exists(inst):
            continue

        if ARCH_UTILS.is_ret(inst):
            if self.arch == CS_ARCH_MIPS:
                prefetch = self.__prefetch_inst(inst)
            gph.new_node(inst, prefetch, None)

        elif ARCH_UTILS.is_uncond_jump(inst):
            if self.arch == CS_ARCH_MIPS:
                prefetch = self.__prefetch_inst(inst)
            gph.uncond_jumps_set.add(ad)
            op = inst.operands[-1]
            if op.type == CS_OP_IMM:
                nxt = op.value.imm
                stack.append(nxt)
                gph.new_node(inst, prefetch, [nxt])
            else:
                if inst.address in self.jmptables:
                    # Indirect jump resolved earlier as a jump table:
                    # every table entry is a successor.
                    table = self.jmptables[inst.address].table
                    stack += table
                    gph.new_node(inst, prefetch, table)
                else:
                    # Can't interpret jmp ADDR|reg
                    gph.new_node(inst, prefetch, None)

        elif ARCH_UTILS.is_cond_jump(inst):
            if self.arch == CS_ARCH_MIPS:
                prefetch = self.__prefetch_inst(inst)
            gph.cond_jumps_set.add(ad)
            op = inst.operands[-1]
            if op.type == CS_OP_IMM:
                # On MIPS the fall-through address is after the delay slot.
                if self.arch == CS_ARCH_MIPS:
                    direct_nxt = prefetch.address + prefetch.size
                else:
                    direct_nxt = inst.address + inst.size
                nxt_jmp = op.value.imm
                stack.append(direct_nxt)
                stack.append(nxt_jmp)
                gph.new_node(inst, prefetch, [direct_nxt, nxt_jmp])
            else:
                # Can't interpret jmp ADDR|reg
                gph.new_node(inst, prefetch, None)

        else:
            # Sequential instruction: single successor.
            nxt = inst.address + inst.size
            stack.append(nxt)
            gph.new_node(inst, None, [nxt])

    if not gph.nodes:
        return None, 0

    if self.binary.type == T_BIN_PE:
        nb_new_syms = self.binary.pe_reverse_stripped_symbols(self)
    else:
        nb_new_syms = 0

    elapsed = time() - start
    debug__("Graph built in %fs (%d instructions)" % (elapsed, len(gph.nodes)))

    return gph, nb_new_syms