def get_graph(self, addr):
    from capstone import CS_OP_IMM
    ARCH_UTILS = self.load_arch_module().utils
    curr = self.lazy_disasm(addr)
    gph = Graph(self, addr)
    rest = []  # addresses still waiting to be disassembled
    start = time.perf_counter()

    while True:
        if not gph.exists(curr):
            if ARCH_UTILS.is_uncond_jump(curr) and len(curr.operands) > 0:
                if curr.operands[0].type == CS_OP_IMM:
                    addr = curr.operands[0].value.imm
                    nxt = self.lazy_disasm(addr)
                    gph.set_next(curr, nxt)
                    rest.append(nxt.address)
                else:
                    # Can't interpret jmp ADDR|reg
                    gph.add_node(curr)
                gph.uncond_jumps_set.add(curr.address)

            elif ARCH_UTILS.is_cond_jump(curr) and len(curr.operands) > 0:
                if curr.operands[0].type == CS_OP_IMM:
                    nxt_jump = self.lazy_disasm(curr.operands[0].value.imm)
                    direct_nxt = self.lazy_disasm(curr.address + curr.size)
                    gph.set_cond_next(curr, nxt_jump, direct_nxt)
                    rest.append(nxt_jump.address)
                    rest.append(direct_nxt.address)
                else:
                    # Can't interpret jmp ADDR|reg
                    gph.add_node(curr)
                gph.cond_jumps_set.add(curr.address)

            elif ARCH_UTILS.is_ret(curr):
                gph.add_node(curr)

            else:
                # Sequential instruction: fall through to the next one.
                try:
                    nxt = self.lazy_disasm(curr.address + curr.size)
                    gph.set_next(curr, nxt)
                    rest.append(nxt.address)
                except Exception:
                    gph.add_node(curr)

        try:
            curr = self.lazy_disasm(rest.pop())
        except IndexError:
            break

    if self.binary.type == T_BIN_PE:
        self.binary.pe_reverse_stripped_symbols(self)

    elapsed = time.perf_counter() - start
    debug__("Graph built in %fs" % elapsed)
    return gph
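
# A minimal, self-contained sketch of the capstone operand check that
# get_graph() relies on. Note that detail mode must be enabled, otherwise
# inst.operands is always empty. The bytes below encode "jmp 0x1010" at a
# hypothetical address 0x1000; they are not taken from any real binary.
#
# from capstone import Cs, CS_ARCH_X86, CS_MODE_64, CS_OP_IMM
#
# md = Cs(CS_ARCH_X86, CS_MODE_64)
# md.detail = True  # required to populate inst.operands
# for inst in md.disasm(b"\xe9\x0b\x00\x00\x00", 0x1000):
#     if inst.operands and inst.operands[0].type == CS_OP_IMM:
#         # target = 0x1000 + 5 (inst size) + 0x0b (rel32) = 0x1010
#         print("%s -> 0x%x" % (inst.mnemonic, inst.operands[0].value.imm))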
def __extract_func(self, addr):
    curr = self.code[addr]
    gph = Graph(self, addr)
    rest = []  # addresses still waiting to be processed

    while True:
        if not gph.exists(curr):
            if is_uncond_jump(curr) and len(curr.operands) > 0:
                if curr.operands[0].type == X86_OP_IMM:
                    addr = curr.operands[0].value.imm
                    nxt = self.code[addr]
                    gph.set_next(curr, nxt)
                    rest.append(nxt.address)
                else:
                    # forcejmp is a flag defined elsewhere in the module
                    if not forcejmp:
                        self.__error_jmp_reg(curr)
                    gph.add_node(curr)

            elif is_cond_jump(curr) and len(curr.operands) > 0:
                if curr.operands[0].type == X86_OP_IMM:
                    nxt_jump = self.code[curr.operands[0].value.imm]
                    direct_nxt = self.code[curr.address + curr.size]
                    gph.set_cond_next(curr, nxt_jump, direct_nxt)
                    rest.append(nxt_jump.address)
                    rest.append(direct_nxt.address)
                else:
                    if not forcejmp:
                        self.__error_jmp_reg(curr)
                    gph.add_node(curr)

            elif is_ret(curr):
                gph.add_node(curr)

            else:
                # Sequential instruction: fall through to the next one.
                try:
                    nxt = self.code[curr.address + curr.size]
                    gph.set_next(curr, nxt)
                    rest.append(nxt.address)
                except KeyError:
                    gph.add_node(curr)

        try:
            curr = self.code[rest.pop()]
        except IndexError:
            break

    return gph
def get_graph(self, addr):
    from capstone import CS_OP_IMM, CS_ARCH_MIPS
    ARCH_UTILS = self.load_arch_module().utils
    curr = self.lazy_disasm(addr)
    if curr is None:
        return None
    gph = Graph(self, addr)
    rest = []  # addresses still waiting to be disassembled
    start = time.perf_counter()
    prefetch = None

    # WARNING: this assumes that on every architecture the jump
    # address is the last operand (operands[-1])

    while True:
        if not gph.exists(curr):
            if self.arch == CS_ARCH_MIPS:
                # MIPS: the instruction in the branch delay slot
                prefetch = self.__prefetch_inst(curr)

            if ARCH_UTILS.is_uncond_jump(curr) and len(curr.operands) > 0:
                if curr.operands[-1].type == CS_OP_IMM:
                    addr = curr.operands[-1].value.imm
                    nxt = self.lazy_disasm(addr)
                    gph.set_next(curr, nxt, prefetch)
                    rest.append(nxt.address)
                else:
                    # Can't interpret jmp ADDR|reg
                    gph.add_node(curr, prefetch)
                gph.uncond_jumps_set.add(curr.address)

            elif ARCH_UTILS.is_cond_jump(curr) and len(curr.operands) > 0:
                if curr.operands[-1].type == CS_OP_IMM:
                    nxt_jump = self.lazy_disasm(curr.operands[-1].value.imm)
                    if self.arch == CS_ARCH_MIPS:
                        # Fall-through starts after the delay slot
                        direct_nxt = \
                            self.lazy_disasm(prefetch.address + prefetch.size)
                    else:
                        direct_nxt = \
                            self.lazy_disasm(curr.address + curr.size)
                    gph.set_cond_next(curr, nxt_jump, direct_nxt, prefetch)
                    rest.append(nxt_jump.address)
                    rest.append(direct_nxt.address)
                else:
                    # Can't interpret jmp ADDR|reg
                    gph.add_node(curr, prefetch)
                gph.cond_jumps_set.add(curr.address)

            elif ARCH_UTILS.is_ret(curr):
                gph.add_node(curr, prefetch)

            else:
                try:
                    nxt = self.lazy_disasm(curr.address + curr.size)
                    gph.set_next(curr, nxt)
                    rest.append(nxt.address)
                except Exception:
                    gph.add_node(curr)

        try:
            curr = self.lazy_disasm(rest.pop())
        except IndexError:
            break

    if self.binary.type == T_BIN_PE:
        self.binary.pe_reverse_stripped_symbols(self)

    elapsed = time.perf_counter() - start
    debug__("Graph built in %fs" % elapsed)
    return gph
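
# A small capstone sketch of why direct_nxt is computed from the prefetched
# instruction on MIPS: the instruction after a branch (the delay slot)
# executes before the branch takes effect, so the fall-through block starts
# after the delay slot, not after the branch. The bytes are hand-assembled
# big-endian MIPS32 and purely illustrative.
#
# from capstone import Cs, CS_ARCH_MIPS, CS_MODE_MIPS32, CS_MODE_BIG_ENDIAN
#
# CODE = (b"\x10\x80\x00\x03"   # beqz  $a0, 0x1010
#         b"\x24\x02\x00\x01")  # addiu $v0, $zero, 1   <- delay slot
#
# md = Cs(CS_ARCH_MIPS, CS_MODE_MIPS32 | CS_MODE_BIG_ENDIAN)
# branch, delay_slot = md.disasm(CODE, 0x1000)
# direct_nxt = delay_slot.address + delay_slot.size  # 0x1008, not 0x1004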
import socket
from json import loads

from bsonrpc import JSONRpc
from bsonrpc.framing import JSONFramingNone

from lib.graph import Graph, GraphEncoder, GraphDecoder

# Cut-the-corners TCP client:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('localhost', 50001))

rpc = JSONRpc(s, framing_cls=JSONFramingNone)
server = rpc.get_peer_proxy()

graph = Graph()
graph.add_node("leaf1")
graph.add_node("leaf2")
graph.add_node(
    "root",
    [graph.nodes["leaf1"], graph.nodes["leaf2"], graph.nodes["leaf1"]])

encoder = GraphEncoder()

# Execute on the server:
result = loads(server.increment(encoder.default(graph)), cls=GraphDecoder)
result.nodes["root"].show()

result = loads(server.increment(encoder.default(result)), cls=GraphDecoder)
result.nodes["root"].show()

rpc.close()  # closes the socket 's' as well
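
# A possible server-side counterpart for the client above, written as a
# sketch: bsonrpc's service_class/request decorators and the JSONRpc
# constructor are real library API, but the increment() behavior and the
# Graph round-trip through GraphDecoder/GraphEncoder are assumptions about
# lib.graph, not confirmed by it.
#
# import socket
# from json import loads, dumps
#
# from bsonrpc import JSONRpc, request, service_class
# from bsonrpc.framing import JSONFramingNone
#
# from lib.graph import GraphEncoder, GraphDecoder
#
# @service_class
# class GraphServices(object):
#
#     @request
#     def increment(self, graph_obj):
#         # graph_obj arrives as the JSON structure built by GraphEncoder
#         # on the client; rebuild the Graph, mutate it, return a JSON
#         # string for the client's loads(..., cls=GraphDecoder).
#         graph = loads(dumps(graph_obj), cls=GraphDecoder)
#         graph.increment()  # assumed method on lib.graph.Graph
#         return dumps(graph, cls=GraphEncoder)
#
# ss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# ss.bind(('localhost', 50001))
# ss.listen(1)
# while True:
#     sock, _ = ss.accept()
#     JSONRpc(sock, GraphServices(), framing_cls=JSONFramingNone)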
def get_graph(self, entry_addr):
    from capstone import CS_OP_IMM, CS_ARCH_MIPS
    ARCH_UTILS = self.load_arch_module().utils
    gph = Graph(self, entry_addr)
    stack = [entry_addr]  # worklist of addresses still to visit
    start = time.perf_counter()
    prefetch = None

    # WARNING: this assumes that on every architecture the jump
    # address is the last operand (operands[-1])

    while stack:
        ad = stack.pop()
        inst = self.lazy_disasm(ad)

        if inst is None:
            # Remove all previous instructions which have a link
            # to this instruction.
            if ad in gph.link_in:
                for i in gph.link_in[ad]:
                    gph.link_out[i].remove(ad)
                for i in gph.link_in[ad]:
                    if not gph.link_out[i]:
                        del gph.link_out[i]
                del gph.link_in[ad]
            continue

        if gph.exists(inst):
            continue

        if ARCH_UTILS.is_ret(inst):
            if self.arch == CS_ARCH_MIPS:
                prefetch = self.__prefetch_inst(inst)
            gph.add_node(inst, prefetch)

        elif ARCH_UTILS.is_uncond_jump(inst):
            if self.arch == CS_ARCH_MIPS:
                prefetch = self.__prefetch_inst(inst)
            gph.uncond_jumps_set.add(ad)
            op = inst.operands[-1]
            if op.type == CS_OP_IMM:
                nxt = op.value.imm
                stack.append(nxt)
                gph.set_next(inst, nxt, prefetch)
            else:
                if inst.address in self.jmptables:
                    # Resolved jump table: every entry is a successor
                    table = self.jmptables[inst.address].table
                    gph.set_jmptable_next(inst, table, prefetch)
                    for table_ad in table:
                        stack.append(table_ad)
                else:
                    # Can't interpret jmp ADDR|reg
                    gph.add_node(inst, prefetch)

        elif ARCH_UTILS.is_cond_jump(inst):
            if self.arch == CS_ARCH_MIPS:
                prefetch = self.__prefetch_inst(inst)
            gph.cond_jumps_set.add(ad)
            op = inst.operands[-1]
            if op.type == CS_OP_IMM:
                if self.arch == CS_ARCH_MIPS:
                    # Fall-through starts after the delay slot
                    direct_nxt = prefetch.address + prefetch.size
                else:
                    direct_nxt = inst.address + inst.size
                nxt_jmp = op.value.imm
                stack.append(direct_nxt)
                stack.append(nxt_jmp)
                gph.set_cond_next(inst, nxt_jmp, direct_nxt, prefetch)
            else:
                # Can't interpret jmp ADDR|reg
                gph.add_node(inst, prefetch)

        else:
            # Sequential instruction: fall through to the next one.
            nxt = inst.address + inst.size
            stack.append(nxt)
            gph.set_next(inst, nxt)

    if len(gph.nodes) == 0:
        return None

    if self.binary.type == T_BIN_PE:
        self.binary.pe_reverse_stripped_symbols(self)

    elapsed = time.perf_counter() - start
    debug__("Graph built in %fs" % elapsed)
    return gph
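
# A self-contained toy that mirrors the worklist pattern shared by all the
# get_graph() variants above, without capstone or a real binary. The
# "instruction stream" encoding ('jz', 'jmp', ...) is made up for the
# example; only the control flow of the walk matches the code above.
#
# insts = {
#     0: ('add', 1),    # (mnemonic, fall-through address)
#     1: ('jz', 3, 2),  # conditional: jump target 3, fall-through 2
#     2: ('jmp', 4),    # unconditional: target 4
#     3: ('ret',),
#     4: ('ret',),
# }
#
# edges = {}
# stack = [0]
# while stack:
#     ad = stack.pop()
#     if ad in edges:              # equivalent of gph.exists(inst)
#         continue
#     inst = insts[ad]
#     if inst[0] == 'ret':
#         edges[ad] = []
#     elif inst[0] == 'jz':        # both successors go on the worklist
#         edges[ad] = [inst[1], inst[2]]
#         stack.extend(inst[1:])
#     else:                        # 'jmp' and sequential: one successor
#         edges[ad] = [inst[1]]
#         stack.append(inst[1])
#
# print(edges)  # {0: [1], 1: [3, 2], 2: [4], 4: [], 3: []}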