def main(): print("[*] loading crypto constants") for const in non_sparse_consts: const["byte_array"] = convert_to_byte_array(const) for start in idautils.Segments(): print("[*] searching for crypto constants in %s" % idc.get_segm_name(start)) ea = start while ea < idc.get_segm_end(start): bbbb = list(struct.unpack("BBBB", idc.get_bytes(ea, 4))) for const in non_sparse_consts: if bbbb != const["byte_array"][:4]: continue if map(lambda x:ord(x), idc.get_bytes(ea, len(const["byte_array"]))) == const["byte_array"]: print(("0x%0" + str(digits) + "X: found const array %s (used in %s)") % (ea, const["name"], const["algorithm"])) idc.set_name(ea, const["name"]) if const["size"] == "B": idc.create_byte(ea) elif const["size"] == "L": idc.create_dword(ea) elif const["size"] == "Q": idc.create_qword(ea) idc.make_array(ea, len(const["array"])) ea += len(const["byte_array"]) - 4 break ea += 4 ea = start if idc.get_segm_attr(ea, idc.SEGATTR_TYPE) == 2: while ea < idc.get_segm_end(start): d = ida_bytes.get_dword(ea) for const in sparse_consts: if d != const["array"][0]: continue tmp = ea + 4 for val in const["array"][1:]: for i in range(8): if ida_bytes.get_dword(tmp + i) == val: tmp = tmp + i + 4 break else: break else: print(("0x%0" + str(digits) + "X: found sparse constants for %s") % (ea, const["algorithm"])) cmt = idc.get_cmt(idc.prev_head(ea), 0) if cmt: idc.set_cmt(idc.prev_head(ea), cmt + ' ' + const["name"], 0) else: idc.set_cmt(idc.prev_head(ea), const["name"], 0) ea = tmp break ea += 1 print("[*] finished")
def main(): print("[*] loading crypto constants") for const in non_sparse_consts: const["byte_array"] = convert_to_byte_array(const) for start in Segments(): print("[*] searching for crypto constants in %s" % get_segm_name(start)) ea = start while ea < get_segm_end(start): bbbb = list(struct.unpack("BBBB", get_bytes(ea, 4))) for const in non_sparse_consts: if bbbb != const["byte_array"][:4]: continue if map(lambda x: ord(x), get_bytes(ea, len( const["byte_array"]))) == const["byte_array"]: print("0x%08X: found const array %s (used in %s)" % (ea, const["name"], const["algorithm"])) set_name(ea, const["name"]) if const["size"] == "B": idc.create_byte(ea) elif const["size"] == "L": idc.create_dword(ea) elif const["size"] == "Q": idc.create_qword(ea) make_array(ea, len(const["array"])) ea += len(const["byte_array"]) - 4 break ea += 4 ea = start if get_segm_attr(ea, SEGATTR_TYPE) == 2: while ea < get_segm_end(start): d = ida_bytes.get_dword(ea) for const in sparse_consts: if d != const["array"][0]: continue tmp = ea + 4 for val in const["array"][1:]: for i in range(8): if ida_bytes.get_dword(tmp + i) == val: tmp = tmp + i + 4 break else: break else: print("0x%08X: found sparse constants for %s" % (ea, const["algorithm"])) ea = tmp break ea += 1 print("[*] finished")
def convert_pointer_to_offset(ea):
    # If this is code, skip it.
    flags = idc.get_full_flags(ea)
    if idc.is_code(flags):
        return
    # If the value at this address does not point into the kernelcache, skip it.
    value = idc.get_qword(ea)
    if not is_mapped(value, 8):
        return
    # Convert this value to a qword (in case it's unaligned) and then convert it into an
    # offset.
    idc.create_qword(ea)
    idc.op_plain_offset(ea, 0, 0)
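# A minimal driver sketch for the helper above, assuming the usual IDAPython
# modules are imported and is_mapped() is available from the surrounding
# script: walk one data segment in pointer-sized steps and try to convert
# each qword into an offset. The segment name and the 8-byte stride are
# illustrative assumptions (64-bit kernelcache pointers).

import idc
import idautils

def convert_segment_pointers(segname="__DATA_CONST:__const"):
    for seg_start in idautils.Segments():
        if idc.get_segm_name(seg_start) != segname:
            continue
        ea = seg_start
        end = idc.get_segm_end(seg_start)
        while ea < end:
            convert_pointer_to_offset(ea)
            ea += 8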
def main(): print("[*] loading crypto constants") for const in non_sparse_consts: const["byte_array"] = convert_to_byte_array(const) for start in idautils.Segments(): print("[*] searching for crypto constants in %s" % idc.get_segm_name(start)) ea = start while ea < idc.get_segm_end(start): bbbb = list(struct.unpack("BBBB", idc.get_bytes(ea, 4))) for const in non_sparse_consts: if bbbb != const["byte_array"][:4]: continue if list( map(lambda x: x if type(x) == int else ord(x), idc.get_bytes(ea, len( const["byte_array"])))) == const["byte_array"]: print(("0x%0" + str(digits) + "X: found const array %s (used in %s)") % (ea, const["name"], const["algorithm"])) idc.set_name(ea, const["name"], ida_name.SN_FORCE) if const["size"] == "B": idc.create_byte(ea) elif const["size"] == "L": idc.create_dword(ea) elif const["size"] == "Q": idc.create_qword(ea) idc.make_array(ea, len(const["array"])) ea += len(const["byte_array"]) - 4 break ea += 4 ea = start if idc.get_segm_attr(ea, idc.SEGATTR_TYPE) == idc.SEG_CODE: while ea < idc.get_segm_end(start): d = ida_bytes.get_dword(ea) for const in sparse_consts: if d != const["array"][0]: continue tmp = ea + 4 for val in const["array"][1:]: for i in range(8): if ida_bytes.get_dword(tmp + i) == val: tmp = tmp + i + 4 break else: break else: print(("0x%0" + str(digits) + "X: found sparse constants for %s") % (ea, const["algorithm"])) cmt = idc.get_cmt(idc.prev_head(ea), 0) if cmt: idc.set_cmt(idc.prev_head(ea), cmt + ' ' + const["name"], 0) else: idc.set_cmt(idc.prev_head(ea), const["name"], 0) ea = tmp break ea += 1 print("[*] searching for crypto constants in immediate operand") funcs = idautils.Functions() for f in funcs: flags = idc.get_func_flags(f) if (not flags & (idc.FUNC_LIB | idc.FUNC_THUNK)): ea = f f_end = idc.get_func_attr(f, idc.FUNCATTR_END) while (ea < f_end): imm_operands = [] insn = ida_ua.insn_t() ida_ua.decode_insn(insn, ea) for i in range(len(insn.ops)): if insn.ops[i].type == ida_ua.o_void: break if insn.ops[i].type == ida_ua.o_imm: imm_operands.append(insn.ops[i].value) if len(imm_operands) == 0: ea = idc.find_code(ea, idc.SEARCH_DOWN) continue for const in operand_consts: if const["value"] in imm_operands: print(("0x%0" + str(digits) + "X: found immediate operand constants for %s") % (ea, const["algorithm"])) cmt = idc.get_cmt(ea, 0) if cmt: idc.set_cmt(ea, cmt + ' ' + const["name"], 0) else: idc.set_cmt(ea, const["name"], 0) break ea = idc.find_code(ea, idc.SEARCH_DOWN) print("[*] finished")
def _addSegments(self, mem=0x4000000):
    """Create emulation stub segments.

    Includes generous memory allocation space by default.
    """
    arena_seg_size = mem
    code_seg_size = 0x1000

    use32 = 1
    fmt_ptr_width = '<I'
    if self.bits == 64:
        use32 = 2
        fmt_ptr_width = '<Q'

    def le_hex(n):
        # Little-endian N-bit hex
        return binascii.hexlify(struct.pack(fmt_ptr_width, n))

    seg_plan = SegPlanner()

    # Pick locations for the code and the malloc "arena"
    #
    # Note 1: Find space for stub code segment first, making use of
    # SegPlanner's bias in planning new segments close to other segments if
    # possible. This violates the Franklin-Covey principle of fitting big
    # stones in first before smaller stones create spatial fragmentation in
    # the jar. But in a 64-bit IDB, depending on the size of the malloc
    # arena, doing this in the opposite order could increase the chance of
    # a stub function residing at a distance from its callsite that cannot
    # be represented in 32 bits.
    #
    # Note 2: SegPlanner ensures segments won't start at 0, which otherwise
    # could result in a NULL return from an allocator stub like malloc
    # erroneously signifying failure.
    code = seg_plan.addSegAnywhere(code_seg_size)
    arena = seg_plan.addSegAnywhere(arena_seg_size)

    for seg in (code, arena):
        idc.AddSeg(seg.start, seg.end, 0, use32, 0, idc.scPub)
        idc.set_cmt(seg.start, g_seg_sig_code_grafter, 1)

    # Designate location for the malloc "arena"
    va_arena = arena.start

    # Designate location for and write the malloc next-index pointer.
    #
    # Placing this before our fake heap would misalign the first allocation
    # or waste space when the allocator skips bytes to compensate for
    # alignment.
    #
    # Placing it at the end of our fake heap would risk corrupting it in
    # the event of a buffer overrun (or heap overrun).
    #
    # Assuming 64-bit to provide enough space irrespective of architecture
    va_malloc_next = code.start
    idc.patch_qword(va_malloc_next, 0)
    idc.create_qword(va_malloc_next)
    mykutils.makename_safe(va_malloc_next, self._stubname('malloc_next'))

    va_next_code = code.start + 0x10

    def next_addr_align4(base, sc):
        return mykutils.align(base + (len(sc) / 2), 4)

    def add_stub_func(va, sc, nm):
        idaapi.patch_bytes(va, binascii.unhexlify(sc))
        idc.create_insn(va)
        idc.add_func(va)
        mykutils.makename_safe(va, self._stubname(nm))
        cmt = ('%s implementation generated by FLARE Code Grafter' % (nm))
        idc.set_cmt(va, cmt, 1)

    # Allocators are handled specially because their templates must be
    # filled with addresses for the global data they access
    for allocator_name in g_allocators_aliases:
        code = self.get_fnbytes_allocator(allocator_name).format(
            next_=le_hex(va_malloc_next),
            arena=le_hex(va_arena))
        add_stub_func(va_next_code, code, allocator_name)
        va_next_code = next_addr_align4(va_next_code, code)

    # Functions not referencing data or other code are simpler:
    for names, sc in self._emu_stubs.items():
        for nm in names:
            add_stub_func(va_next_code, sc, nm)
            va_next_code = next_addr_align4(va_next_code, sc)
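# next_addr_align4() above relies on mykutils.align(), whose implementation
# isn't shown here. A conventional round-up-to-multiple sketch that would
# satisfy that call looks like the following; this is an assumption about
# mykutils, not its actual code. The int() cast also absorbs the float that
# len(sc) / 2 produces under Python 3, since sc is a hex string whose byte
# length is half its character count.

def align_sketch(value, alignment):
    # Round value up to the next multiple of alignment (a power of two).
    return (int(value) + alignment - 1) & ~(alignment - 1)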