def patch_bin(self, address, new_content):
    # since the content could theoretically be split into different segments, we handle that here
    ndata_pos = 0
    for start, end in self.get_memory_translation_list(address, len(new_content)):
        ndata = new_content[ndata_pos:ndata_pos + (end - start)]
        self.ncontent = utils.bytes_overwrite(self.ncontent, ndata, start)
        ndata_pos += len(ndata)
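
# Illustrative sketch (not part of the original backend): shows how a write that spans two
# file ranges gets chunked, mirroring the loop in patch_bin above. The translation list and
# the in-place bytearray overwrite are hypothetical stand-ins for get_memory_translation_list
# and utils.bytes_overwrite.
def _example_patch_bin_chunking():
    content = bytearray(b"A" * 0x20)
    new_content = b"XXXXXXXX"
    # pretend the patched address maps to two discontiguous file ranges of 4 bytes each
    translation_list = [(0x04, 0x08), (0x10, 0x14)]
    ndata_pos = 0
    for start, end in translation_list:
        ndata = new_content[ndata_pos:ndata_pos + (end - start)]
        content[start:start + len(ndata)] = ndata
        ndata_pos += len(ndata)
    return bytes(content)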
def setup_headers(self, segments):
    if self.replace_note_segment:
        current_hdr = self.structs.Elf_Ehdr.parse(self.ncontent)
        note_segment_header_loc = current_hdr["e_phoff"]
        for segment in segments:
            if segment["p_type"] == "PT_NOTE":
                segment = Container(**{
                    "p_type": 1,
                    "p_offset": self.added_data_file_start,
                    "p_vaddr": self.name_map["ADDED_DATA_START"],
                    "p_paddr": self.name_map["ADDED_DATA_START"],
                    "p_filesz": self.added_code_file_start - self.added_data_file_start + len(self.added_code),
                    "p_memsz": self.added_code_file_start - self.added_data_file_start + len(self.added_code),
                    "p_flags": 0x7,
                    "p_align": 0x1000
                })
                self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Phdr.build(segment),
                                                      note_segment_header_loc)
                break
            note_segment_header_loc += current_hdr["e_phentsize"]
    else:
        #if self.is_patched():
        #    return
        # copying original program headers (potentially modified by patches)
        # to the new place (at the end of the file)
        load_segments_rounded = []
        for segment in segments:
            if segment["p_type"] == "PT_LOAD":
                if self.first_load is None:
                    self.first_load = segment
                load_segments_rounded.append((
                    # start of the segment, rounded down to a multiple of 0x1000
                    (segment["p_vaddr"] - self.first_load["p_vaddr"]) -
                    ((segment["p_vaddr"] - self.first_load["p_vaddr"]) % 0x1000),
                    # end of the segment, rounded up to a multiple of 0x1000
                    ((segment["p_vaddr"] + segment["p_memsz"] - self.first_load["p_vaddr"] + 0x1000 - 1) // 0x1000) * 0x1000))

        for segment in segments:
            if segment["p_type"] == "PT_PHDR":
                if self.phdr_segment is not None:
                    raise ValueError("Multiple PHDR segments!")
                self.phdr_segment = segment

                segment["p_filesz"] += self.additional_headers_size
                segment["p_memsz"] += self.additional_headers_size
                phdr_size = max(segment["p_filesz"], segment["p_memsz"])

                load_segments_rounded = sorted(load_segments_rounded, key=lambda x: x[0])

                # combine overlapping load segments
                while True:
                    new_load_segments_rounded = []
                    i = 0
                    while i < len(load_segments_rounded) - 1:
                        prev_seg = load_segments_rounded[i]
                        next_seg = load_segments_rounded[i + 1]
                        if prev_seg[1] > next_seg[0]:
                            # the two segments overlap: append the combination of the two
                            new_load_segments_rounded.append((prev_seg[0], next_seg[1]))
                            i += 2
                        else:
                            # no overlap: append the segment as-is
                            new_load_segments_rounded.append(prev_seg)
                            i += 1
                    if i == len(load_segments_rounded) - 1:
                        # append the last segment if it did not overlap with its predecessor
                        new_load_segments_rounded.append(load_segments_rounded[i])
                    if new_load_segments_rounded == load_segments_rounded:
                        # no overlaps left
                        break
                    load_segments_rounded = new_load_segments_rounded  # segments were combined, run again

                for prev_seg, next_seg in zip(load_segments_rounded[:-1], load_segments_rounded[1:]):
                    potential_base = (max(prev_seg[1], len(self.ncontent)) + 0xfff) & ~0xfff  # round up to 0x1000
                    if next_seg[0] - potential_base > phdr_size:
                        # there is enough space between the two segments, put the phdr here
                        self.phdr_start = potential_base
                        break
                else:
                    # otherwise put it after the last load segment
                    self.phdr_start = load_segments_rounded[-1][1]

                # try to map self.phdr_start to the next page-aligned position so that p_offset is the same as
                # phdr_start if the base address of this binary is 0
                # this works around a weird issue in the dynamic linker of glibc
                # Note that self.phdr_start is page-aligned at this point.
                # and now we want to make sure p_vaddr (self.phdr_start) == p_offset (len(self.ncontent))
                if self.phdr_start > len(self.ncontent):
                    # p_vaddr > p_offset: pad the file (p_offset)
                    if self.phdr_start - len(self.ncontent) > 1_000_000:
                        raise Exception("Cannot align the file offset and vaddr of PHDR without increasing the "
                                        "file size by more than 1 MB.")
                    self.ncontent = self.ncontent.ljust(self.phdr_start, b"\x00")
                else:
                    # p_vaddr <= p_offset: pad the file (p_offset) to page size, and let p_vaddr = p_offset
                    self.ncontent += b"\x00" * (0x1000 - (len(self.ncontent) % 0x1000))
                    self.phdr_start = len(self.ncontent)

                segment["p_offset"] = self.phdr_start
                segment["p_vaddr"] = self.phdr_start + self.first_load["p_vaddr"]
                segment["p_paddr"] = self.phdr_start + self.first_load["p_vaddr"]

        self.ncontent = self.ncontent.ljust(self.phdr_start, b"\x00")

        # change the pointer to the program headers to point at the end of the elf
        current_hdr = self.structs.Elf_Ehdr.parse(self.ncontent)
        old_phoff = current_hdr["e_phoff"]
        current_hdr["e_phoff"] = len(self.ncontent)
        self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Ehdr.build(current_hdr), 0)

        l.debug("putting the new program headers at %#x", self.phdr_start)
        l.debug("current file length: %#x", len(self.ncontent))

        for segment in segments:
            if segment["p_type"] == "PT_PHDR":
                segment = self.phdr_segment
            self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Phdr.build(segment))
        self.original_header_end = len(self.ncontent)

        # we overwrite the first original program header,
        # we do not need it anymore since we have moved the original program headers to the bottom of the file
        self.ncontent = utils.bytes_overwrite(self.ncontent, self.patched_tag, old_phoff)

        # adding space for the additional headers
        # we always add two of them; the data one is used only in case of the fallback solution
        # additionally added program headers have already been copied by the for loop above
        self.ncontent = self.ncontent.ljust(len(self.ncontent) + self.additional_headers_size, b"\x00")
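
# Illustrative sketch (hypothetical helper, not used by setup_headers): the same
# merge-overlapping-intervals pass performed above on the page-rounded LOAD segments,
# written as a standalone function so the fixed-point loop is easier to follow.
def _example_merge_page_rounded_segments(segs):
    segs = sorted(segs, key=lambda x: x[0])
    while True:
        merged = []
        i = 0
        while i < len(segs) - 1:
            prev_seg, next_seg = segs[i], segs[i + 1]
            if prev_seg[1] > next_seg[0]:      # overlap: combine the two segments
                merged.append((prev_seg[0], next_seg[1]))
                i += 2
            else:                              # no overlap: keep prev_seg as-is
                merged.append(prev_seg)
                i += 1
        if i == len(segs) - 1:                 # last segment did not overlap with its predecessor
            merged.append(segs[i])
        if merged == segs:                     # fixed point reached: nothing left to combine
            return merged
        segs = merged

# e.g. _example_merge_page_rounded_segments([(0x0, 0x2000), (0x1000, 0x3000), (0x5000, 0x6000)])
# -> [(0x0, 0x3000), (0x5000, 0x6000)]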
def set_oep(self, new_oep):
    # set the original entry point
    current_hdr = self.structs.Elf_Ehdr.parse(self.ncontent)
    current_hdr["e_entry"] = new_oep
    self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Ehdr.build(current_hdr), 0)
def set_added_segment_headers(self):
    if self.replace_note_segment:
        return
    l.debug("added_data_file_start: %#x", self.added_data_file_start)
    added_segments = 0

    # add a LOAD segment for the PHDR segment
    phdr_segment_header = Container(**{
        "p_type": 1,
        "p_offset": self.phdr_segment["p_offset"],
        "p_vaddr": self.phdr_segment["p_vaddr"],
        "p_paddr": self.phdr_segment["p_paddr"],
        "p_filesz": self.phdr_segment["p_filesz"],
        "p_memsz": self.phdr_segment["p_memsz"],
        "p_flags": 0x4,
        "p_align": 0x1000
    })
    self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Phdr.build(phdr_segment_header),
                                          self.original_header_end + (2 * self.structs.Elf_Phdr.sizeof()))
    added_segments += 1

    # add a LOAD segment for the DATA segment
    data_segment_header = Container(**{
        "p_type": 1,
        "p_offset": self.added_data_file_start,
        "p_vaddr": self.name_map["ADDED_DATA_START"],
        "p_paddr": self.name_map["ADDED_DATA_START"],
        "p_filesz": len(self.added_data),
        "p_memsz": len(self.added_data),
        "p_flags": 0x6,
        "p_align": 0x1000
    })
    self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Phdr.build(data_segment_header),
                                          self.original_header_end + self.structs.Elf_Phdr.sizeof())
    added_segments += 1

    # add a LOAD segment for the CODE segment
    code_segment_header = Container(**{
        "p_type": 1,
        "p_offset": self.added_code_file_start,
        "p_vaddr": self.name_map["ADDED_CODE_START"],
        "p_paddr": self.name_map["ADDED_CODE_START"],
        "p_filesz": len(self.added_code),
        "p_memsz": len(self.added_code),
        "p_flags": 0x5,
        "p_align": 0x1000
    })
    self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Phdr.build(code_segment_header),
                                          self.original_header_end)
    added_segments += 1

    current_hdr = self.structs.Elf_Ehdr.parse(self.ncontent)
    current_hdr["e_phnum"] += added_segments
    self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Ehdr.build(current_hdr), 0)
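
# Illustrative sketch (hypothetical helper): the p_flags values used above are the standard
# ELF permission bits, PF_R = 0x4, PF_W = 0x2, PF_X = 0x1; so 0x5 is r-x (added code),
# 0x6 is rw- (added data) and 0x4 is r-- (the relocated program headers).
def _example_describe_p_flags(p_flags):
    return "".join([
        "r" if p_flags & 0x4 else "-",
        "w" if p_flags & 0x2 else "-",
        "x" if p_flags & 0x1 else "-",
    ])

# e.g. _example_describe_p_flags(0x5) -> "r-x"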
def apply_patches(self, patches):
    # deal with stackable patches
    # add stackable patches to the one with highest priority
    insert_code_patches = [p for p in patches if isinstance(p, InsertCodePatch)]
    insert_code_patches_dict = defaultdict(list)
    for p in insert_code_patches:
        insert_code_patches_dict[p.addr].append(p)
    insert_code_patches_dict_sorted = defaultdict(list)
    for k, v in insert_code_patches_dict.items():
        insert_code_patches_dict_sorted[k] = sorted(v, key=lambda x: -1 * x.priority)

    insert_code_patches_stackable = [p for p in patches if isinstance(p, InsertCodePatch) and p.stackable]
    for sp in insert_code_patches_stackable:
        assert len(sp.dependencies) == 0
        if sp.addr in insert_code_patches_dict_sorted:
            highest_priority_at_addr = insert_code_patches_dict_sorted[sp.addr][0]
            if highest_priority_at_addr != sp:
                highest_priority_at_addr.asm_code += "\n" + sp.asm_code + "\n"
                patches.remove(sp)

    # deal with AddLabelPatch patches
    lpatches = [p for p in patches if isinstance(p, AddLabelPatch)]
    for p in lpatches:
        self.name_map[p.name] = p.addr

    # check for duplicate labels; it is not strictly necessary for this backend,
    # but it is better to behave in the same way as the reassembler backend
    relevant_patches = [p for p in patches if isinstance(p, (AddCodePatch, AddEntryPointPatch, InsertCodePatch))]
    all_code = ""
    for p in relevant_patches:
        if isinstance(p, InsertCodePatch):
            code = p.code
        else:
            code = p.asm_code
        all_code += "\n" + code + "\n"
    labels = utils.string_to_labels(all_code)
    duplicates = set(x for x in labels if labels.count(x) > 1)
    if len(duplicates) > 1:
        raise DuplicateLabelsException("found duplicate assembly labels: %s" % (str(duplicates)))

    # for now any added code will be executed by jumping out and back, i.e. CGRex-style
    # apply all add code patches
    self.added_code_file_start = len(self.ncontent)
    self.name_map.force_insert("ADDED_CODE_START", (len(self.ncontent) % 0x1000) + self.added_code_segment)

    # 0) RawPatch:
    for patch in patches:
        if isinstance(patch, RawFilePatch):
            self.ncontent = utils.bytes_overwrite(self.ncontent, patch.data, patch.file_addr)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))
    for patch in patches:
        if isinstance(patch, RawMemPatch):
            self.patch_bin(patch.addr, patch.data)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))
    for patch in patches:
        if isinstance(patch, RemoveInstructionPatch):
            if patch.ins_size is None:
                size = 4
            else:
                size = patch.ins_size
            self.patch_bin(patch.ins_addr, b"\x60\x00\x00\x00" * ((size + 4 - 1) // 4))
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))

    # 5.5) ReplaceFunctionPatch (preprocessing rodata)
    for patch in patches:
        if isinstance(patch, ReplaceFunctionPatch):
            patches += self.compile_function(patch.asm_code,
                                             entry=patch.addr,
                                             symbols=patch.symbols,
                                             data_only=True,
                                             prefix="_RFP" + str(patches.index(patch)))

    # 1) Add{RO/RW/RWInit}DataPatch
    self.added_data_file_start = len(self.ncontent)
    curr_data_position = self.name_map["ADDED_DATA_START"]
    for patch in patches:
        if isinstance(patch, (AddRWDataPatch, AddRODataPatch, AddRWInitDataPatch)):
            if hasattr(patch, "data"):
                final_patch_data = patch.data
            else:
                final_patch_data = b"\x00" * patch.len
            self.added_data += final_patch_data
            if patch.name is not None:
                self.name_map[patch.name] = curr_data_position
            curr_data_position += len(final_patch_data)
            self.ncontent = utils.bytes_overwrite(self.ncontent, final_patch_data)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))
    self.ncontent = utils.pad_bytes(self.ncontent, 0x10)  # some minimal alignment may be good

    self.added_code_file_start = len(self.ncontent)
    if self.replace_note_segment:
        self.name_map.force_insert("ADDED_CODE_START", ((curr_data_position + 0x10 - 1) // 0x10) * 0x10)
    else:
        self.name_map.force_insert("ADDED_CODE_START", (len(self.ncontent) % 0x1000) + self.added_code_segment)

    # 2) AddCodePatch
    # resolving symbols
    current_symbol_pos = self.get_current_code_position()
    for patch in patches:
        if isinstance(patch, AddCodePatch):
            if patch.is_c:
                code_len = len(self.compile_c(patch.asm_code,
                                              optimization=patch.optimization,
                                              compiler_flags=patch.compiler_flags))
            else:
                code_len = len(self.compile_asm(patch.asm_code, current_symbol_pos))
            if patch.name is not None:
                self.name_map[patch.name] = current_symbol_pos
            current_symbol_pos += code_len
    # now compile for real
    for patch in patches:
        if isinstance(patch, AddCodePatch):
            if patch.is_c:
                new_code = self.compile_c(patch.asm_code,
                                          optimization=patch.optimization,
                                          compiler_flags=patch.compiler_flags)
            else:
                new_code = self.compile_asm(patch.asm_code, self.get_current_code_position(), self.name_map)
            self.added_code += new_code
            self.ncontent = utils.bytes_overwrite(self.ncontent, new_code)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))

    # 3) AddEntryPointPatch
    # basically like AddCodePatch, but we detour by changing the oep
    # and we jump back at the end of all of them
    # resolving symbols
    for patch in patches:
        if isinstance(patch, AddEntryPointPatch):
            old_oep = self.get_oep()
            new_oep = self.get_current_code_position()
            # ref: glibc/sysdeps/{ARCH}/start.S
            instructions = patch.asm_code
            instructions += "\nb {}".format(hex(int(old_oep)))
            new_code = self.compile_asm(instructions, self.get_current_code_position(), self.name_map)
            self.added_code += new_code
            self.added_patches.append(patch)
            self.ncontent = utils.bytes_overwrite(self.ncontent, new_code)
            self.set_oep(new_oep)
            l.info("Added patch: %s", str(patch))

    # 4) InlinePatch
    # we assume the patch never patches the added code
    for patch in patches:
        if isinstance(patch, InlinePatch):
            new_code = self.compile_asm(patch.new_asm, patch.instruction_addr, self.name_map)
            # Limiting the inline patch to a single block is not necessary
            # assert len(new_code) <= self.project.factory.block(patch.instruction_addr, num_inst=patch.num_instr, max_size=).size
            file_offset = self.project.loader.main_object.addr_to_offset(patch.instruction_addr)
            self.ncontent = utils.bytes_overwrite(self.ncontent, new_code, file_offset)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))

    # 5) InsertCodePatch
    # these patches specify an address in some basic block; in general we will move the basic block
    # and fix relative offsets
    # with this backend we can fail to apply a patch; in that case, resolve dependencies
    insert_code_patches = [p for p in patches if isinstance(p, InsertCodePatch)]
    insert_code_patches = sorted(insert_code_patches, key=lambda x: -1 * x.priority)
    applied_patches = []
    while True:
        name_list = [str(p) if (p is None or p.name is None) else p.name for p in applied_patches]
        l.info("applied_patches is: |%s|", "-".join(name_list))
        assert all(a == b for a, b in zip(applied_patches, insert_code_patches))
        for patch in insert_code_patches[len(applied_patches):]:
            self.save_state(applied_patches)
            try:
                l.info("Trying to add patch: %s", str(patch))
                if patch.name is not None:
                    self.name_map[patch.name] = self.get_current_code_position()
                new_code = self.insert_detour(patch)
                self.added_code += new_code
                self.ncontent = utils.bytes_overwrite(self.ncontent, new_code)
                applied_patches.append(patch)
                self.added_patches.append(patch)
                l.info("Added patch: %s", str(patch))
            except (DetourException, MissingBlockException, DoubleDetourException) as e:
                l.warning(e)
                insert_code_patches, removed = self.handle_remove_patch(insert_code_patches, patch)
                #print map(str,removed)
                applied_patches = self.restore_state(applied_patches, removed)
                l.warning("One patch failed, rolling back InsertCodePatch patches. Failed patch: %s", str(patch))
                break
                # TODO: right now rollback goes back to 0 patches, we may want to go back less
                # the solution is to save touched_bytes and ncontent indexed by applied patch
                # and go back to the biggest compatible list of patches
        else:
            break  # at this point we applied everything in the current insert_code_patches
    # TODO symbol name, for now no name_map for InsertCode patches

    header_patches = [InsertCodePatch, InlinePatch, AddEntryPointPatch, AddCodePatch,
                      AddRWDataPatch, AddRODataPatch, AddRWInitDataPatch]

    # 5.5) ReplaceFunctionPatch
    for patch in patches:
        if isinstance(patch, ReplaceFunctionPatch):
            if self.structs.elfclass == 64:
                # reloc type not supported (TOC info is in executables but not in the object file,
                # yet relocs in the object file need TOC info)
                raise Exception("ReplaceFunctionPatch: PPC64 not yet supported")
            for k, v in self.name_map.items():
                if k.startswith("_RFP" + str(patches.index(patch))):
                    patch.symbols[k[len("_RFP" + str(patches.index(patch))):]] = v
            new_code = self.compile_function(patch.asm_code,
                                             bits=self.structs.elfclass,
                                             little_endian=self.structs.little_endian,
                                             entry=patch.addr,
                                             symbols=patch.symbols)
            file_offset = self.project.loader.main_object.addr_to_offset(patch.addr)
            self.ncontent = utils.bytes_overwrite(self.ncontent, b"\x60\x00\x00\x00" * (patch.size // 4), file_offset)
            if patch.size >= len(new_code):
                file_offset = self.project.loader.main_object.addr_to_offset(patch.addr)
                self.ncontent = utils.bytes_overwrite(self.ncontent, new_code, file_offset)
            else:
                header_patches.append(ReplaceFunctionPatch)
                detour_pos = self.get_current_code_position()
                offset = self.project.loader.main_object.mapped_base if self.project.loader.main_object.pic else 0
                new_code = self.compile_function(patch.asm_code,
                                                 bits=self.structs.elfclass,
                                                 little_endian=self.structs.little_endian,
                                                 entry=detour_pos + offset,
                                                 symbols=patch.symbols)
                self.added_code += new_code
                self.ncontent = utils.bytes_overwrite(self.ncontent, new_code)
                # compile jmp
                jmp_code = self.compile_jmp(patch.addr, detour_pos + offset)
                self.patch_bin(patch.addr, jmp_code)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))

    if any(isinstance(p, ins) for ins in header_patches for p in self.added_patches) or \
            any(isinstance(p, SegmentHeaderPatch) for p in patches):
        # either implicitly (because of a patch adding code or data) or explicitly,
        # we need to change the segment headers

        # 6) SegmentHeaderPatch
        segment_header_patches = [p for p in patches if isinstance(p, SegmentHeaderPatch)]
        if len(segment_header_patches) > 1:
            msg = "more than one patch tries to change segment headers: %s" % \
                  "|".join([str(p) for p in segment_header_patches])
            raise IncompatiblePatchesException(msg)
        if len(segment_header_patches) == 1:
            segment_patch = segment_header_patches[0]
            segments = segment_patch.segment_headers
            l.info("Added patch: %s", str(segment_patch))
        else:
            segments = self.modded_segments

        for patch in [p for p in patches if isinstance(p, AddSegmentHeaderPatch)]:
            # add after the first segment
            segments = [segments[0]] + [patch.new_segment] + segments[1:]

        self.setup_headers(segments)
        self.set_added_segment_headers()
        l.debug("final symbol table: %s", repr([(k, hex(v)) for k, v in self.name_map.items()]))
    else:
        l.info("no patches, the binary will not be touched")
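
# Illustrative usage sketch, assuming the usual patcherex-style entry points
# (DetourBackend, InsertCodePatch, apply_patches, save). The import paths, the target
# address and the assembly below are placeholders, not taken from this file.
def _example_apply_insert_code_patch(target_binary, output_path):
    from patcherex.backends.detourbackend import DetourBackend
    from patcherex.patches import InsertCodePatch

    backend = DetourBackend(target_binary)
    # detour at a hypothetical basic-block address and execute one extra instruction
    patches = [InsertCodePatch(0x10000400, "nop", name="example_detour")]
    backend.apply_patches(patches)
    backend.save(output_path)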
def compile_function(code, compiler_flags="", bits=32, little_endian=False, entry=0x0, symbols=None,
                     data_only=False, prefix=""):
    with utils.tempdir() as td:
        c_fname = os.path.join(td, "code.c")
        object_fname = os.path.join(td, "code.o")
        object2_fname = os.path.join(td, "code.2.o")
        linker_script_fname = os.path.join(td, "code.lds")
        data_fname = os.path.join(td, "data")
        rodata_sec_index = rodata_sym_index_old = rodata_sym_index_new = -1

        # C -> Object File
        with open(c_fname, 'w') as fp:
            fp.write(code)
        target = ("powerpcle-linux-gnu" if little_endian else "powerpc-linux-gnu") if bits == 32 else \
                 ("powerpc64le-linux-gnu" if little_endian else "powerpc64-linux-gnu")
        res = utils.exec_cmd("clang -target %s -o %s -c %s %s"
                             % (target, object_fname, c_fname, compiler_flags), shell=True)
        if res[2] != 0:
            raise CLangException("CLang error: " + str(res[0] + res[1], 'utf-8'))

        # Setup Linker Script
        linker_script = "SECTIONS { .text : { *(.text) "
        if symbols:
            for i in symbols:
                if i == ".rodata":
                    linker_script += i + " = " + hex(symbols[i] - ((entry - 0x10700000) & ~0xFFFF)) + ";"
                else:
                    linker_script += i + " = " + hex(symbols[i] - entry) + ";"
        linker_script += "} .rodata : { *(.rodata*) } }"
        with open(linker_script_fname, 'w') as fp:
            fp.write(linker_script)

        # Object File --LinkerScript--> Object File
        res = utils.exec_cmd("ld.lld -relocatable %s -T %s -o %s"
                             % (object_fname, linker_script_fname, object2_fname), shell=True)
        if res[2] != 0:
            raise Exception("Linking Error: " + str(res[0] + res[1], 'utf-8'))

        # Load Object File
        ld = cle.Loader(object2_fname, main_opts={"base_addr": 0x0}, perform_relocations=False)

        # Figure Out .text Section Size
        for section in ld.all_objects[0].sections:
            if section.name == ".text":
                text_section_size = section.filesize
                break

        # Modify Symbols in Object File to Trick the Loader
        with open(object2_fname, "rb+") as f:
            elf = ELFFile(f)

            # Find the Index of the .rodata Section
            for i in range(elf.num_sections()):
                if elf.get_section(i).name == ".rodata":
                    rodata_sec_index = i
                    break

            # Find the Indices of the src and dest Symbols
            symtab_section = elf.get_section_by_name(".symtab")
            for i in range(symtab_section.num_symbols()):
                if symtab_section.get_symbol(i)['st_shndx'] == rodata_sec_index and \
                        symtab_section.get_symbol(i)['st_info']['type'] == 'STT_SECTION':
                    rodata_sym_index_old = i
                if symtab_section.get_symbol(i).name == ".rodata":
                    rodata_sym_index_new = i

            # Rewrite the Symbol
            if rodata_sym_index_new != -1 and rodata_sec_index != -1 and rodata_sym_index_old != -1:
                for i in range(elf.num_sections()):
                    if elf.get_section(i).header['sh_name'] == symtab_section.header['sh_name']:
                        f.seek(0)
                        content = f.read()
                        f.seek(symtab_section['sh_offset'] + rodata_sym_index_new * symtab_section['sh_entsize'])
                        rodata_sym_new = f.read(symtab_section['sh_entsize'])
                        content = utils.bytes_overwrite(content, rodata_sym_new,
                                                        symtab_section['sh_offset'] +
                                                        rodata_sym_index_old * symtab_section['sh_entsize'])
                        f.seek(0)
                        f.write(content)
                        f.truncate()
                        break

            # Replace all R_PPC_PLTREL24 relocations with R_PPC_REL24
            rela_section = elf.get_section_by_name(".rela.text")
            if rela_section is not None:
                for i in range(rela_section.num_relocations()):
                    if rela_section.get_relocation(i)['r_info_type'] == 18:
                        reloc = rela_section.get_relocation(i).entry
                        reloc['r_info'] -= 8
                        for j in range(elf.num_sections()):
                            if elf.get_section(j).header['sh_name'] == rela_section.header['sh_name']:
                                f.seek(0)
                                content = f.read()
                                content = utils.bytes_overwrite(content, elf.structs.Elf_Rela.build(reloc),
                                                                rela_section['sh_offset'] +
                                                                i * rela_section['sh_entsize'])
                                f.seek(0)
                                f.write(content)
                                f.truncate()
                                break

        # Load the Modified Object File and Return the Compiled Data or Code
        ld = cle.Loader(object2_fname, main_opts={"base_addr": 0x0, "entry_point": 0x0})
        if data_only:
            patches = []
            for section in ld.all_objects[0].sections:
                if section.name == ".rodata":
                    res = utils.exec_cmd("objcopy -B i386 -O binary -j %s %s %s"
                                         % (section.name, object2_fname, data_fname), shell=True)
                    if res[2] != 0:
                        raise ObjcopyException("Objcopy Error: " + str(res[0] + res[1], 'utf-8'))
                    with open(data_fname, "rb") as fp:
                        patches.append(AddRODataPatch(fp.read(), name=prefix + section.name))
                    break
            return patches
        else:
            compiled = ld.memory.load(ld.all_objects[0].entry, text_section_size)
            return compiled
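
# Illustrative sketch: builds the same linker-script string that compile_function generates
# above, for a hypothetical symbol map, so the symbol arithmetic (symbol address minus the
# entry point of the compiled function) is easy to see in isolation.
def _example_build_linker_script(symbols, entry):
    linker_script = "SECTIONS { .text : { *(.text) "
    for name, addr in symbols.items():
        if name == ".rodata":
            linker_script += name + " = " + hex(addr - ((entry - 0x10700000) & ~0xFFFF)) + ";"
        else:
            linker_script += name + " = " + hex(addr - entry) + ";"
    linker_script += "} .rodata : { *(.rodata*) } }"
    return linker_script

# e.g. _example_build_linker_script({"puts": 0x10010000}, 0x10001000)
# -> 'SECTIONS { .text : { *(.text) puts = 0xf000;} .rodata : { *(.rodata*) } }'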
def apply_patches(self, patches):
    # deal with stackable patches
    # add stackable patches to the one with highest priority
    insert_code_patches = [p for p in patches if isinstance(p, InsertCodePatch)]
    insert_code_patches_dict = defaultdict(list)
    for p in insert_code_patches:
        insert_code_patches_dict[p.addr].append(p)
    insert_code_patches_dict_sorted = defaultdict(list)
    for k, v in insert_code_patches_dict.items():
        insert_code_patches_dict_sorted[k] = sorted(v, key=lambda x: -1 * x.priority)

    insert_code_patches_stackable = [p for p in patches if isinstance(p, InsertCodePatch) and p.stackable]
    for sp in insert_code_patches_stackable:
        assert len(sp.dependencies) == 0
        if sp.addr in insert_code_patches_dict_sorted:
            highest_priority_at_addr = insert_code_patches_dict_sorted[sp.addr][0]
            if highest_priority_at_addr != sp:
                highest_priority_at_addr.asm_code += "\n" + sp.asm_code + "\n"
                patches.remove(sp)

    # deal with AddLabelPatch patches
    for patch in patches:
        if isinstance(patch, AddLabelPatch):
            self.name_map[patch.name] = patch.addr

    # check for duplicate labels; it is not strictly necessary for this backend,
    # but it is better to behave in the same way as the reassembler backend
    relevant_patches = [p for p in patches if isinstance(p, (AddCodePatch, InsertCodePatch))]
    all_code = ""
    for p in relevant_patches:
        if isinstance(p, InsertCodePatch):
            code = p.code
        else:
            code = p.asm_code
        all_code += "\n" + code + "\n"
    labels = utils.string_to_labels(all_code)
    duplicates = set(x for x in labels if labels.count(x) > 1)
    if len(duplicates) > 1:
        raise DuplicateLabelsException("found duplicate assembly labels: %s" % (str(duplicates)))

    for patch in patches:
        if isinstance(patch, (ReplaceFunctionPatch, AddEntryPointPatch, AddSegmentHeaderPatch, SegmentHeaderPatch)):
            raise NotImplementedError()

    # 0) RawPatch:
    for patch in patches:
        if isinstance(patch, RawFilePatch):
            self.ncontent = utils.bytes_overwrite(self.ncontent, patch.data, patch.file_addr)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))
    for patch in patches:
        if isinstance(patch, RawMemPatch):
            self.patch_bin(patch.addr, patch.data)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))
    for patch in patches:
        if isinstance(patch, RemoveInstructionPatch):
            if patch.ins_size is None:
                size = 2
            else:
                size = patch.ins_size
            self.patch_bin(patch.ins_addr, b"\x00\x00" * ((size + 2 - 1) // 2))
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))

    # 1) Add{RO/RW/RWInit}DataPatch
    curr_data_position = self.name_map["ADDED_DATA_START"]
    for patch in patches:
        if isinstance(patch, (AddRWDataPatch, AddRODataPatch, AddRWInitDataPatch)):
            if hasattr(patch, "data"):
                final_patch_data = patch.data
            else:
                final_patch_data = b"\x00" * patch.len
            self.added_data += final_patch_data
            if patch.name is not None:
                self.name_map[patch.name] = curr_data_position
            curr_data_position += len(final_patch_data)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))
    if (len(self.added_data) + self.added_data_file_start) % 2 == 1:
        self.added_data += b"\x00"
    self.ncontent = self.insert_bytes(self.ncontent, self.added_data, self.added_data_file_start)
    self.added_code_file_start = self.added_data_file_start + len(self.added_data)
    self.name_map.force_insert("ADDED_CODE_START",
                               self.added_code_file_start - (self.text_section_offset - self.text_section_addr))

    # __do_copy_data
    # FIXME: not working properly
    if len(self.added_data) > 0:
        data_start = self.name_map["ADDED_DATA_START"]
        data_end = curr_data_position
        data_load_start = self.name_map["ADDED_CODE_START"] - len(self.added_data)
        data_start_hi8, data_start_lo8 = data_start >> 8, data_start & 0xFF
        data_end_hi8, data_end_lo8 = data_end >> 8, data_end & 0xFF
        data_load_start_hi8, data_load_start_lo8 = data_load_start >> 8, data_load_start & 0xFF
        do_copy_data_code = '''
        ldi r17, %d
        ldi r26, %d
        ldi r27, %d
        ldi r30, %d
        ldi r31, %d
        rjmp +0x16
        lpm r0, z+
        st x+, r0
        cpi r26, %d
        cpc r27, r17
        brne 0x2
        ''' % (data_end_hi8, data_start_lo8, data_start_hi8,
               data_load_start_lo8, data_load_start_hi8, data_end_lo8)
        # TODO: should not be hardcoded to 0x8c
        # we are assuming that 0x8c is the end of the original __do_copy_data and the start of __do_clear_bss
        patches.insert(0, InsertCodePatch(0x8c, code=do_copy_data_code, name="__do_copy_data", priority=1000))

    # 2) AddCodePatch
    # resolving symbols
    current_symbol_pos = self.get_current_code_position()
    for patch in patches:
        if isinstance(patch, AddCodePatch):
            if patch.is_c:
                code_len = len(self.compile_c(patch.asm_code,
                                              optimization=patch.optimization,
                                              compiler_flags=patch.compiler_flags))
            else:
                code_len = len(self.compile_asm(patch.asm_code, current_symbol_pos))
            if patch.name is not None:
                self.name_map[patch.name] = current_symbol_pos
            current_symbol_pos += code_len
    # now compile for real
    self.added_code = b""
    for patch in patches:
        if isinstance(patch, AddCodePatch):
            if patch.is_c:
                new_code = self.compile_c(patch.asm_code,
                                          optimization=patch.optimization,
                                          compiler_flags=patch.compiler_flags)
            else:
                new_code = self.compile_asm(patch.asm_code, self.name_map)
            self.added_code += new_code
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))

    # 4) InlinePatch
    # we assume the patch never patches the added code
    for patch in patches:
        if isinstance(patch, InlinePatch):
            new_code = self.compile_asm(patch.new_asm, self.name_map)
            file_offset = self.project.loader.main_object.addr_to_offset(patch.instruction_addr)
            self.ncontent = utils.bytes_overwrite(self.ncontent, new_code, file_offset)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))

    # 5) InsertCodePatch
    # these patches specify an address in some basic block; in general we will move the basic block
    # and fix relative offsets
    # with this backend we can fail to apply a patch; in that case, resolve dependencies
    insert_code_patches = [p for p in patches if isinstance(p, InsertCodePatch)]
    insert_code_patches = sorted(insert_code_patches, key=lambda x: -1 * x.priority)
    applied_patches = []
    while True:
        name_list = [str(p) if (p is None or p.name is None) else p.name for p in applied_patches]
        l.info("applied_patches is: |%s|", "-".join(name_list))
        assert all(a == b for a, b in zip(applied_patches, insert_code_patches))
        for patch in insert_code_patches[len(applied_patches):]:
            self.save_state(applied_patches)
            try:
                l.info("Trying to add patch: %s", str(patch))
                if patch.name is not None:
                    self.name_map[patch.name] = self.get_current_code_position()
                new_code = self.insert_detour(patch)
                self.added_code += new_code
                applied_patches.append(patch)
                self.added_patches.append(patch)
                l.info("Added patch: %s", str(patch))
            except (DetourException, MissingBlockException, DoubleDetourException) as e:
                l.warning(e)
                insert_code_patches, removed = self.handle_remove_patch(insert_code_patches, patch)
                #print map(str,removed)
                applied_patches = self.restore_state(applied_patches, removed)
                l.warning("One patch failed, rolling back InsertCodePatch patches. Failed patch: %s", str(patch))
                break
                # TODO: right now rollback goes back to 0 patches, we may want to go back less
                # the solution is to save touched_bytes and ncontent indexed by applied patch
                # and go back to the biggest compatible list of patches
        else:
            break  # at this point we applied everything in the current insert_code_patches
    # TODO symbol name, for now no name_map for InsertCode patches

    self.ncontent = self.insert_bytes(self.ncontent, self.added_code, self.added_code_file_start)

    # Modify sections if needed
    if len(self.added_data) + len(self.added_code) > 0:
        # update the ELF header
        current_Ehdr = self.structs.Elf_Ehdr.parse(self.ncontent)
        current_Ehdr['e_shoff'] += len(self.added_code) + len(self.added_data)
        self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Ehdr.build(current_Ehdr), 0)
        # update the section headers
        current_Shdr_index = -1
        for section in self.sections:
            current_Shdr_index += 1
            current_Shdr = section.header
            if section.name == ".text":
                pass
            elif section.name == ".data":
                current_Shdr['sh_size'] += len(self.added_code) + len(self.added_data)
                current_Shdr['sh_addr'] = self.text_section_size
            else:
                current_Shdr['sh_offset'] += len(self.added_code) + len(self.added_data)
            self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Shdr.build(current_Shdr),
                                                  current_Ehdr['e_shoff'] +
                                                  current_Ehdr['e_shentsize'] * current_Shdr_index)
def apply_patches(self, patches):
    # deal with stackable patches
    # add stackable patches to the one with highest priority
    insert_code_patches = [p for p in patches if isinstance(p, InsertCodePatch)]
    insert_code_patches_dict = defaultdict(list)
    for p in insert_code_patches:
        insert_code_patches_dict[p.addr].append(p)
    insert_code_patches_dict_sorted = defaultdict(list)
    for k, v in insert_code_patches_dict.items():
        insert_code_patches_dict_sorted[k] = sorted(v, key=lambda x: -1 * x.priority)

    insert_code_patches_stackable = [p for p in patches if isinstance(p, InsertCodePatch) and p.stackable]
    for sp in insert_code_patches_stackable:
        assert len(sp.dependencies) == 0
        if sp.addr in insert_code_patches_dict_sorted:
            highest_priority_at_addr = insert_code_patches_dict_sorted[sp.addr][0]
            if highest_priority_at_addr != sp:
                highest_priority_at_addr.asm_code += "\n" + sp.asm_code + "\n"
                patches.remove(sp)

    # deal with AddLabelPatch patches
    for patch in patches:
        if isinstance(patch, AddLabelPatch):
            self.name_map[patch.name] = patch.addr

    # check for duplicate labels; it is not strictly necessary for this backend,
    # but it is better to behave in the same way as the reassembler backend
    relevant_patches = [p for p in patches if isinstance(p, (AddCodePatch, InsertCodePatch))]
    all_code = ""
    for p in relevant_patches:
        if isinstance(p, InsertCodePatch):
            code = p.code
        else:
            code = p.asm_code
        all_code += "\n" + code + "\n"
    labels = utils.string_to_labels(all_code)
    duplicates = set(x for x in labels if labels.count(x) > 1)
    if len(duplicates) > 1:
        raise DuplicateLabelsException("found duplicate assembly labels: %s" % (str(duplicates)))

    for patch in patches:
        if isinstance(patch, (AddEntryPointPatch, AddSegmentHeaderPatch, SegmentHeaderPatch)):
            raise NotImplementedError()

    # 0) RawPatch:
    for patch in patches:
        if isinstance(patch, RawFilePatch):
            self.ncontent = utils.bytes_overwrite(self.ncontent, patch.data, patch.file_addr)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))
    for patch in patches:
        if isinstance(patch, RawMemPatch):
            self.patch_bin(patch.addr, patch.data)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))
    for patch in patches:
        if isinstance(patch, RemoveInstructionPatch):
            if patch.ins_size is None:
                ins = self.read_mem_from_file(patch.ins_addr, 4)
                size = self.disassemble(ins, 0, is_thumb=self.check_if_thumb(patch.ins_addr))[0].size
            else:
                size = patch.ins_size
            self.patch_bin(patch.ins_addr,
                           b"\x00\xbf" * ((size + 2 - 1) // 2) if self.check_if_thumb(patch.ins_addr)
                           else b"\x00\xF0\x20\xE3" * ((size + 4 - 1) // 4))
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))

    # 1) Add{RO/RW/RWInit}DataPatch
    curr_data_position = self.name_map["ADDED_DATA_START"]
    for patch in patches:
        if isinstance(patch, (AddRWDataPatch, AddRODataPatch, AddRWInitDataPatch)):
            if hasattr(patch, "data"):
                final_patch_data = patch.data
            else:
                final_patch_data = b"\x00" * patch.len
            self.added_data += final_patch_data
            if patch.name is not None:
                self.name_map[patch.name] = curr_data_position
            curr_data_position += len(final_patch_data)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))
    if (len(self.added_data) + self.added_data_file_start) % 2 == 1:
        self.added_data += b"\x00"
    self.ncontent = self.insert_bytes(self.ncontent, self.added_data, self.added_data_file_start)
    self.added_code_file_start = self.added_data_file_start + len(self.added_data)
    self.name_map.force_insert("ADDED_CODE_START", self.name_map['ADDED_DATA_START'] + len(self.added_data))

    # 2) AddCodePatch
    # resolving symbols
    current_symbol_pos = self.get_current_code_position()
    for patch in patches:
        if isinstance(patch, AddCodePatch):
            if patch.is_c:
                code_len = len(self.compile_c(patch.asm_code,
                                              optimization=patch.optimization,
                                              compiler_flags=patch.compiler_flags,
                                              is_thumb=patch.is_thumb))
            else:
                code_len = len(self.compile_asm(patch.asm_code, current_symbol_pos, is_thumb=patch.is_thumb))
            if patch.name is not None:
                self.name_map[patch.name] = current_symbol_pos
            current_symbol_pos += code_len
    # now compile for real
    for patch in patches:
        if isinstance(patch, AddCodePatch):
            if patch.is_c:
                new_code = self.compile_c(patch.asm_code,
                                          optimization=patch.optimization,
                                          compiler_flags=patch.compiler_flags,
                                          is_thumb=patch.is_thumb)
            else:
                new_code = self.compile_asm(patch.asm_code,
                                            self.get_current_code_position(),
                                            self.name_map,
                                            is_thumb=patch.is_thumb)
            self.added_code += new_code
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))

    # 4) InlinePatch
    # we assume the patch never patches the added code
    for patch in patches:
        if isinstance(patch, InlinePatch):
            new_code = self.compile_asm(patch.new_asm,
                                        patch.instruction_addr,
                                        self.name_map,
                                        is_thumb=self.check_if_thumb(patch.instruction_addr))
            # Limiting the inline patch to a single block is not necessary
            # assert len(new_code) <= self.project.factory.block(patch.instruction_addr, num_inst=patch.num_instr, max_size=).size
            file_offset = self.project.loader.main_object.addr_to_offset(patch.instruction_addr)
            self.ncontent = utils.bytes_overwrite(self.ncontent, new_code, file_offset)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))

    # 5) InsertCodePatch
    # these patches specify an address in some basic block; in general we will move the basic block
    # and fix relative offsets
    # with this backend we can fail to apply a patch; in that case, resolve dependencies
    insert_code_patches = [p for p in patches if isinstance(p, InsertCodePatch)]
    insert_code_patches = sorted(insert_code_patches, key=lambda x: -1 * x.priority)
    applied_patches = []
    while True:
        name_list = [str(p) if (p is None or p.name is None) else p.name for p in applied_patches]
        l.info("applied_patches is: |%s|", "-".join(name_list))
        assert all(a == b for a, b in zip(applied_patches, insert_code_patches))
        for patch in insert_code_patches[len(applied_patches):]:
            self.save_state(applied_patches)
            try:
                l.info("Trying to add patch: %s", str(patch))
                if patch.name is not None:
                    self.name_map[patch.name] = self.get_current_code_position()
                new_code = self.insert_detour(patch)
                self.added_code += new_code
                applied_patches.append(patch)
                self.added_patches.append(patch)
                l.info("Added patch: %s", str(patch))
            except (DetourException, MissingBlockException, DoubleDetourException) as e:
                l.warning(e)
                insert_code_patches, removed = self.handle_remove_patch(insert_code_patches, patch)
                #print map(str,removed)
                applied_patches = self.restore_state(applied_patches, removed)
                l.warning("One patch failed, rolling back InsertCodePatch patches. Failed patch: %s", str(patch))
                break
                # TODO: right now rollback goes back to 0 patches, we may want to go back less
                # the solution is to save touched_bytes and ncontent indexed by applied patch
                # and go back to the biggest compatible list of patches
        else:
            break  # at this point we applied everything in the current insert_code_patches
    # TODO symbol name, for now no name_map for InsertCode patches

    # 5.5) ReplaceFunctionPatch
    for patch in patches:
        if isinstance(patch, ReplaceFunctionPatch):
            l.warning("ReplaceFunctionPatch: ARM/Thumb interworking is not yet supported.")
            is_thumb = self.check_if_thumb(patch.addr)
            patch.addr = patch.addr - (patch.addr % 2)
            new_code = self.compile_function(patch.asm_code,
                                             compiler_flags="-fPIE" if self.project.loader.main_object.pic else "",
                                             is_thumb=is_thumb,
                                             entry=patch.addr,
                                             symbols=patch.symbols)
            file_offset = self.project.loader.main_object.addr_to_offset(patch.addr)
            self.ncontent = utils.bytes_overwrite(self.ncontent,
                                                  (b"\x00\xBF" * (patch.size // 2)) if is_thumb
                                                  else (b"\x00\xF0\x20\xE3" * (patch.size // 4)),
                                                  file_offset)
            if patch.size >= len(new_code):
                file_offset = self.project.loader.main_object.addr_to_offset(patch.addr)
                self.ncontent = utils.bytes_overwrite(self.ncontent, new_code, file_offset)
            else:
                detour_pos = self.get_current_code_position()
                offset = self.project.loader.main_object.mapped_base if self.project.loader.main_object.pic else 0
                new_code = self.compile_function(patch.asm_code,
                                                 compiler_flags="-fPIE" if self.project.loader.main_object.pic else "",
                                                 is_thumb=is_thumb,
                                                 entry=detour_pos + offset,
                                                 symbols=patch.symbols)
                self.added_code += new_code
                # compile jmp
                jmp_code = self.compile_jmp(patch.addr, detour_pos + offset, is_thumb=is_thumb)
                self.patch_bin(patch.addr, jmp_code)
            self.added_patches.append(patch)
            l.info("Added patch: %s", str(patch))

    self.ncontent = self.insert_bytes(self.ncontent, self.added_code, self.added_code_file_start)

    # Modify sections and the 3rd LOAD segment if needed
    if len(self.added_data) + len(self.added_code) > 0:
        # update the ELF header
        current_Ehdr = self.structs.Elf_Ehdr.parse(self.ncontent)
        current_Ehdr['e_shoff'] += len(self.added_code) + len(self.added_data)
        self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Ehdr.build(current_Ehdr), 0)
        # update the section headers
        current_Shdr_index = -1
        for section in self.sections:
            current_Shdr_index += 1
            current_Shdr = section.header
            if current_Shdr['sh_offset'] >= self.added_data_file_start:
                current_Shdr['sh_offset'] += len(self.added_code) + len(self.added_data)
            elif section.name == ".data":
                current_Shdr['sh_size'] += len(self.added_code) + len(self.added_data)
            else:
                pass
            self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Shdr.build(current_Shdr),
                                                  current_Ehdr['e_shoff'] +
                                                  current_Ehdr['e_shentsize'] * current_Shdr_index)
        # update the 2nd & 3rd segment headers
        current_Phdr = self.modded_segments[1]
        current_Phdr['p_filesz'] += len(self.added_code) + len(self.added_data)
        current_Phdr['p_memsz'] += len(self.added_code) + len(self.added_data)
        self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Phdr.build(current_Phdr),
                                              current_Ehdr['e_phoff'] + current_Ehdr['e_phentsize'] * 1)
        current_Phdr = self.modded_segments[2]
        current_Phdr['p_offset'] += len(self.added_code) + len(self.added_data)
        current_Phdr['p_vaddr'] += len(self.added_code) + len(self.added_data)
        current_Phdr['p_paddr'] += len(self.added_code) + len(self.added_data)
        self.ncontent = utils.bytes_overwrite(self.ncontent, self.structs.Elf_Phdr.build(current_Phdr),
                                              current_Ehdr['e_phoff'] + current_Ehdr['e_phentsize'] * 2)
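
# Illustrative sketch (hypothetical helper): the NOP padding used by this backend when wiping
# instructions, 0xBF00 ("nop") for Thumb and 0xE320F000 ("nop") for ARM, both emitted in
# little-endian byte order and repeated to cover `size` bytes (rounded up to one instruction).
def _example_nop_fill(size, is_thumb):
    if is_thumb:
        return b"\x00\xbf" * ((size + 2 - 1) // 2)
    return b"\x00\xF0\x20\xE3" * ((size + 4 - 1) // 4)

# e.g. _example_nop_fill(6, is_thumb=True) -> b"\x00\xbf\x00\xbf\x00\xbf"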