def dup_sec(sec, name=None, size=0):
    """Duplicate an existing section header and append it to the module-level `pe`.

    Args:
        sec: pefile.SectionStructure to clone.
        name: optional raw name for the clone; when omitted, a generated name
            (via module-level new_name()) padded to 8 bytes is used.
        size: optional explicit size; when 0, the source section's sizes are
            multiplied by the module-level `args.enlarge` factor.

    Returns:
        The newly appended pefile.SectionStructure.

    Relies on module globals: pe, args, new_name, get_sec_by_name,
    get_last_section, align_sec, align_file, LOG.
    """
    # Clone the source header by round-tripping through pack/unpack.
    s = pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__, pe=pe)
    s.__unpack__(sec.__pack__())
    if name:
        # NOTE(review): caller-supplied name is NOT padded to 8 bytes here,
        # unlike the else branch — confirm callers always pass 8-byte names.
        s.Name = name
    else:
        s.Name = (new_name(sec)).ljust(8, '\x00')
    # skip .reloc if necessary
    # If .reloc is the last section, place the clone before it by backing off
    # by .reloc's raw size (section- and file-aligned respectively).
    diff = get_sec_by_name(
        '.reloc').SizeOfRawData if '.reloc' in pe.sections[-1].Name else 0
    s.VirtualAddress = get_last_section('rva') - align_sec(diff)
    s.PointerToRawData = get_last_section('fa') - align_file(diff)
    # Explicit size wins; otherwise enlarge the source section's sizes.
    virtualsize = size if size else sec.Misc_VirtualSize * args.enlarge
    rawsize = size if size else sec.SizeOfRawData * args.enlarge
    s.Misc_VirtualSize = align_sec(virtualsize)
    s.SizeOfRawData = align_file(rawsize)
    # Header slot: step back one header if .reloc's header occupies the last slot.
    diff = s.sizeof() if '.reloc' in pe.sections[-1].Name else 0
    s.set_file_offset(get_last_section('tbl') - diff)
    s.next_section_virtual_address = s.VirtualAddress + s.Misc_VirtualSize
    # 40 == sizeof(IMAGE_SECTION_HEADER); the new header must fit in the headers area.
    assert s.get_file_offset() + 40 <= pe.OPTIONAL_HEADER.SizeOfHeaders, 'TODO'
    pe.__structures__.append(s)
    pe.sections.append(s)
    print('added section', s.Name.strip('\x00'))
    LOG(str(s))
    return s
def pe_add_section(pe, data: bytes, name):
    """Append a new read-only initialized-data section containing `data` to `pe`.

    Writes the new section header into the existing headers area (raising
    ValueError when there is no room), splices the file-aligned `data` into
    pe.__data__, then recomputes SizeOfImage and the SizeOfCode /
    SizeOfInitializedData / SizeOfUninitializedData totals.

    Args:
        pe: parsed pefile.PE object (mutated in place).
        data: raw section contents (padded here to FileAlignment).
        name: ASCII section name; encoded, assumed to fit the 8-byte field.
    """
    file_alignment = pe.OPTIONAL_HEADER.FileAlignment
    section_alignment = pe.OPTIONAL_HEADER.SectionAlignment

    def align(value, alignment):
        # Round `value` up to the next multiple of `alignment`.
        return int(math.ceil(float(value) / alignment)) * alignment

    section_size = pe.sections[0].sizeof()
    # First section
    section_offset = pe.OPTIONAL_HEADER.get_file_offset() + pe.FILE_HEADER.SizeOfOptionalHeader
    # New section
    section_offset += pe.FILE_HEADER.NumberOfSections * section_size
    if (section_offset + section_size) >= pe.OPTIONAL_HEADER.SizeOfHeaders:
        raise ValueError('There is not enough free space for new section.')
    section = pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__, pe=pe)
    section.Name = name.encode()
    section.VirtualAddress = align(
        pe.sections[-1].Misc_VirtualSize + pe.sections[-1].VirtualAddress,
        section_alignment)
    section.Misc = section.Misc_VirtualSize = len(data)
    # Pad data to be aligned to FileAlignment.
    data += b'\0' * (align(len(data), file_alignment) - len(data))
    # section.RawSize = len(data)
    section.PointerToRawData = pe.sections[-1].PointerToRawData + pe.sections[-1].SizeOfRawData
    section.SizeOfRawData = len(data)
    section.Characteristics = 0x40000040  # Readable | Initialized
    section.PointerToRelocations = 0
    section.PointerToLinenumbers = 0
    section.NumberOfRelocations = 0
    section.NumberOfLinenumbers = 0
    header_data = section.__pack__()
    # Add new section to PE header.
    pe.__data__ = pe.__data__[:section_offset] + header_data + pe.__data__[section_offset + len(header_data):]
    # Add section data to the end of file.
    pe.__data__ = pe.__data__[:section.PointerToRawData] + data + pe.__data__[section.PointerToRawData:]
    # pe.merge_modified_section_data()
    pe.sections.append(section)
    pe.FILE_HEADER.NumberOfSections += 1
    # NOTE(review): SizeOfImage normally aligns (last VA + last VirtualSize);
    # adding SizeOfHeaders on top looks over-sized — confirm against loader behavior.
    pe.OPTIONAL_HEADER.SizeOfImage = align(
        pe.OPTIONAL_HEADER.SizeOfHeaders + pe.sections[-1].VirtualAddress + pe.sections[-1].Misc_VirtualSize,
        section_alignment
    )
    pe.OPTIONAL_HEADER.SizeOfCode = 0
    pe.OPTIONAL_HEADER.SizeOfInitializedData = 0
    pe.OPTIONAL_HEADER.SizeOfUninitializedData = 0
    # Recalculating the sizes by iterating over every section and checking if
    # the appropriate characteristics are set.
    for section in pe.sections:
        if section.Characteristics & 0x00000020:  # Code
            pe.OPTIONAL_HEADER.SizeOfCode += section.SizeOfRawData
        elif section.Characteristics & 0x00000040:  # Initialized data
            pe.OPTIONAL_HEADER.SizeOfInitializedData += section.SizeOfRawData
        elif section.Characteristics & 0x00000080:  # Uninitialized data
            pe.OPTIONAL_HEADER.SizeOfUninitializedData += section.SizeOfRawData
def creation_HeaderSection(pe):
    """Build a zero-filled section header placed right after the last existing
    header and pass it to initialization_HeaderSection() to be filled in."""
    last_index = pe.FILE_HEADER.NumberOfSections - 1
    final_section = pe.sections[last_index]
    # Fresh header structure, zeroed out to its packed size.
    header = pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__)
    header.__unpack__(bytearray(header.sizeof()))
    # One IMAGE_SECTION_HEADER (40 bytes) past the last header's file offset.
    header.set_file_offset(pe.sections[last_index].get_file_offset() + 40)
    initialization_HeaderSection(pe, header, final_section)
def add_section(pe, name, size, characteristics=DEFAULT_CHARACTERISTICS):
    """Append a new empty section of `size` bytes to `pe` and re-parse the image.

    Args:
        pe: parsed pefile.PE object.
        name: str section name, at most SECTION_NAME (8) characters.
        size: requested section size; aligned up to SectionAlignment (virtual)
            and FileAlignment (raw).
        characteristics: IMAGE_SCN_* flags for the new section.

    Returns:
        (pe, section): a NEW pefile.PE re-parsed from the modified bytes
        (the caller must use the returned object), and the new header.

    Raises:
        Exception: when the name is too long or the headers area has no room.

    Relies on module globals: SECTION_NAME, DEFAULT_CHARACTERISTICS, align_up.
    """
    # Sanity checks
    if len(name) > SECTION_NAME:
        raise Exception('[!] Section name is too long')
    section_header_size = pefile.Structure(
        pefile.PE.__IMAGE_SECTION_HEADER_format__).sizeof()
    # Slot immediately after the last existing section header.
    section_header_off = pe.sections[-1].get_file_offset(
    ) + section_header_size
    if section_header_off + section_header_size > pe.OPTIONAL_HEADER.SizeOfHeaders:
        raise Exception('[!] Not enough room for another SECTION_HEADER')
    # Calculate/Align sizes
    virtual_size = align_up(size, pe.OPTIONAL_HEADER.SectionAlignment)
    virtual_addr = align_up(
        pe.sections[-1].VirtualAddress + pe.sections[-1].Misc_VirtualSize,
        pe.OPTIONAL_HEADER.SectionAlignment)
    raw_size = align_up(size, pe.OPTIONAL_HEADER.FileAlignment)
    raw_ptr = align_up(
        pe.sections[-1].PointerToRawData + pe.sections[-1].SizeOfRawData,
        pe.OPTIONAL_HEADER.FileAlignment)
    # Configure section properties
    section = pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__, pe=pe)
    section.set_file_offset(section_header_off)
    section.Name = name.encode().ljust(SECTION_NAME, b'\x00')
    section.VirtualAddress = virtual_addr
    section.PointerToRawData = raw_ptr
    section.Misc = section.Misc_VirtualSize = virtual_size
    section.SizeOfRawData = raw_size
    section.Characteristics = characteristics
    section.PointerToRelocations = 0
    section.NumberOfRelocations = 0
    section.NumberOfLinenumbers = 0
    section.PointerToLinenumbers = 0
    # Correct headers
    pe.FILE_HEADER.NumberOfSections += 1
    pe.OPTIONAL_HEADER.SizeOfImage = virtual_addr + virtual_size
    # Add buffer padding
    pe.__data__ += b'\x00' * raw_size
    # Append to ensure overwrite
    pe.__structures__.append(section)
    # Recreate to save our changes
    pe = pefile.PE(data=pe.write())
    return pe, section
def add_new_section(pe):
    """Append a blank '.nzight' section header (no raw data) to `pe`.

    Only the header is added (to pe.__structures__) and NumberOfSections is
    bumped; no bytes are appended to the image and the header is deliberately
    not added to pe.sections.
    """
    section = pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__)
    # New header lives immediately after the last existing one.
    section_offset = pe.sections[-1].get_file_offset() + section.sizeof()
    section.set_file_offset(section_offset)
    # BUG FIX: __unpack__ requires a bytes-like buffer; the original passed a
    # str ('\0'*...), which raises under Python 3.
    section.__unpack__(b'\0' * section.sizeof())
    pe.__structures__.append(section)
    # Change the section's properties
    # section.Misc_VirtualSize = 0x1000
    section.VirtualAddress = 0x9000
    section.SizeOfRawData = 0
    section.PointerToRawData = 0
    # BUG FIX: pefile stores Name as bytes (packed with an '8s' format);
    # a str value fails at write/pack time under Python 3.
    section.Name = b'.nzight'
    pe.FILE_HEADER.NumberOfSections += 1
b"\x61\x31\xdb\x88\x5c\x24\x0f\x89\xe3\x68\x65\x58\x20" b"\x20\x68\x20\x63\x6f\x64\x68\x6e\x20\x75\x72\x68\x27" b"\x6d\x20\x69\x68\x6f\x2c\x20\x49\x68\x48\x65\x6c\x6c" b"\x31\xc9\x88\x4c\x24\x15\x89\xe1\x31\xd2\x6a\x40\x53" b"\x51\x52\xff\xd0\xB8\x96\xFE\x46\x00\xFF\xD0") def adjust_SectionSize(size, align): if size % align: size = ((size + align) // align) * align return size pe = pefile.PE('putty.exe') new_section = pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__) number_sections = pe.FILE_HEADER.NumberOfSections - 1 last_section = pe.sections[number_sections] new_section.__unpack__(bytearray(new_section.sizeof())) new_section.set_file_offset(pe.sections[number_sections].get_file_offset() + 40) new_section.Name = b'.ESGI' new_section.SizeOfRawData = adjust_SectionSize( 0x1000, pe.OPTIONAL_HEADER.FileAlignment) new_section.Misc_VirtualSize = 0x1000 new_section.Misc = 0x1000 new_section.Misc_PhysicalAddress = 0x1000 new_section.VirtualAddress = last_section.VirtualAddress + \ adjust_SectionSize(last_section.Misc_VirtualSize,
def pe_patch(pe):
    """Add a '.restore' backup section to `pe` containing copies of the
    writable sections' headers and data, so original contents can be restored.

    Clears IMAGE_SCN_MEM_DISCARDABLE everywhere, strips IMAGE_SCN_MEM_WRITE in
    the backed-up header copies, and appends the backup payload via pe_insert().

    Raises:
        RuntimeError: if '.restore' already exists, no writable sections are
            found, or there is no room for the new section header.

    Returns:
        The mutated pe object.

    Relies on module globals: IMAGE_SCN_* constants, SIZEOF_SECTION_HEADER,
    align_up, pe_insert, copy.
    """
    writable_sections = []
    for section in pe.sections:
        if section.Name == b'.restore':
            raise RuntimeError(
                'the .restore section is already present in the file')
        if section.Characteristics & IMAGE_SCN_MEM_DISCARDABLE:
            # XOR clears the flag (we know it is set from the check above).
            section.Characteristics ^= IMAGE_SCN_MEM_DISCARDABLE
        if section.Characteristics & IMAGE_SCN_MEM_WRITE and section.SizeOfRawData:
            writable_sections.append(section)
    if not writable_sections:
        raise RuntimeError('no writable sections were found')
    first_raw_section = sorted([s for s in pe.sections if s.SizeOfRawData],
                               key=lambda s: s.PointerToRawData)[0]
    last_raw_section = next(
        reversed(
            sorted([s for s in pe.sections if s.SizeOfRawData],
                   key=lambda s: s.PointerToRawData))
    )  # as the data is ordered in the file
    last_vir_section = next(
        reversed(
            sorted([s for s in pe.sections if s.SizeOfRawData],
                   key=lambda s: s.VirtualAddress))
    )  # as the data is ordered in memory
    # Backup header goes into the slot right after the last existing header.
    backup_section = pefile.SectionStructure(
        pefile.PE.__IMAGE_SECTION_HEADER_format__,
        file_offset=pe.sections[-1].get_file_offset() + SIZEOF_SECTION_HEADER,
        pe=pe)
    backup_section.Name = b'.restore'
    backup_section.Misc = 0
    backup_section.VirtualAddress = align_up(
        last_vir_section.VirtualAddress + last_vir_section.SizeOfRawData,
        pe.OPTIONAL_HEADER.SectionAlignment)
    backup_section.SizeOfRawData = 0
    backup_section.PointerToRawData = last_raw_section.PointerToRawData + last_raw_section.SizeOfRawData
    backup_section.PointerToRelocations = 0
    backup_section.PointerToLinenumbers = 0
    backup_section.NumberOfRelocations = 0
    backup_section.NumberOfLinenumbers = 0
    # NOTE(review): 'INITLIALIZED' is misspelled — presumably a constant defined
    # with the same spelling elsewhere in this module; verify before renaming.
    backup_section.Characteristics = (IMAGE_SCN_MEM_READ | IMAGE_SCN_CNT_INITLIALIZED_DATA)
    if last_raw_section.__file_offset__ + (
            SIZEOF_SECTION_HEADER * 2) > first_raw_section.PointerToRawData:
        # NOTE(review): `offset` is computed but unused, and SizeOfImage is
        # bumped right before the unconditional raise — dead prep for the
        # unimplemented header-relocation path described below.
        offset = align_up(
            last_raw_section.__file_offset__ + (SIZEOF_SECTION_HEADER * 2),
            pe.OPTIONAL_HEADER.FileAlignment) - first_raw_section.PointerToRawData
        pe.OPTIONAL_HEADER.SizeOfImage += SIZEOF_SECTION_HEADER
        # need to insert data here to make room for the new section, then update
        # each section
        raise RuntimeError('insufficient space for the new section header')
    pe.sections.append(backup_section)
    pe.__structures__.append(backup_section)
    pe.FILE_HEADER.NumberOfSections += 1
    new_section = bytearray()
    for section_idx, section in enumerate(
            writable_sections, -1
    ):  # start at -1 to account for the null-terminator section header
        # Work on a copy so the live headers keep their real raw pointers.
        section = copy.copy(section)
        section.Characteristics ^= IMAGE_SCN_MEM_WRITE
        section.PointerToRawData = backup_section.PointerToRawData + (
            (len(writable_sections) - section_idx) * SIZEOF_SECTION_HEADER)
        new_section += section.__pack__()
    new_section += bytearray(
        SIZEOF_SECTION_HEADER)  # add the null-terminator section header
    for section in writable_sections:
        new_section += section.get_data()
    backup_section.Misc = len(new_section)
    # Pad the backup payload to FileAlignment.
    new_section += bytearray(
        int(
            align_up(len(new_section), pe.OPTIONAL_HEADER.FileAlignment) -
            len(new_section)))
    pe_insert(pe, new_section, backup_section.PointerToRawData)
    backup_section.SizeOfRawData = len(new_section)
    pe.OPTIONAL_HEADER.SizeOfImage = align_up(
        pe.OPTIONAL_HEADER.SizeOfImage + len(new_section),
        pe.OPTIONAL_HEADER.SectionAlignment)
    return pe
def dump(self):
    """Dump PE file.

    Rebuilds a runnable PE image from the in-memory copy: fixes section
    pointers/sizes, makes the entry-point section executable (and writable if
    the runtime says so), rebuilds the IAT/IDT + hint-name table from resolved
    IAT pointers into a new '.pe_tree' section, clears directories that are
    meaningless post-dump, and returns the serialized bytes.

    Reads: self.pe, self.size_of_optional_header, self.new_ep, self.runtime,
    self.image_base, self.size, self.find_iat_ptrs, self.keep_iat_ptrs,
    self.iat_ptrs, self.iat_rva, self.iat_size, self.ptr_size,
    self.ptr_format, self.new_image_base, self.recalculate_pe_checksum.
    """
    pe = self.pe
    # Some samples might mess with the PE headers after loading, attempt to fix!
    if self.size_of_optional_header != 0:
        if self.size_of_optional_header != pe.FILE_HEADER.SizeOfOptionalHeader:
            pe.FILE_HEADER.SizeOfOptionalHeader = self.size_of_optional_header
            # Re-parse so section structures pick up the corrected header size.
            pe = pefile.PE(data=pe.write())
    ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint if self.new_ep == 0 else self.new_ep
    # Fix section pointers and sizes
    for section in pe.sections:
        if pe_tree.form.HAVE_IDA or pe_tree.form.HAVE_GHIDRA:
            # Dumped from memory: raw layout now equals the virtual layout.
            section.PointerToRawData = section.VirtualAddress
            section.SizeOfRawData = section.Misc_VirtualSize
        # Ensure entry-point section is executable
        if section.contains_offset(ep):
            section.Characteristics |= pefile.SECTION_CHARACTERISTICS[
                "IMAGE_SCN_MEM_EXECUTE"]
            # Is the segment in which the entry-point resides writable? (likely from a packer)
            if self.runtime.is_writable(self.image_base + ep):
                # Make the entry-point section writable
                section.Characteristics |= pefile.SECTION_CHARACTERISTICS[
                    "IMAGE_SCN_MEM_WRITE"]
    # Find IAT pointers
    if self.find_iat_ptrs:
        self.iat_ptrs = self.runtime.find_iat_ptrs(pe, self.image_base,
                                                   self.size, self.get_word)
    # Found anything?
    if len(self.iat_ptrs) == 0 and not self.keep_iat_ptrs:
        # Attempt to use existing IAT if specified/present
        iat_rva = self.iat_rva
        iat_size = self.iat_size
        # IAT RVA specified?
        if iat_rva == 0 or iat_size == 0:
            # Use IAT RVA from DATA_DIRECTORY
            iat_rva = pe.OPTIONAL_HEADER.DATA_DIRECTORY[
                pefile.DIRECTORY_ENTRY[
                    "IMAGE_DIRECTORY_ENTRY_IAT"]].VirtualAddress
            iat_size = pe.OPTIONAL_HEADER.DATA_DIRECTORY[
                pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_IAT"]].Size
        if iat_rva != 0 and iat_size != 0:
            # Read IAT
            iat_data = self.runtime.get_bytes(self.image_base + iat_rva,
                                              iat_size)
            if len(iat_data) == iat_size:
                last_module = ""
                next_module = None
                next_api = None
                # Resolve IAT pointers
                for i in range(0, iat_size, self.ptr_size):
                    # Get IAT pointer
                    iat_ptr = struct.unpack(
                        self.ptr_format, iat_data[i:i + self.ptr_size])[0]
                    # Last entry for module?
                    if iat_ptr == 0:
                        if i < iat_size - self.ptr_size:
                            j = i + self.ptr_size
                            next_iat_ptr = struct.unpack(
                                self.ptr_format,
                                iat_data[j:j + self.ptr_size])[0]
                            # Get the next module name
                            next_module, next_api = self.runtime.resolve_address(
                                next_iat_ptr)
                            next_module = next_module.lower().replace(
                                "kernelbase", "kernel32")
                            # Does the module after the null match our current module?
                            if next_module == last_module or next_iat_ptr == 0:
                                # Insert a dummy "nullptr" import to keep things nicely aligned
                                self.iat_ptrs.append(
                                    (iat_ptr, i, last_module, "nullptr"))
                                continue
                        last_module = ""
                        next_module = None
                        continue
                    # Add IAT pointer to list
                    if not next_module or not next_api:
                        module, api = self.runtime.resolve_address(iat_ptr)
                        module = module.lower().replace(
                            "kernelbase", "kernel32")
                    else:
                        module, api = next_module, next_api
                        next_module, next_api = None, None
                    # After imports are loaded they may well have been forwarded to other modules
                    if last_module == "":
                        # Use a common module name when resolving imports in-place, otherwise the IAT/IDT will not be in sync
                        last_module = module
                    self.iat_ptrs.append((iat_ptr, 0, last_module, api))
    else:
        # Create empty IAT pointer table
        iat_data = len(self.iat_ptrs) * (b"\x00" * self.ptr_size)
    if len(self.iat_ptrs) > 0:
        # Build list of modules/API names
        modules = []
        for iat_ptr, offset, module, api in self.iat_ptrs:
            if len(modules) == 0:
                modules.append({
                    "name": module,
                    "apis": [],
                    "offset": offset
                })
            if self.find_iat_ptrs:
                # Building our own IAT we can combine all modules/APIs
                found_module = False
                for _module in modules:
                    if _module["name"] == module:
                        if api not in _module["apis"]:
                            _module["apis"].append(api)
                        found_module = True
                        break
                if not found_module:
                    modules.append({
                        "name": module,
                        "apis": [api],
                        "offset": offset
                    })
            else:
                # Using an existing IAT we need to keep modules/APIs in order
                prev_module = modules[-1]
                if prev_module["name"] == module:
                    prev_module["apis"].append(api)
                else:
                    modules.append({
                        "name": module,
                        "apis": [api],
                        "offset": offset
                    })
        # Construct hint/name table
        name_table = bytearray()
        for module in modules:
            # Add module name and align
            name_table += module["name"].encode() + b"\0"
            name_table += (len(name_table) % 2) * b"\0"
            name_table += b"\0\0"
            for api in module["apis"]:
                # Add hint and API name and align
                name_table += b"\0\0" + api.encode() + b"\0"
                name_table += (len(name_table) % 2) * b"\0"
        # Construct IDT
        import_rva = self.align((pe.sections[-1].VirtualAddress +
                                 pe.sections[-1].Misc_VirtualSize),
                                pe.OPTIONAL_HEADER.SectionAlignment)
        idt_data = bytearray()
        name_table_offset = 0
        name_table_len = len(name_table)
        name_to_iat = {}
        iat_index = 0
        if self.find_iat_ptrs:
            # IAT will be at the end of the image in a new section
            iat_base = import_rva
        else:
            # IAT remains wherever it is in the image
            iat_base = iat_rva
        for module in modules:
            #iat_index = module["offset"]
            # Add descriptor - OriginalFirstThunk, TimeDateStamp, ForwarderChain, Name, FirstThunk
            idt_data += struct.pack(
                "<LLLLL",
                import_rva + len(iat_data) + name_table_len + iat_index, 0, 0,
                import_rva + len(iat_data) +
                name_table.find(module["name"].encode()),
                iat_base + iat_index)
            name_table_offset += len(module["name"].encode() + b"\0")
            name_table_offset += name_table_offset % 2
            name_table_offset += 2
            for api in module["apis"]:
                # Add name table offset
                name_table += struct.pack(
                    self.ptr_format,
                    import_rva + len(iat_data) + name_table_offset)
                name_table_offset += len(api.encode()) + 1
                name_table_offset += name_table_offset % 2
                name_table_offset += 2
                name_to_iat["{}!{}".format(module["name"],
                                           api)] = iat_base + iat_index
                iat_index += self.ptr_size
            # Null thunk terminates this module's entries.
            name_table += struct.pack(self.ptr_format, 0)
            pe.set_dword_at_offset(iat_base + iat_index, 0)
            iat_index += self.ptr_size
        # Construct import section data - IAT + Name table + IDT
        import_data = iat_data + name_table + idt_data
        # Patch addresses to point to new IAT
        patched = []
        for iat_ptr, offset, module, api in self.iat_ptrs:
            if offset and offset not in patched:
                # Update IAT pointer in PE
                self.set_word(
                    offset - self.image_base,
                    self.image_base + name_to_iat["{}!{}".format(module, api)])
                patched.append(offset)
        # Update imports and IAT directory entries
        pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[
            "IMAGE_DIRECTORY_ENTRY_IMPORT"]].VirtualAddress = import_rva + len(
                iat_data) + len(name_table)
        # 20 == sizeof(IMAGE_IMPORT_DESCRIPTOR)
        pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[
            "IMAGE_DIRECTORY_ENTRY_IMPORT"]].Size = len(modules) * 20
        pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[
            "IMAGE_DIRECTORY_ENTRY_IAT"]].VirtualAddress = iat_base
        pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[
            "IMAGE_DIRECTORY_ENTRY_IAT"]].Size = len(iat_data)
    else:
        if not self.keep_iat_ptrs:
            # No imports to rebuild — zero the import/IAT directories.
            pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[
                "IMAGE_DIRECTORY_ENTRY_IMPORT"]].VirtualAddress = 0
            pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[
                "IMAGE_DIRECTORY_ENTRY_IMPORT"]].Size = 0
            pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[
                "IMAGE_DIRECTORY_ENTRY_IAT"]].VirtualAddress = 0
            pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[
                "IMAGE_DIRECTORY_ENTRY_IAT"]].Size = 0
    # Update image base and entry-point
    if self.new_image_base != 0:
        pe.OPTIONAL_HEADER.ImageBase = self.new_image_base
    else:
        pe.OPTIONAL_HEADER.ImageBase = self.image_base
    if self.new_ep != 0:
        pe.OPTIONAL_HEADER.AddressOfEntryPoint = self.new_ep
    # Image cannot be rebased
    pe.OPTIONAL_HEADER.DllCharacteristics &= ~pefile.DLL_CHARACTERISTICS[
        "IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE"]
    # Remove base relocs, bound imports and security directories, as they are somewhat meaningless now
    pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[
        "IMAGE_DIRECTORY_ENTRY_BASERELOC"]].VirtualAddress = 0
    pe.OPTIONAL_HEADER.DATA_DIRECTORY[
        pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_BASERELOC"]].Size = 0
    pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[
        "IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT"]].VirtualAddress = 0
    pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[
        "IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT"]].Size = 0
    pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY[
        "IMAGE_DIRECTORY_ENTRY_SECURITY"]].VirtualAddress = 0
    pe.OPTIONAL_HEADER.DATA_DIRECTORY[
        pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_SECURITY"]].Size = 0
    overlay = pe.get_overlay()
    overlay = overlay if overlay is not None else bytearray()
    if len(self.iat_ptrs) > 0:
        # Add new .idata section to hold IAT + IDT + hint name table
        pe_data = pe.trim()
        # Create empty section
        section = pefile.SectionStructure(
            pe.__IMAGE_SECTION_HEADER_format__)
        section.__unpack__(bytearray(section.sizeof()))
        section.set_file_offset(pe.sections[-1].get_file_offset() +
                                pe.sections[-1].sizeof())
        # Fill in section details
        section.Name = b".pe_tree"
        section.Misc_VirtualSize = len(import_data)
        section.VirtualAddress = pe.sections[
            -1].VirtualAddress + self.align(
                pe.sections[-1].Misc_VirtualSize,
                pe.OPTIONAL_HEADER.SectionAlignment)
        section.SizeOfRawData = self.align(
            len(import_data), pe.OPTIONAL_HEADER.FileAlignment)
        section.PointerToRawData = self.align(
            len(pe_data), pe.OPTIONAL_HEADER.FileAlignment)
        section.Characteristics = pefile.SECTION_CHARACTERISTICS[
            "IMAGE_SCN_CNT_INITIALIZED_DATA"] | pefile.SECTION_CHARACTERISTICS[
                "IMAGE_SCN_MEM_READ"] | pefile.SECTION_CHARACTERISTICS[
                    "IMAGE_SCN_MEM_EXECUTE"]
        # Update PE headers
        pe.FILE_HEADER.NumberOfSections += 1
        # NOTE(review): '+=' adding the full VirtualAddress on top of the old
        # SizeOfImage looks over-sized (plain '=' would be conventional) —
        # confirm intended behavior before changing.
        pe.OPTIONAL_HEADER.SizeOfImage += section.VirtualAddress + self.align(
            len(import_data), pe.OPTIONAL_HEADER.SectionAlignment)
        # Append import data
        pe_data += (
            self.align(len(pe_data), pe.OPTIONAL_HEADER.FileAlignment) -
            len(pe_data)) * b"\0"
        pe_data += import_data
        pe_data += (
            self.align(len(pe_data), pe.OPTIONAL_HEADER.FileAlignment) -
            len(pe_data)) * b"\0"
        pe.__data__ = pe_data
        # Add section to pefile
        pe.sections.append(section)
        pe.__structures__.append(section)
    # Recalculate PE checksum if enabled (warning, very slow!)
    if self.recalculate_pe_checksum:
        pe.OPTIONAL_HEADER.CheckSum = pe.generate_checksum()
    # Put any overlay back
    pe.__data__ += overlay
    # Return data
    if sys.version_info > (3, ):
        return pe.write()
    return "".join(map(chr, pe.write()))
def add_section(pe, name, data, characteristics=0xE0000020):  # READ | WRITE | EXEC | CODE
    """Append a new section named `name` containing `data` to `pe` in place.

    Removes the bound-import directory (its slack may overlap the headers
    area), writes a new section header after the last one, splices the
    file-aligned data into pe.__data__, and fixes SizeOfImage plus the
    SizeOfCode/SizeOfInitializedData/SizeOfUninitializedData totals.

    Args:
        pe: parsed pefile.PE object (mutated in place).
        name: section name as bytes (padded here to the 8-byte field).
        data: raw section contents as bytes.
        characteristics: IMAGE_SCN_* flags (default: readable/writable/
            executable code).

    Returns:
        The new pefile.SectionStructure (pe.sections[-1]).

    Raises:
        Exception: when the headers area has no room for another header.

    Relies on a module-level align() helper.
    """
    file_alignment = pe.OPTIONAL_HEADER.FileAlignment
    section_alignment = pe.OPTIONAL_HEADER.SectionAlignment
    # Remove bound import
    # XXX: only remove bound import when there's no space for section header
    for directory in pe.OPTIONAL_HEADER.DATA_DIRECTORY:
        if directory.name == "IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT" and directory.Size:
            # FIXME: remove structs
            # BUG FIX: set_bytes_at_rva() needs bytes; the original passed a
            # str ("\x00" * ...), which fails under Python 3.
            pe.set_bytes_at_rva(directory.VirtualAddress,
                                b"\x00" * directory.Size)
            directory.Size = 0
            directory.VirtualAddress = 0
            print("[!] Bound Import removed")
    # Check is there enough space for a new section header?
    section_header_size = pe.sections[0].sizeof()
    section_header_end = pe.sections[-1].get_file_offset(
    ) + pe.sections[-1].sizeof()
    if (section_header_end + section_header_size) > pe.OPTIONAL_HEADER.SizeOfHeaders:
        raise Exception("No enough space for new section header")
    # New section header
    section = pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__, pe=pe)
    section.set_file_offset(section_header_end)
    section.Name = name.ljust(8, b"\0")
    section.Misc = section.Misc_PhysicalAddress = section.Misc_VirtualSize = len(
        data)
    section.VirtualAddress = align(
        pe.sections[-1].VirtualAddress + pe.sections[-1].Misc_VirtualSize,
        section_alignment)
    section.SizeOfRawData = align(len(data), file_alignment)
    section.PointerToRawData = pe.sections[-1].PointerToRawData + pe.sections[
        -1].SizeOfRawData
    section.PointerToRelocations = 0
    section.PointerToLinenumbers = 0
    section.NumberOfRelocations = 0
    section.NumberOfLinenumbers = 0
    section.Characteristics = characteristics
    # Maintain pefile's section linked-list invariants.
    section.next_section_virtual_address = None
    pe.sections[-1].next_section_virtual_address = section.VirtualAddress
    # Add new section header
    pe.sections.append(section)
    pe.merge_modified_section_data()
    pe.__structures__.append(section)
    pe.FILE_HEADER.NumberOfSections += 1
    # Append section data (pad file to the raw pointer, then insert aligned data)
    pe.__data__ = (pe.__data__[:section.PointerToRawData].ljust(
        section.PointerToRawData, b"\x00") +
                   data.ljust(align(len(data), file_alignment), b"\x00") +
                   pe.__data__[section.PointerToRawData:])
    # Update SizeOfImage
    pe.OPTIONAL_HEADER.SizeOfImage = align(
        pe.sections[-1].VirtualAddress + pe.sections[-1].Misc_VirtualSize,
        section_alignment)
    if section.Characteristics & 0x00000020:  # Code
        pe.OPTIONAL_HEADER.SizeOfCode += section.SizeOfRawData
    if section.Characteristics & 0x00000040:  # Initialized data
        pe.OPTIONAL_HEADER.SizeOfInitializedData += section.SizeOfRawData
    if section.Characteristics & 0x00000080:  # Uninitialized data
        pe.OPTIONAL_HEADER.SizeOfUninitializedData += section.SizeOfRawData
    return pe.sections[-1]
def hash_pe_file(filename, data=None, pe=None, json_dumps=True):
    """Calculate PE file hashes.

    Either call directly or invoke via processpool::

        processpool = multiprocessing.Pool(10)
        hashes = json.loads(processpool.apply_async(pe_tree.hash_pe.hash_pe_file, (filename,)).get())

    Args:
        filename (str): Path to file to hash (or specify via data)
        data (bytes, optional): PE file data
        pe (pefile.PE, optional): Parsed PE file
        json_dumps (bool, optional): Return data as JSON

    Returns:
        dict: PE file hashes if json_dumps == False
        str: JSON PE file hashes if json_dumps == True
    """
    if pe is None:
        pe = pefile.PE(filename)
    # Calculate entropy (use pefile implementation!)
    entropy_H = pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__,
                                        pe=pe).entropy_H
    file_hashes = {
        "file": {
            "md5": "",
            "sha1": "",
            "sha256": "",
            "entropy": 0.0,
            "size": 0
        },
        "file_no_overlay": {
            "md5": "",
            "sha1": "",
            "sha256": "",
            "entropy": 0.0,
            "size": 0
        },
        "dos_stub": {
            "md5": "",
            "sha1": "",
            "sha256": "",
            "entropy": 0.0,
            "size": 0
        },
        "sections": [],
        "resources": [],
        "security_directory": {
            "md5": "",
            "sha1": "",
            "sha256": "",
            "entropy": 0.0,
            "size": 0
        },
        "overlay": {
            "md5": "",
            "sha1": "",
            "sha256": "",
            "entropy": 0.0,
            "size": 0
        }
    }
    if not data:
        with open(filename, "rb") as f:
            data = f.read()
    # Hash entire file
    file_hashes["file"] = hash_data(data, entropy_H)
    # Hash DOS stub (anything between the DOS header and e_lfanew)
    if pe.DOS_HEADER.e_lfanew > 64:
        file_hashes["dos_stub"] = hash_data(data[64:pe.DOS_HEADER.e_lfanew],
                                            entropy_H)
    # Hash sections
    for section in pe.sections:
        file_hashes["sections"].append({
            "md5": section.get_hash_md5(),
            "sha256": section.get_hash_sha256(),
            "entropy": section.get_entropy()
        })
    # Hash resources
    if hasattr(pe, "DIRECTORY_ENTRY_RESOURCE"):
        mapped_data = pe.get_memory_mapped_image()
        for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:
            if not hasattr(resource_type, "directory"):
                continue
            for resource_id in resource_type.directory.entries:
                if not hasattr(resource_id, "directory"):
                    continue
                for resource_language in resource_id.directory.entries:
                    if not hasattr(resource_language, "data"):
                        continue
                    offset = resource_language.data.struct.OffsetToData
                    size = resource_language.data.struct.Size
                    try:
                        resource_data = mapped_data[offset:offset + size]
                    # BUG FIX: bare 'except:' narrowed, and the fallback must
                    # be bytes (b"") — hash_data hashes bytes; a str "" would
                    # raise TypeError in hashlib under Python 3.
                    except Exception:
                        resource_data = b""
                    file_hashes["resources"].append(
                        hash_data(resource_data, entropy_H))
    overlay_offset = pe.get_overlay_data_start_offset()
    if overlay_offset:
        overlay_data = pe.get_overlay()
        security = pe.OPTIONAL_HEADER.DATA_DIRECTORY[
            pefile.DIRECTORY_ENTRY["IMAGE_DIRECTORY_ENTRY_SECURITY"]]
        if security.VirtualAddress != 0 and security.Size != 0:
            # The security directory (authenticode blob) lives in the overlay.
            size = min(security.Size, len(overlay_data))
            # Hash security directory
            file_hashes["security_directory"] = hash_data(
                overlay_data[:size], entropy_H)
            overlay_data = overlay_data[size:]
            overlay_offset += size
        # Hash overlay
        file_hashes["overlay"] = hash_data(overlay_data, entropy_H)
        file_hashes["file_no_overlay"] = hash_data(data[overlay_offset:],
                                                   entropy_H)
    # Return JSON
    if json_dumps:
        return json.dumps(file_hashes)
    # Return dict
    return file_hashes