def __decode_blp1(self):
    """Decode a BLP1-format image from ``self.fp``.

    Parses the fixed-size BLP1 header, then decodes either the
    JPEG-compressed payload (compression 0) or the 256-colour
    paletted payload (compression 1, encoding 5) of mipmap level 0.

    Sets ``self.size``, ``self.mode``, and either ``self.tile``/``self.fp``
    (JPEG path) or ``self.im`` (paletted path).

    Raises:
        NotImplementedError: for compression 1 with encoding 3 or 4.
        ValueError: for any other encoding or compression value.
    """
    # Header layout: 28 bytes of fixed fields followed by two tables of
    # 16 mipmap offsets and 16 mipmap lengths (4 bytes each).
    header = StringIO(self.fp.read(28 + 16 * 4 + 16 * 4))

    magic, compression = unpack("<4si", header.read(8))
    encoding, alphaDepth, alphaEncoding, hasMips = unpack(
        "<4b", header.read(4))
    self.size = unpack("<II", header.read(8))
    # NOTE(review): this deliberately re-reads 'encoding' as a 32-bit field,
    # clobbering the byte-sized value above — confirm against the BLP1 spec.
    encoding, subtype = unpack("<ii", header.read(8))
    offsets = unpack("<16I", header.read(16 * 4))
    lengths = unpack("<16I", header.read(16 * 4))

    if compression == 0:
        # JPEG compression: a shared JPEG header blob is stored once and
        # must be prepended to the mipmap-0 payload before PIL can parse it.
        from PIL.JpegImagePlugin import JpegImageFile
        jpegHeaderSize, = unpack("<I", self.fp.read(4))
        jpegHeader = self.fp.read(jpegHeaderSize)
        extraData = self.fp.read(offsets[0] - self.fp.tell())  # What IS this?
        data = self.fp.read(lengths[0])
        data = jpegHeader + data
        data = StringIO(data)
        image = JpegImageFile(data)
        # Fix: removed image.show() / raw_input() debugging leftovers that
        # popped an external viewer and blocked waiting for stdin on every
        # decode.
        self.tile = image.tile  # PIL is terrible
        self.fp = image.fp
        self.mode = image.mode
        return

    if compression == 1:
        if encoding in (3, 4):
            raise NotImplementedError
        elif encoding == 5:
            # Paletted image: 256 BGRA palette entries followed by one
            # palette index per pixel; expand to raw RGB triples.
            data = []
            palette_data = self.fp.read(256 * 4)
            palette = getpalette(palette_data)
            _data = StringIO(self.fp.read(lengths[0]))
            self.mode = "RGB"
            self.tile = []
            while True:
                try:
                    offset, = unpack("<B", _data.read(1))
                except StructError:
                    # End of the pixel stream.
                    break
                b, g, r, a = palette[offset]
                data.append(pack("<BBB", r, g, b))
            data = "".join(data)
            self.im = Image.core.new(self.mode, self.size)
            self.fromstring(data)
            return
        raise ValueError("Expected encoding 3, 4 or 5, got %i instead" % (encoding))
    raise ValueError("Expected compression 0 or 1, got %i instead" % (compression))
def __decode_blp1(self):
    """Decode a BLP1-format image from ``self.fp``.

    Reads the fixed BLP1 header plus the 16-entry mipmap offset/length
    tables, then decodes mipmap level 0 either via PIL's JPEG plugin
    (compression 0) or by expanding a 256-colour palette (compression 1,
    encoding 5).

    Raises:
        NotImplementedError: for compression 1 with encoding 3 or 4.
        ValueError: for any other encoding or compression value.
    """
    # 28 fixed header bytes + 16 mipmap offsets + 16 mipmap lengths.
    header = StringIO(self.fp.read(28 + 16*4 + 16*4))

    magic, compression = unpack("<4si", header.read(8))
    encoding, alphaDepth, alphaEncoding, hasMips = unpack("<4b", header.read(4))
    self.size = unpack("<II", header.read(8))
    # NOTE(review): 'encoding' is re-read here as a 32-bit value, clobbering
    # the byte-sized field above — confirm this matches the BLP1 layout.
    encoding, subtype = unpack("<ii", header.read(8))
    offsets = unpack("<16I", header.read(16*4))
    lengths = unpack("<16I", header.read(16*4))

    if compression == 0:
        # JPEG path: a shared JPEG header chunk is prepended to the
        # mipmap-0 payload before handing it to PIL.
        from PIL.JpegImagePlugin import JpegImageFile
        jpegHeaderSize, = unpack("<I", self.fp.read(4))
        jpegHeader = self.fp.read(jpegHeaderSize)
        extraData = self.fp.read(offsets[0] - self.fp.tell())  # What IS this?
        data = self.fp.read(lengths[0])
        data = jpegHeader + data
        data = StringIO(data)
        image = JpegImageFile(data)
        image.show()  # NOTE(review): looks like a debug leftover — opens a viewer on every decode; confirm
        raw_input()   # NOTE(review): blocks on stdin; presumably debug-only — confirm before shipping
        self.tile = image.tile  # PIL is terrible
        self.fp = image.fp
        self.mode = image.mode
        return
    if compression == 1:
        if encoding in (3, 4):
            raise NotImplementedError
        elif encoding == 5:
            # Paletted path: 256 BGRA palette entries, then one palette
            # index per pixel; expand each index to an RGB triple.
            data = []
            palette_data = self.fp.read(256*4)
            palette = getpalette(palette_data)
            _data = StringIO(self.fp.read(lengths[0]))
            self.mode = "RGB"
            self.tile = []
            while True:
                try:
                    offset, = unpack("<B", _data.read(1))
                except StructError:
                    # Pixel stream exhausted.
                    break
                b, g, r, a = palette[offset]
                data.append(pack("<BBB", r, g, b))
            data = "".join(data)
            self.im = Image.core.new(self.mode, self.size)
            self.fromstring(data)
            return
        raise ValueError("Expected encoding 3, 4 or 5, got %i instead" % (encoding))
    raise ValueError("Expected compression 0 or 1, got %i instead" % (compression))
def get_pe_info(self, lcid):
    """Dumps the PE header as Results in the FileResult.

    Walks the already-parsed ``self.pe_file`` (a pefile object) and emits
    one ResultSection per area of interest: header info, rich header, data
    directories, sections, debug info, imports, exports, resources,
    version info and resource strings.  ``lcid`` maps numeric language ids
    to human-readable names.
    """
    # PE Header
    pe_header_res = ResultSection(SCORE['NULL'], "PE: HEADER")

    # PE Header: Header Info
    pe_header_info_res = ResultSection(SCORE.NULL, "[HEADER INFO]", parent=pe_header_res)
    pe_header_info_res.add_line("Entry point address: 0x%08X" % self.pe_file.OPTIONAL_HEADER.AddressOfEntryPoint)
    pe_header_info_res.add_line("Linker Version: %02d.%02d" % (self.pe_file.OPTIONAL_HEADER.MajorLinkerVersion,
                                                               self.pe_file.OPTIONAL_HEADER.MinorLinkerVersion))
    pe_header_info_res.add_line("OS Version: %02d.%02d" % (self.pe_file.OPTIONAL_HEADER.MajorOperatingSystemVersion,
                                                           self.pe_file.OPTIONAL_HEADER.MinorOperatingSystemVersion))
    pe_header_info_res.add_line(["Time Date Stamp: %s (" % time.ctime(self.pe_file.FILE_HEADER.TimeDateStamp),
                                 res_txt_tag(str(self.pe_file.FILE_HEADER.TimeDateStamp),
                                             TAG_TYPE['PE_LINK_TIME_STAMP']),
                                 ")"])
    try:
        pe_header_info_res.add_line("Machine Type: %s (%s)" % (
            hex(self.pe_file.FILE_HEADER.Machine), pefile.MACHINE_TYPE[self.pe_file.FILE_HEADER.Machine]))
    except KeyError:
        # Unknown machine id: skip the line rather than crash.
        pass

    # PE Header: Rich Header
    # noinspection PyBroadException
    try:
        if self.pe_file.RICH_HEADER is not None:
            pe_rich_header_info = ResultSection(SCORE.NULL, "[RICH HEADER INFO]", parent=pe_header_res)
            values_list = self.pe_file.RICH_HEADER.values
            pe_rich_header_info.add_line("VC++ tools used:")
            # Values come in (id<<16 | version, count) pairs.
            for i in range(0, len(values_list) / 2):
                line = "Tool Id: %3d Version: %6d Times used: %3d" % (
                    values_list[2 * i] >> 16, values_list[2 * i] & 0xFFFF, values_list[2 * i + 1])
                pe_rich_header_info.add_line(line)
    except:
        self.log.exception("Unable to parse PE Rich Header")

    # PE Header: Data Directories
    pe_dd_res = ResultSection(SCORE.NULL, "[DATA DIRECTORY]", parent=pe_header_res)
    for data_directory in self.pe_file.OPTIONAL_HEADER.DATA_DIRECTORY:
        # Only report directories that are actually populated.
        if data_directory.Size or data_directory.VirtualAddress:
            pe_dd_res.add_line("%s - va: 0x%08X - size: 0x%08X"
                               % (data_directory.name[len("IMAGE_DIRECTORY_ENTRY_"):],
                                  data_directory.VirtualAddress, data_directory.Size))

    # PE Header: Sections
    pe_sec_res = ResultSection(SCORE.NULL, "[SECTIONS]", parent=pe_header_res)
    self._init_section_list()
    try:
        for (sname, section, sec_md5, sec_entropy) in self._sect_list:
            txt = [sname, " - Virtual: 0x%08X (0x%08X bytes)"
                          " - Physical: 0x%08X (0x%08X bytes) - " %
                   (section.VirtualAddress, section.Misc_VirtualSize,
                    section.PointerToRawData, section.SizeOfRawData),
                   "hash:", res_txt_tag(sec_md5, TAG_TYPE['PE_SECTION_HASH']),
                   " - entropy:%f (min:0.0, Max=8.0)" % sec_entropy]
            # add a search tag for the Section Hash
            make_tag(self.file_res, 'PE_SECTION_HASH', sec_md5, 'HIGH', usage='CORRELATION')
            pe_sec_res.add_line(txt)
    except AttributeError:
        pass

    self.file_res.add_section(pe_header_res)

    # debug
    try:
        if self.pe_file.DebugTimeDateStamp:
            pe_debug_res = ResultSection(SCORE['NULL'], "PE: DEBUG")
            self.file_res.add_section(pe_debug_res)
            pe_debug_res.add_line("Time Date Stamp: %s" % time.ctime(self.pe_file.DebugTimeDateStamp))
            # When it is a unicode, we know we are coming from RSDS which is UTF-8
            # otherwise, we come from NB10 and we need to guess the charset.
            if type(self.pe_file.pdb_filename) != unicode:
                char_enc_guessed = translate_str(self.pe_file.pdb_filename)
                pdb_filename = char_enc_guessed['converted']
            else:
                char_enc_guessed = {'confidence': 1.0, 'encoding': 'utf-8'}
                pdb_filename = self.pe_file.pdb_filename
            pe_debug_res.add_line(["PDB: '",
                                   res_txt_tag_charset(pdb_filename,
                                                       TAG_TYPE['PE_PDB_FILENAME'],
                                                       char_enc_guessed['encoding'],
                                                       char_enc_guessed['confidence']),
                                   "'"])
            # self.log.debug(u"\tPDB: %s" % pdb_filename)
    except AttributeError:
        pass

    # imports
    try:
        if hasattr(self.pe_file, 'DIRECTORY_ENTRY_IMPORT') and len(self.pe_file.DIRECTORY_ENTRY_IMPORT) > 0:
            pe_import_res = ResultSection(SCORE['NULL'], "PE: IMPORTS")
            self.file_res.add_section(pe_import_res)
            for entry in self.pe_file.DIRECTORY_ENTRY_IMPORT:
                pe_import_dll_res = ResultSection(SCORE.NULL, "[%s]" % entry.dll, parent=pe_import_res)
                first_element = True
                line = StringIO()
                # Build one comma-separated line of imported names (or
                # ordinals when no name is present).
                for imp in entry.imports:
                    if first_element:
                        first_element = False
                    else:
                        line.write(", ")
                    if imp.name is None:
                        line.write(str(imp.ordinal))
                    else:
                        line.write(imp.name)
                pe_import_dll_res.add_line(line.getvalue())
        else:
            pe_import_res = ResultSection(SCORE['NULL'], "PE: NO IMPORTS DETECTED ")
            self.file_res.add_section(pe_import_res)
    except AttributeError:
        pass

    # exports
    try:
        if self.pe_file.DIRECTORY_ENTRY_EXPORT.struct.TimeDateStamp is not None:
            pe_export_res = ResultSection(SCORE['NULL'], "PE: EXPORTS")
            self.file_res.add_section(pe_export_res)
            # noinspection PyBroadException
            try:
                pe_export_res.add_line(["Module Name: ",
                                        res_txt_tag(safe_str(self.pe_file.ModuleName),
                                                    TAG_TYPE['PE_EXPORT_MODULE_NAME'])])
            except:
                pass
            if self.pe_file.DIRECTORY_ENTRY_EXPORT.struct.TimeDateStamp == 0:
                pe_export_res.add_line("Time Date Stamp: 0")
            else:
                pe_export_res.add_line("Time Date Stamp: %s"
                                       % time.ctime(self.pe_file.DIRECTORY_ENTRY_EXPORT.struct.TimeDateStamp))
            first_element = True
            txt = []
            # One comma-separated line of "ordinal[: name]" entries.
            for exp in self.pe_file.DIRECTORY_ENTRY_EXPORT.symbols:
                if first_element:
                    first_element = False
                else:
                    txt.append(", ")
                txt.append(str(exp.ordinal))
                if exp.name is not None:
                    txt.append(": ")
                    txt.append(res_txt_tag(exp.name, TAG_TYPE['PE_EXPORT_FCT_NAME']))
            pe_export_res.add_line(txt)
    except AttributeError:
        pass

    # resources
    try:
        if len(self.pe_file.DIRECTORY_ENTRY_RESOURCE.entries) > 0:
            pe_resource_res = ResultSection(SCORE['NULL'], "PE: RESOURCES")
            self.file_res.add_section(pe_resource_res)
            for res_entry in self.pe_file.DIRECTORY_ENTRY_RESOURCE.entries:
                if res_entry.name is None:
                    # noinspection PyBroadException
                    try:
                        entry_name = pefile.RESOURCE_TYPE[res_entry.id]
                    except:
                        # pylint: disable-msg=W0702
                        # unfortunately this code was done before we started to really care about which
                        # exception to catch so, I actually don't really know at this point, would need to try
                        # out :-\
                        entry_name = "UNKNOWN"
                else:
                    entry_name = res_entry.name
                for name_id in res_entry.directory.entries:
                    if name_id.name is None:
                        name_id.name = hex(name_id.id)
                    for language in name_id.directory.entries:
                        try:
                            language_desc = lcid[language.id]
                        except KeyError:
                            language_desc = 'Unknown language'
                        line = []
                        if res_entry.name is None:
                            line.append(entry_name)
                        else:
                            line.append(res_txt_tag(str(entry_name), TAG_TYPE['PE_RESOURCE_NAME']))
                        line.append(" " + str(name_id.name) + " ")
                        line.append("0x")
                        # this will add a link to search in AL for the value
                        line.append(res_txt_tag("%04X" % language.id, TAG_TYPE['PE_RESOURCE_LANGUAGE']))
                        line.append(" (%s)" % language_desc)
                        make_tag(self.file_res, 'PE_RESOURCE_LANGUAGE', language.id,
                                 weight='LOW', usage='IDENTIFICATION')
                        # get the size of the resource
                        res_size = language.data.struct.Size
                        line.append(" Size: 0x%x" % res_size)
                        pe_resource_res.add_line(line)
    except AttributeError:
        pass

    # Resources-VersionInfo
    try:
        # NOTE(review): this check is a no-op (body is 'pass') — presumably a
        # leftover; confirm before removing.
        if len(self.pe_file.FileInfo) > 2:
            pass
        for file_info in self.pe_file.FileInfo:
            if file_info.name == "StringFileInfo":
                if len(file_info.StringTable) > 0:
                    pe_resource_verinfo_res = ResultSection(SCORE['NULL'], "PE: RESOURCES-VersionInfo")
                    self.file_res.add_section(pe_resource_verinfo_res)
                    try:
                        if "LangID" in file_info.StringTable[0].entries:
                            lang_id = file_info.StringTable[0].get("LangID")
                            # High word of the LangID is the language id;
                            # zero means language-neutral.
                            if not int(lang_id, 16) >> 16 == 0:
                                txt = ('LangId: ' + lang_id + " (" + lcid[int(lang_id, 16) >> 16] + ")")
                                pe_resource_verinfo_res.add_line(txt)
                            else:
                                txt = ('LangId: ' + lang_id + " (NEUTRAL)")
                                pe_resource_verinfo_res.add_line(txt)
                    except (ValueError, KeyError):
                        # NOTE(review): lang_id is only bound if the "LangID"
                        # key was present — verify it cannot be unbound here.
                        txt = ('LangId: %s is invalid' % lang_id)
                        pe_resource_verinfo_res.add_line(txt)
                    for entry in file_info.StringTable[0].entries.items():
                        txt = ['%s: ' % entry[0]]
                        if entry[0] == 'OriginalFilename':
                            txt.append(res_txt_tag(entry[1], TAG_TYPE['PE_VERSION_INFO_ORIGINAL_FILENAME']))
                        elif entry[0] == 'FileDescription':
                            txt.append(res_txt_tag(entry[1], TAG_TYPE['PE_VERSION_INFO_FILE_DESCRIPTION']))
                        else:
                            txt.append(entry[1])
                        pe_resource_verinfo_res.add_line(txt)
    except AttributeError:
        pass

    # Resources Strings
    try:
        # Field-size constants for walking the raw DLGTEMPLATE(EX)
        # structures in RT_DIALOG resources.
        BYTE = 1
        WORD = 2
        DWORD = 4
        DS_SETFONT = 0x40
        DIALOG_LEAD = DWORD + DWORD + WORD + WORD + WORD + WORD + WORD
        DIALOG_ITEM_LEAD = DWORD + DWORD + WORD + WORD + WORD + WORD + WORD
        DIALOGEX_LEAD = WORD + WORD + DWORD + DWORD + DWORD + WORD + WORD + WORD + WORD + WORD
        DIALOGEX_TRAIL = WORD + WORD + BYTE + BYTE
        DIALOGEX_ITEM_LEAD = DWORD + DWORD + DWORD + WORD + WORD + WORD + WORD + DWORD
        DIALOGEX_ITEM_TRAIL = WORD
        ITEM_TYPES = {0x80: "BUTTON", 0x81: "EDIT", 0x82: "STATIC", 0x83: "LIST BOX", 0x84: "SCROLL BAR",
                      0x85: "COMBO BOX"}
        if hasattr(self.pe_file, 'DIRECTORY_ENTRY_RESOURCE'):
            for dir_type in self.pe_file.DIRECTORY_ENTRY_RESOURCE.entries:
                if dir_type.name is None:
                    if dir_type.id in pefile.RESOURCE_TYPE:
                        dir_type.name = pefile.RESOURCE_TYPE[dir_type.id]
                for nameID in dir_type.directory.entries:
                    if nameID.name is None:
                        nameID.name = hex(nameID.id)
                    for language in nameID.directory.entries:
                        strings = []
                        if str(dir_type.name) == "RT_DIALOG":
                            data_rva = language.data.struct.OffsetToData
                            size = language.data.struct.Size
                            data = self.pe_file.get_memory_mapped_image()[data_rva:data_rva + size]
                            offset = 0
                            # Signature 0x0001/0xFFFF marks a DLGTEMPLATEEX.
                            if self.pe_file.get_word_at_rva(data_rva + offset) == 0x1 \
                                    and self.pe_file.get_word_at_rva(data_rva + offset + WORD) == 0xFFFF:
                                # Use Extended Dialog Parsing
                                # Remove leading bytes
                                offset += DIALOGEX_LEAD
                                if data[offset:offset + 2] == "\xFF\xFF":
                                    offset += DWORD
                                else:
                                    offset += WORD
                                if data[offset:offset + 2] == "\xFF\xFF":
                                    offset += DWORD
                                else:
                                    offset += WORD
                                # Get window title
                                window_title = self.pe_file.get_string_u_at_rva(data_rva + offset)
                                if len(window_title) != 0:
                                    strings.append(("DIALOG_TITLE", window_title))
                                offset += len(window_title) * 2 + WORD
                                # Remove trailing bytes
                                offset += DIALOGEX_TRAIL
                                offset += len(self.pe_file.get_string_u_at_rva(data_rva + offset)) * 2 + WORD
                                # alignment adjustment
                                if (offset % 4) != 0:
                                    offset += WORD
                                while True:
                                    if offset >= size:
                                        break
                                    offset += DIALOGEX_ITEM_LEAD
                                    # Get item type
                                    if self.pe_file.get_word_at_rva(data_rva + offset) == 0xFFFF:
                                        offset += WORD
                                        item_type = ITEM_TYPES[self.pe_file.get_word_at_rva(data_rva + offset)]
                                        offset += WORD
                                    else:
                                        item_type = self.pe_file.get_string_u_at_rva(data_rva + offset)
                                        offset += len(item_type) * 2 + WORD
                                    # Get item text
                                    item_text = self.pe_file.get_string_u_at_rva(data_rva + offset)
                                    if len(item_text) != 0:
                                        strings.append((item_type, item_text))
                                    offset += len(item_text) * 2 + WORD
                                    extra_bytes = self.pe_file.get_word_at_rva(data_rva + offset)
                                    offset += extra_bytes + DIALOGEX_ITEM_TRAIL
                                    # Alignment adjustment
                                    if (offset % 4) != 0:
                                        offset += WORD
                            else:
                                # TODO: Use Non extended Dialog Parsing
                                # Remove leading bytes
                                style = self.pe_file.get_word_at_rva(data_rva + offset)
                                offset += DIALOG_LEAD
                                if data[offset:offset + 2] == "\xFF\xFF":
                                    offset += DWORD
                                else:
                                    offset += len(self.pe_file.get_string_u_at_rva(data_rva + offset)) * 2 + WORD
                                if data[offset:offset + 2] == "\xFF\xFF":
                                    offset += DWORD
                                else:
                                    offset += len(self.pe_file.get_string_u_at_rva(data_rva + offset)) * 2 + WORD
                                # Get window title
                                window_title = self.pe_file.get_string_u_at_rva(data_rva + offset)
                                if len(window_title) != 0:
                                    strings.append(("DIALOG_TITLE", window_title))
                                offset += len(window_title) * 2 + WORD
                                if (style & DS_SETFONT) != 0:
                                    offset += WORD
                                    offset += len(self.pe_file.get_string_u_at_rva(data_rva + offset)) * 2 + WORD
                                # Alignment adjustment
                                if (offset % 4) != 0:
                                    offset += WORD
                                while True:
                                    if offset >= size:
                                        break
                                    offset += DIALOG_ITEM_LEAD
                                    # Get item type
                                    if self.pe_file.get_word_at_rva(data_rva + offset) == 0xFFFF:
                                        offset += WORD
                                        item_type = ITEM_TYPES[self.pe_file.get_word_at_rva(data_rva + offset)]
                                        offset += WORD
                                    else:
                                        item_type = self.pe_file.get_string_u_at_rva(data_rva + offset)
                                        offset += len(item_type) * 2 + WORD
                                    # Get item text
                                    if self.pe_file.get_word_at_rva(data_rva + offset) == 0xFFFF:
                                        offset += DWORD
                                    else:
                                        item_text = self.pe_file.get_string_u_at_rva(data_rva + offset)
                                        if len(item_text) != 0:
                                            strings.append((item_type, item_text))
                                        offset += len(item_text) * 2 + WORD
                                    extra_bytes = self.pe_file.get_word_at_rva(data_rva + offset)
                                    offset += extra_bytes + WORD
                                    # Alignment adjustment
                                    if (offset % 4) != 0:
                                        offset += WORD
                        elif str(dir_type.name) == "RT_STRING":
                            # String tables: length-prefixed UTF-16 strings.
                            data_rva = language.data.struct.OffsetToData
                            size = language.data.struct.Size
                            data = self.pe_file.get_memory_mapped_image()[data_rva:data_rva + size]
                            offset = 0
                            while True:
                                if offset >= size:
                                    break
                                ustr_length = self.pe_file.get_word_from_data(data[offset:offset + 2], 0)
                                offset += 2
                                if ustr_length == 0:
                                    continue
                                ustr = self.pe_file.get_string_u_at_rva(data_rva + offset, max_length=ustr_length)
                                offset += ustr_length * 2
                                strings.append((None, ustr))
                        if len(strings) > 0:
                            success = False
                            try:
                                comment = "%s (id:%s - lang_id:0x%04X [%s])" % (
                                    str(dir_type.name), str(nameID.name), language.id, lcid[language.id])
                            except KeyError:
                                comment = "%s (id:%s - lang_id:0x%04X [Unknown language])" % (
                                    str(dir_type.name), str(nameID.name), language.id)
                            res = ResultSection(SCORE['NULL'], "PE: STRINGS - %s" % comment)
                            for idx in xrange(len(strings)):
                                # noinspection PyBroadException
                                try:
                                    tag_value = strings[idx][1]
                                    # The following line crash chardet if a
                                    # UPX packed file as packed the resources...
                                    chardet.detect(tag_value)  # TODO: Find a better way to do this
                                    tag_value = tag_value.replace('\r', ' ').replace('\n', ' ')
                                    if strings[idx][0] is not None:
                                        res.add_line([strings[idx][0], ": ",
                                                      res_txt_tag(tag_value, TAG_TYPE['FILE_STRING'])])
                                    else:
                                        res.add_line(res_txt_tag(tag_value, TAG_TYPE['FILE_STRING']))
                                    make_tag(self.file_res, 'FILE_STRING', tag_value, weight='NULL',
                                             usage='IDENTIFICATION')
                                    success = True
                                except:
                                    pass
                            if success:
                                self.file_res.add_section(res)
                            else:
                                pass
    except AttributeError, e:
        self.log.debug("\t Error parsing output: " + repr(e))
def get_pe_info(self, lcid):
    """Dumps the PE header as Results in the FileResult.

    Traverses the parsed ``self.pe_file`` (pefile object) and appends a
    ResultSection for each area of interest: header, rich header, data
    directories, sections, debug, imports, exports, resources, version
    info and resource strings.  ``lcid`` maps language ids to names.
    """
    # PE Header
    pe_header_res = ResultSection(SCORE['NULL'], "PE: HEADER")

    # PE Header: Header Info
    pe_header_info_res = ResultSection(SCORE.NULL, "[HEADER INFO]", parent=pe_header_res)
    pe_header_info_res.add_line("Entry point address: 0x%08X" % self.pe_file.OPTIONAL_HEADER.AddressOfEntryPoint)
    pe_header_info_res.add_line("Linker Version: %02d.%02d" % (self.pe_file.OPTIONAL_HEADER.MajorLinkerVersion,
                                                               self.pe_file.OPTIONAL_HEADER.MinorLinkerVersion))
    pe_header_info_res.add_line("OS Version: %02d.%02d" % (self.pe_file.OPTIONAL_HEADER.MajorOperatingSystemVersion,
                                                           self.pe_file.OPTIONAL_HEADER.MinorOperatingSystemVersion))
    pe_header_info_res.add_line(["Time Date Stamp: %s (" % time.ctime(self.pe_file.FILE_HEADER.TimeDateStamp),
                                 res_txt_tag(str(self.pe_file.FILE_HEADER.TimeDateStamp),
                                             TAG_TYPE['PE_LINK_TIME_STAMP']),
                                 ")"])
    try:
        pe_header_info_res.add_line("Machine Type: %s (%s)" % (
            hex(self.pe_file.FILE_HEADER.Machine), pefile.MACHINE_TYPE[self.pe_file.FILE_HEADER.Machine]))
    except KeyError:
        # Machine id not in pefile's table: omit the line.
        pass

    # PE Header: Rich Header
    # noinspection PyBroadException
    try:
        if self.pe_file.RICH_HEADER is not None:
            pe_rich_header_info = ResultSection(SCORE.NULL, "[RICH HEADER INFO]", parent=pe_header_res)
            values_list = self.pe_file.RICH_HEADER.values
            pe_rich_header_info.add_line("VC++ tools used:")
            # values come in pairs: (id<<16 | version, use count).
            for i in range(0, len(values_list) / 2):
                line = "Tool Id: %3d Version: %6d Times used: %3d" % (
                    values_list[2 * i] >> 16, values_list[2 * i] & 0xFFFF, values_list[2 * i + 1])
                pe_rich_header_info.add_line(line)
    except:
        self.log.exception("Unable to parse PE Rich Header")

    # PE Header: Data Directories
    pe_dd_res = ResultSection(SCORE.NULL, "[DATA DIRECTORY]", parent=pe_header_res)
    for data_directory in self.pe_file.OPTIONAL_HEADER.DATA_DIRECTORY:
        # Skip empty directory entries.
        if data_directory.Size or data_directory.VirtualAddress:
            pe_dd_res.add_line("%s - va: 0x%08X - size: 0x%08X"
                               % (data_directory.name[len("IMAGE_DIRECTORY_ENTRY_"):],
                                  data_directory.VirtualAddress, data_directory.Size))

    # PE Header: Sections
    pe_sec_res = ResultSection(SCORE.NULL, "[SECTIONS]", parent=pe_header_res)
    self._init_section_list()
    try:
        for (sname, section, sec_md5, sec_entropy) in self._sect_list:
            txt = [sname, " - Virtual: 0x%08X (0x%08X bytes)"
                          " - Physical: 0x%08X (0x%08X bytes) - " %
                   (section.VirtualAddress, section.Misc_VirtualSize,
                    section.PointerToRawData, section.SizeOfRawData),
                   "hash:", res_txt_tag(sec_md5, TAG_TYPE['PE_SECTION_HASH']),
                   " - entropy:%f (min:0.0, Max=8.0)" % sec_entropy]
            # add a search tag for the Section Hash
            make_tag(self.file_res, 'PE_SECTION_HASH', sec_md5, 'HIGH', usage='CORRELATION')
            pe_sec_res.add_line(txt)
    except AttributeError:
        pass

    self.file_res.add_section(pe_header_res)

    # debug
    try:
        if self.pe_file.DebugTimeDateStamp:
            pe_debug_res = ResultSection(SCORE['NULL'], "PE: DEBUG")
            self.file_res.add_section(pe_debug_res)
            pe_debug_res.add_line("Time Date Stamp: %s" % time.ctime(self.pe_file.DebugTimeDateStamp))
            # When it is a unicode, we know we are coming from RSDS which is UTF-8
            # otherwise, we come from NB10 and we need to guess the charset.
            if type(self.pe_file.pdb_filename) != unicode:
                char_enc_guessed = translate_str(self.pe_file.pdb_filename)
                pdb_filename = char_enc_guessed['converted']
            else:
                char_enc_guessed = {'confidence': 1.0, 'encoding': 'utf-8'}
                pdb_filename = self.pe_file.pdb_filename
            pe_debug_res.add_line(["PDB: '",
                                   res_txt_tag_charset(pdb_filename,
                                                       TAG_TYPE['PE_PDB_FILENAME'],
                                                       char_enc_guessed['encoding'],
                                                       char_enc_guessed['confidence']),
                                   "'"])
            # self.log.debug(u"\tPDB: %s" % pdb_filename)
    except AttributeError:
        pass

    # imports
    try:
        if hasattr(self.pe_file, 'DIRECTORY_ENTRY_IMPORT') and len(self.pe_file.DIRECTORY_ENTRY_IMPORT) > 0:
            pe_import_res = ResultSection(SCORE['NULL'], "PE: IMPORTS")
            self.file_res.add_section(pe_import_res)
            for entry in self.pe_file.DIRECTORY_ENTRY_IMPORT:
                pe_import_dll_res = ResultSection(SCORE.NULL, "[%s]" % entry.dll, parent=pe_import_res)
                first_element = True
                line = StringIO()
                # Comma-separated list of imported names (ordinal when
                # imported by ordinal only).
                for imp in entry.imports:
                    if first_element:
                        first_element = False
                    else:
                        line.write(", ")
                    if imp.name is None:
                        line.write(str(imp.ordinal))
                    else:
                        line.write(imp.name)
                pe_import_dll_res.add_line(line.getvalue())
        else:
            pe_import_res = ResultSection(SCORE['NULL'], "PE: NO IMPORTS DETECTED ")
            self.file_res.add_section(pe_import_res)
    except AttributeError:
        pass

    # exports
    try:
        if self.pe_file.DIRECTORY_ENTRY_EXPORT.struct.TimeDateStamp is not None:
            pe_export_res = ResultSection(SCORE['NULL'], "PE: EXPORTS")
            self.file_res.add_section(pe_export_res)
            # noinspection PyBroadException
            try:
                pe_export_res.add_line(["Module Name: ",
                                        res_txt_tag(safe_str(self.pe_file.ModuleName),
                                                    TAG_TYPE['PE_EXPORT_MODULE_NAME'])])
            except:
                pass
            if self.pe_file.DIRECTORY_ENTRY_EXPORT.struct.TimeDateStamp == 0:
                pe_export_res.add_line("Time Date Stamp: 0")
            else:
                pe_export_res.add_line("Time Date Stamp: %s"
                                       % time.ctime(self.pe_file.DIRECTORY_ENTRY_EXPORT.struct.TimeDateStamp))
            first_element = True
            txt = []
            # Single comma-separated line of "ordinal[: name]" entries.
            for exp in self.pe_file.DIRECTORY_ENTRY_EXPORT.symbols:
                if first_element:
                    first_element = False
                else:
                    txt.append(", ")
                txt.append(str(exp.ordinal))
                if exp.name is not None:
                    txt.append(": ")
                    txt.append(res_txt_tag(exp.name, TAG_TYPE['PE_EXPORT_FCT_NAME']))
            pe_export_res.add_line(txt)
    except AttributeError:
        pass

    # resources
    try:
        if len(self.pe_file.DIRECTORY_ENTRY_RESOURCE.entries) > 0:
            pe_resource_res = ResultSection(SCORE['NULL'], "PE: RESOURCES")
            self.file_res.add_section(pe_resource_res)
            for res_entry in self.pe_file.DIRECTORY_ENTRY_RESOURCE.entries:
                if res_entry.name is None:
                    # noinspection PyBroadException
                    try:
                        entry_name = pefile.RESOURCE_TYPE[res_entry.id]
                    except:
                        # pylint: disable-msg=W0702
                        # unfortunately this code was done before we started to really care about which
                        # exception to catch so, I actually don't really know at this point, would need to try
                        # out :-\
                        entry_name = "UNKNOWN"
                else:
                    entry_name = res_entry.name
                for name_id in res_entry.directory.entries:
                    if name_id.name is None:
                        name_id.name = hex(name_id.id)
                    for language in name_id.directory.entries:
                        try:
                            language_desc = lcid[language.id]
                        except KeyError:
                            language_desc = 'Unknown language'
                        line = []
                        if res_entry.name is None:
                            line.append(entry_name)
                        else:
                            line.append(res_txt_tag(str(entry_name), TAG_TYPE['PE_RESOURCE_NAME']))
                        line.append(" " + str(name_id.name) + " ")
                        line.append("0x")
                        # this will add a link to search in AL for the value
                        line.append(res_txt_tag("%04X" % language.id, TAG_TYPE['PE_RESOURCE_LANGUAGE']))
                        line.append(" (%s)" % language_desc)
                        make_tag(self.file_res, 'PE_RESOURCE_LANGUAGE', language.id,
                                 weight='LOW', usage='IDENTIFICATION')
                        # get the size of the resource
                        res_size = language.data.struct.Size
                        line.append(" Size: 0x%x" % res_size)
                        pe_resource_res.add_line(line)
    except AttributeError:
        pass

    # Resources-VersionInfo
    try:
        # NOTE(review): dead check (body is 'pass') — presumably leftover;
        # confirm before cleaning up.
        if len(self.pe_file.FileInfo) > 2:
            pass
        for file_info in self.pe_file.FileInfo:
            if file_info.name == "StringFileInfo":
                if len(file_info.StringTable) > 0:
                    pe_resource_verinfo_res = ResultSection(SCORE['NULL'], "PE: RESOURCES-VersionInfo")
                    self.file_res.add_section(pe_resource_verinfo_res)
                    try:
                        if "LangID" in file_info.StringTable[0].entries:
                            lang_id = file_info.StringTable[0].get("LangID")
                            # High word is the language id; 0 means NEUTRAL.
                            if not int(lang_id, 16) >> 16 == 0:
                                txt = ('LangId: ' + lang_id + " (" + lcid[int(lang_id, 16) >> 16] + ")")
                                pe_resource_verinfo_res.add_line(txt)
                            else:
                                txt = ('LangId: ' + lang_id + " (NEUTRAL)")
                                pe_resource_verinfo_res.add_line(txt)
                    except (ValueError, KeyError):
                        # NOTE(review): relies on lang_id having been bound
                        # before the exception — verify.
                        txt = ('LangId: %s is invalid' % lang_id)
                        pe_resource_verinfo_res.add_line(txt)
                    for entry in file_info.StringTable[0].entries.items():
                        txt = ['%s: ' % entry[0]]
                        if entry[0] == 'OriginalFilename':
                            txt.append(res_txt_tag(entry[1], TAG_TYPE['PE_VERSION_INFO_ORIGINAL_FILENAME']))
                        elif entry[0] == 'FileDescription':
                            txt.append(res_txt_tag(entry[1], TAG_TYPE['PE_VERSION_INFO_FILE_DESCRIPTION']))
                        else:
                            txt.append(entry[1])
                        pe_resource_verinfo_res.add_line(txt)
    except AttributeError:
        pass

    # Resources Strings
    try:
        # Sizes of the raw fields skipped while walking DLGTEMPLATE(EX)
        # dialog resources.
        BYTE = 1
        WORD = 2
        DWORD = 4
        DS_SETFONT = 0x40
        DIALOG_LEAD = DWORD + DWORD + WORD + WORD + WORD + WORD + WORD
        DIALOG_ITEM_LEAD = DWORD + DWORD + WORD + WORD + WORD + WORD + WORD
        DIALOGEX_LEAD = WORD + WORD + DWORD + DWORD + DWORD + WORD + WORD + WORD + WORD + WORD
        DIALOGEX_TRAIL = WORD + WORD + BYTE + BYTE
        DIALOGEX_ITEM_LEAD = DWORD + DWORD + DWORD + WORD + WORD + WORD + WORD + DWORD
        DIALOGEX_ITEM_TRAIL = WORD
        ITEM_TYPES = {0x80: "BUTTON", 0x81: "EDIT", 0x82: "STATIC", 0x83: "LIST BOX", 0x84: "SCROLL BAR",
                      0x85: "COMBO BOX"}
        if hasattr(self.pe_file, 'DIRECTORY_ENTRY_RESOURCE'):
            for dir_type in self.pe_file.DIRECTORY_ENTRY_RESOURCE.entries:
                if dir_type.name is None:
                    if dir_type.id in pefile.RESOURCE_TYPE:
                        dir_type.name = pefile.RESOURCE_TYPE[dir_type.id]
                for nameID in dir_type.directory.entries:
                    if nameID.name is None:
                        nameID.name = hex(nameID.id)
                    for language in nameID.directory.entries:
                        strings = []
                        if str(dir_type.name) == "RT_DIALOG":
                            data_rva = language.data.struct.OffsetToData
                            size = language.data.struct.Size
                            data = self.pe_file.get_memory_mapped_image()[data_rva:data_rva + size]
                            offset = 0
                            # 0x0001/0xFFFF signature identifies DLGTEMPLATEEX.
                            if self.pe_file.get_word_at_rva(data_rva + offset) == 0x1 \
                                    and self.pe_file.get_word_at_rva(data_rva + offset + WORD) == 0xFFFF:
                                # Use Extended Dialog Parsing
                                # Remove leading bytes
                                offset += DIALOGEX_LEAD
                                if data[offset:offset + 2] == "\xFF\xFF":
                                    offset += DWORD
                                else:
                                    offset += WORD
                                if data[offset:offset + 2] == "\xFF\xFF":
                                    offset += DWORD
                                else:
                                    offset += WORD
                                # Get window title
                                window_title = self.pe_file.get_string_u_at_rva(data_rva + offset)
                                if len(window_title) != 0:
                                    strings.append(("DIALOG_TITLE", window_title))
                                offset += len(window_title) * 2 + WORD
                                # Remove trailing bytes
                                offset += DIALOGEX_TRAIL
                                offset += len(self.pe_file.get_string_u_at_rva(data_rva + offset)) * 2 + WORD
                                # alignment adjustment
                                if (offset % 4) != 0:
                                    offset += WORD
                                while True:
                                    if offset >= size:
                                        break
                                    offset += DIALOGEX_ITEM_LEAD
                                    # Get item type
                                    if self.pe_file.get_word_at_rva(data_rva + offset) == 0xFFFF:
                                        offset += WORD
                                        item_type = ITEM_TYPES[self.pe_file.get_word_at_rva(data_rva + offset)]
                                        offset += WORD
                                    else:
                                        item_type = self.pe_file.get_string_u_at_rva(data_rva + offset)
                                        offset += len(item_type) * 2 + WORD
                                    # Get item text
                                    item_text = self.pe_file.get_string_u_at_rva(data_rva + offset)
                                    if len(item_text) != 0:
                                        strings.append((item_type, item_text))
                                    offset += len(item_text) * 2 + WORD
                                    extra_bytes = self.pe_file.get_word_at_rva(data_rva + offset)
                                    offset += extra_bytes + DIALOGEX_ITEM_TRAIL
                                    # Alignment adjustment
                                    if (offset % 4) != 0:
                                        offset += WORD
                            else:
                                # TODO: Use Non extended Dialog Parsing
                                # Remove leading bytes
                                style = self.pe_file.get_word_at_rva(data_rva + offset)
                                offset += DIALOG_LEAD
                                if data[offset:offset + 2] == "\xFF\xFF":
                                    offset += DWORD
                                else:
                                    offset += len(self.pe_file.get_string_u_at_rva(data_rva + offset)) * 2 + WORD
                                if data[offset:offset + 2] == "\xFF\xFF":
                                    offset += DWORD
                                else:
                                    offset += len(self.pe_file.get_string_u_at_rva(data_rva + offset)) * 2 + WORD
                                # Get window title
                                window_title = self.pe_file.get_string_u_at_rva(data_rva + offset)
                                if len(window_title) != 0:
                                    strings.append(("DIALOG_TITLE", window_title))
                                offset += len(window_title) * 2 + WORD
                                if (style & DS_SETFONT) != 0:
                                    offset += WORD
                                    offset += len(self.pe_file.get_string_u_at_rva(data_rva + offset)) * 2 + WORD
                                # Alignment adjustment
                                if (offset % 4) != 0:
                                    offset += WORD
                                while True:
                                    if offset >= size:
                                        break
                                    offset += DIALOG_ITEM_LEAD
                                    # Get item type
                                    if self.pe_file.get_word_at_rva(data_rva + offset) == 0xFFFF:
                                        offset += WORD
                                        item_type = ITEM_TYPES[self.pe_file.get_word_at_rva(data_rva + offset)]
                                        offset += WORD
                                    else:
                                        item_type = self.pe_file.get_string_u_at_rva(data_rva + offset)
                                        offset += len(item_type) * 2 + WORD
                                    # Get item text
                                    if self.pe_file.get_word_at_rva(data_rva + offset) == 0xFFFF:
                                        offset += DWORD
                                    else:
                                        item_text = self.pe_file.get_string_u_at_rva(data_rva + offset)
                                        if len(item_text) != 0:
                                            strings.append((item_type, item_text))
                                        offset += len(item_text) * 2 + WORD
                                    extra_bytes = self.pe_file.get_word_at_rva(data_rva + offset)
                                    offset += extra_bytes + WORD
                                    # Alignment adjustment
                                    if (offset % 4) != 0:
                                        offset += WORD
                        elif str(dir_type.name) == "RT_STRING":
                            # RT_STRING: sequence of length-prefixed UTF-16
                            # strings.
                            data_rva = language.data.struct.OffsetToData
                            size = language.data.struct.Size
                            data = self.pe_file.get_memory_mapped_image()[data_rva:data_rva + size]
                            offset = 0
                            while True:
                                if offset >= size:
                                    break
                                ustr_length = self.pe_file.get_word_from_data(data[offset:offset + 2], 0)
                                offset += 2
                                if ustr_length == 0:
                                    continue
                                ustr = self.pe_file.get_string_u_at_rva(data_rva + offset, max_length=ustr_length)
                                offset += ustr_length * 2
                                strings.append((None, ustr))
                        if len(strings) > 0:
                            success = False
                            try:
                                comment = "%s (id:%s - lang_id:0x%04X [%s])" % (
                                    str(dir_type.name), str(nameID.name), language.id, lcid[language.id])
                            except KeyError:
                                comment = "%s (id:%s - lang_id:0x%04X [Unknown language])" % (
                                    str(dir_type.name), str(nameID.name), language.id)
                            res = ResultSection(SCORE['NULL'], "PE: STRINGS - %s" % comment)
                            for idx in xrange(len(strings)):
                                # noinspection PyBroadException
                                try:
                                    tag_value = strings[idx][1]
                                    # The following line crash chardet if a
                                    # UPX packed file as packed the resources...
                                    chardet.detect(tag_value)  # TODO: Find a better way to do this
                                    tag_value = tag_value.replace('\r', ' ').replace('\n', ' ')
                                    if strings[idx][0] is not None:
                                        res.add_line([strings[idx][0], ": ",
                                                      res_txt_tag(tag_value, TAG_TYPE['FILE_STRING'])])
                                    else:
                                        res.add_line(res_txt_tag(tag_value, TAG_TYPE['FILE_STRING']))
                                    make_tag(self.file_res, 'FILE_STRING', tag_value, weight='NULL',
                                             usage='IDENTIFICATION')
                                    success = True
                                except:
                                    pass
                            if success:
                                self.file_res.add_section(res)
                            else:
                                pass
    except AttributeError, e:
        self.log.debug("\t Error parsing output: " + repr(e))
from cStringIO import StringIO import time TIMES = 100000 init = time.clock() value = '' for i in range(TIMES): value += str(i) print "Concatenation: %s" % (init - time.clock()) init = time.clock() value = StringIO() for i in range(TIMES): value.write(str(i)) print "StringIO: %s" % (init - time.clock()) init = time.clock() value = [] for i in range(TIMES): value.append(str(i)) finalValue = ''.join(value) print "List: %s" % (init - time.clock())
from cStringIO import StringIO import time TIMES = 100000 init = time.clock() value = '' for i in range(TIMES): value += str(i) print "Concatenation: %s" % ( init - time.clock()) init = time.clock() value = StringIO() for i in range(TIMES): value.write(str(i)) print "StringIO: %s" % ( init - time.clock()) init = time.clock() value = [] for i in range(TIMES): value.append(str(i)) finalValue = ''.join(value) print "List: %s" % ( init - time.clock())
def send_title(self, text, **keywords):
    """
    Render and emit the HTML <head> (via the 'bits/head.html' Jinja2
    template) and the opening of <body> for a page.

    An almost verbatim copy of
    MoinMoin.theme.__init__.ThemeBase.send_title, that replaces the
    hard coded HTML string template with Jinja2.

    @param text: the page title text
    @keyword page / pagename: the Page object, or a page name from which
        a Page is constructed (``page`` wins when both are given)
    @keyword print_mode, media, html_head, body_attr, body_onload,
        pi_refresh, trail, editor_mode: optional rendering tweaks
        passed through to the template / theme code
    @raises DeprecationWarning: if the removed ``msg=`` keyword is used
    """
    req = self.request
    _ = req.getText
    rev = req.rev
    # Accept either a ready-made Page object or a bare page name.
    if keywords.has_key('page'):
        page = keywords['page']
        pagename = page.page_name
    else:
        pagename = keywords.get('pagename', '')
        page = Page(req, pagename)
    if keywords.get('msg', ''):
        raise DeprecationWarning("Using send_page(msg=) is deprecated! "
                                 "Use theme.add_msg() instead!")
    scriptname = req.script_root

    # get name of system pages
    page_front_page = wikiutil.getFrontPage(req).page_name
    page_help_contents = getLocalizedPage(req, 'HelpContents').page_name
    page_title_index = getLocalizedPage(req, 'TitleIndex').page_name
    page_site_navigation = getLocalizedPage(req, 'SiteNavigation').page_name
    page_word_index = getLocalizedPage(req, 'WordIndex').page_name
    page_help_formatting = getLocalizedPage(req, 'HelpOnFormatting').page_name
    page_find_page = getLocalizedPage(req, 'FindPage').page_name
    home_page = wikiutil.getInterwikiHomePage(req)
    page_parent_page = getattr(page.getParentPage(), 'page_name', None)

    # set content_type, including charset, so web server doesn't touch it:
    req.content_type = "text/html; charset=%s" % config.charset

    meta_keywords = req.getPragma('keywords') or ""
    meta_description = req.getPragma('description') or ""
    rss_link = self.rsslink({'page': page})
    universal_edit_button = self.universal_edit_button({'page': page})
    stylesheets = self.html_stylesheets({
        'print_media': keywords.get('print_mode', False),
        'media': keywords.get('media', 'screen')
    })
    gui_edit_link = self.guiEditorScript({'page': page})

    # Template context for bits/head.html; several keys are placeholders
    # that get filled in conditionally below.
    context = {
        'title': Markup(wikiutil.escape(text)),
        'sitename': wikiutil.escape(req.cfg.html_pagetitle or
                                    req.cfg.sitename),
        'charset': page.output_charset,
        'meta_keywords': wikiutil.escape(meta_keywords, 1),
        'meta_description': wikiutil.escape(meta_description, 1),
        'robots': None,  # might be "index", "noindex", or None
        'refresh_seconds': None,
        'refresh_url': None,
        'static_base': "%s/%s/" % (self.cfg.url_prefix_static, self.name),
        'stylesheets': stylesheets,
        'rss_link_title': rss_link[0],
        'rss_link_href': rss_link[1],
        'universal_edit_button_title': universal_edit_button[0],
        'universal_edit_button_href': universal_edit_button[1],
        'common_js': '%s/common/js/%s.js' % (req.cfg.url_prefix_static,
                                             'common'),
        'search_hint': req.getText('Search'),
        'gui_editor_link_href': gui_edit_link[0],
        'gui_editor_link_text': gui_edit_link[1],
        'extra_html_head': Markup(keywords.get('html_head', '')),
        'page_start_href': req.href(page_front_page),
        'page_alternate_title': None,
        'page_alternate_href': '',
        'print_alternate_title': None,
        'print_alternate_href': '',
        'page_up_href': None,
    }

    # search engine precautions / optimization:
    # if it is an action or edit/search, send query headers (noindex,nofollow):
    if req.query_string or req.method == 'POST':
        context['robots'] = "noindex"
    # we don't want to have BadContent stuff indexed:
    elif pagename in [
            'BadContent',
            'LocalBadContent',
    ]:
        context['robots'] = "noindex"
    # if it is a special page, index it and follow the links - we do it
    # for the original, English pages as well as for (the possibly
    # modified) frontpage:
    elif pagename in [
            page_front_page,
            req.cfg.page_front_page,
            page_title_index,
            'TitleIndex',
            page_find_page,
            'FindPage',
            page_site_navigation,
            'SiteNavigation',
            'RecentChanges',
    ]:
        context['robots'] = "index"

    if 'pi_refresh' in keywords and keywords['pi_refresh']:
        context.update({
            'refresh_seconds': keywords['pi_refresh'][0],
            'refresh_url': keywords['pi_refresh'][1]
        })

    # Links
    if pagename:
        context.update({
            'page_alternate_title': _('Wiki Markup'),
            'page_alternate_href': page.url(req,
                                            querystr=dict(action='raw'))
        })
        context.update({
            'print_alternate_title': _('Print View'),
            'print_alternate_href': page.url(req,
                                             querystr=dict(action='print'))
        })
        if page_parent_page:
            # NOTE(review): the context dict above defines 'page_up_href',
            # but this sets 'page_up' -- looks like a key mismatch; verify
            # against the bits/head.html template before changing.
            context['page_up'] = req.href(page_parent_page)

    # Temporarily redirect req.write into a buffer so AttachFile's
    # <link rel=...> output can be captured into the template context
    # instead of being sent to the client directly.
    output = StringIO()
    write_f_onhold = req.write
    req.write = lambda s: output.write(s.encode('utf-8'))
    if pagename and req.user.may.read(pagename):
        from MoinMoin.action import AttachFile
        AttachFile.send_link_rel(req, pagename)
    context['attached_links'] = Markup(output.getvalue())
    req.write = write_f_onhold  # restore the real writer

    context['extra_links'] = [
        {
            'rel': "Search",
            'href': "%s" % req.href(page_find_page)
        },
        {
            'rel': "Index",
            'href': "%s" % req.href(page_title_index)
        },
        {
            'rel': "Glossary",
            'href': "%s" % req.href(page_word_index)
        },
        {
            'rel': "Help",
            'href': "%s" % req.href(page_help_formatting)
        },
    ]

    # Render the <head> through Jinja2 and emit it immediately.
    template = self.j2env.get_template('bits/head.html')
    output = template.render(context)
    req.write(output)
    output = []  # reused below as a list of HTML fragments

    # start the <body>
    bodyattr = []
    if keywords.has_key('body_attr'):
        bodyattr.append(' ')
        bodyattr.append(keywords['body_attr'])
    # Set body to the user interface language and direction
    bodyattr.append(' %s' % self.ui_lang_attr())
    body_onload = keywords.get('body_onload', '')
    if body_onload:
        bodyattr.append(''' onload="%s"''' % body_onload)
    output.append('\n<body%s>\n' % ''.join(bodyattr))

    # Output -----------------------------------------------------------

    # If in print mode, start page div and emit the title
    if keywords.get('print_mode', 0):
        d = {
            'title_text': text,
            'page': page,
            'page_name': pagename or '',
            'rev': rev,
        }
        req.themedict = d
        output.append(self.startPage())
        output.append(self.interwiki(d))
        output.append(self.title(d))

    # In standard mode, emit theme.header
    else:
        exists = pagename and page.exists(includeDeleted=False)
        # prepare dict for theme code:
        d = {
            'theme': self.name,
            'script_name': scriptname,
            'title_text': text,
            'logo_string': req.cfg.logo_string,
            'site_name': req.cfg.sitename,
            'page': page,
            'rev': rev,
            'pagesize': pagename and page.size() or 0,
            # exists checked to avoid creation of empty edit-log for
            # non-existing pages
            'last_edit_info': exists and page.lastEditInfo() or '',
            'page_name': pagename or '',
            'page_find_page': page_find_page,
            'page_front_page': page_front_page,
            'home_page': home_page,
            'page_help_contents': page_help_contents,
            'page_help_formatting': page_help_formatting,
            'page_parent_page': page_parent_page,
            'page_title_index': page_title_index,
            'page_word_index': page_word_index,
            'user_name': req.user.name,
            'user_valid': req.user.valid,
            'msg': self._status,
            'trail': keywords.get('trail', None),
            # Discontinued keys, keep for a while for 3rd party theme
            # developers
            'titlesearch': 'use self.searchform(d)',
            'textsearch': 'use self.searchform(d)',
            'navibar': ['use self.navibar(d)'],
            'available_actions': ['use self.request.availableActions(page)'],
        }

        # add quoted versions of pagenames
        newdict = {}
        for key in d:
            if key.startswith('page_'):
                if not d[key] is None:
                    newdict['q_' + key] = wikiutil.quoteWikinameURL(d[key])
                else:
                    newdict['q_' + key] = None
        d.update(newdict)
        req.themedict = d

        # now call the theming code to do the rendering
        if keywords.get('editor_mode', 0):
            output.append(self.editorheader(d))
        else:
            output.append(self.header(d))

    # emit it
    req.write(''.join(output))
    output = []
    self._send_title_called = True