def read(self, raw, offset):
    self.data = dict()

    #uint16_t terrain_restriction_count;
    #uint16_t terrain_count;
    header_struct = Struct(endianness + "H H")
    header = header_struct.unpack_from(raw, offset)
    offset += header_struct.size
    self.data["terrain_restriction_count"], self.data["terrain_count"] = header

    #int32_t terrain_restriction_offset0[terrain_restriction_count];
    #int32_t terrain_restriction_offset1[terrain_restriction_count];
    tr_offset_struct = Struct(endianness + "%di" % self.data["terrain_restriction_count"])

    self.data["terrain_restriction_offset0"] = tr_offset_struct.unpack_from(raw, offset)
    offset += tr_offset_struct.size
    self.data["terrain_restriction_offset1"] = tr_offset_struct.unpack_from(raw, offset)
    offset += tr_offset_struct.size

    self.data["terrain_restriction"] = list()
    for i in range(self.data["terrain_restriction_count"]):
        t = TerrainRestriction(self.data["terrain_count"])
        offset = t.read(raw, offset)
        self.data["terrain_restriction"] += [t.data]

    return offset

def leapseconds(tzfiles=['/usr/share/zoneinfo/right/UTC',
                         '/usr/lib/zoneinfo/right/UTC'],
                use_fallback=True):
    """Extract leap seconds from *tzfiles*."""
    for filename in tzfiles:
        try:
            file = open(filename, 'rb')
        except IOError:
            continue
        else:
            break
    else:  # no break
        if not use_fallback:
            raise ValueError('Unable to open any tzfile: %s' % (tzfiles,))
        else:
            return _fallback()

    with file:
        header = Struct('>4s c 15x 6i')  # see struct tzhead above
        (magic, version, _, _, leapcnt, timecnt,
         typecnt, charcnt) = header.unpack_from(file.read(header.size))
        if magic != "TZif".encode():
            raise ValueError('Wrong magic %r in tzfile: %s' % (
                magic, file.name))
        if version not in '\x0023'.encode():
            warn('Unsupported version %r in tzfile: %s' % (
                version, file.name), RuntimeWarning)
        if leapcnt == 0:
            raise ValueError("No leap seconds in tzfile: %s" % (
                file.name))

        """# from tzfile.h[2] (the file is in public domain)
        . . .header followed by. . .
        tzh_timecnt (char [4])s  coded transition times a la time(2)
        tzh_timecnt (unsigned char)s types of local time starting at above
        tzh_typecnt repetitions of
            one (char [4]) coded UT offset in seconds
            one (unsigned char) used to set tm_isdst
            one (unsigned char) that's an abbreviation list index
        tzh_charcnt (char)s '\0'-terminated zone abbreviations
        tzh_leapcnt repetitions of
            one (char [4]) coded leap second transition times
            one (char [4]) total correction after above
        """
        file.read(timecnt * 5 + typecnt * 6 + charcnt)  # skip

        result = [LeapSecond(datetime(1972, 1, 1), timedelta(seconds=10))]
        nleap_seconds = 10
        tai_epoch_as_tai = datetime(1970, 1, 1, 0, 0, 10)
        buf = Struct(">2i")
        for _ in range(leapcnt):  # read leap seconds
            t, cnt = buf.unpack_from(file.read(buf.size))
            dTAI_UTC = nleap_seconds + cnt
            utc = tai_epoch_as_tai + timedelta(seconds=t - dTAI_UTC + 1)
            assert utc - datetime(utc.year, utc.month, utc.day) == timedelta(0)
            result.append(LeapSecond(utc, timedelta(seconds=dTAI_UTC)))
        result.append(sentinel)
        return result

def read(self, raw, offset):
    self.data = dict()

    #uint16_t graphic_count;
    header_struct = Struct(endianness + "H")
    self.data["graphic_count"], = header_struct.unpack_from(raw, offset)
    offset += header_struct.size

    #int32_t graphic_offset[graphic_count];
    offset_struct = Struct(endianness + "%di" % self.data["graphic_count"])
    self.data["graphic_offset"] = offset_struct.unpack_from(raw, offset)
    offset += offset_struct.size

    self.data["graphic"] = list()
    for i in range(self.data["graphic_count"]):
        g_offset = self.data["graphic_offset"][i]
        if g_offset == 0:
            #dbg("SKIPPING graphic %d" % i)
            continue

        t = Graphic()
        offset = t.read(raw, offset)
        self.data["graphic"] += [t.data]

    #int8_t[138] rendering_data;
    rendering_data_struct = Struct(endianness + "138c")
    self.data["rendering_data"] = rendering_data_struct.unpack_from(raw, offset)
    offset += rendering_data_struct.size

    return offset

def read(self, raw, offset):
    self.data = dict()

    #int32_t id;
    #int8_t unknown;
    #int32_t upper_building;
    #int32_t required_researches;
    #int32_t age;
    #int32_t unit_or_research0;
    #int32_t unit_or_research1;
    #int32_t unknown[8];
    #int32_t mode0;
    #int32_t mode1;
    #int32_t unknown[7];
    #int32_t vertical_line;
    #int8_t unit_count;
    unit_connection_struct = Struct(endianness + "i b 5i 8i 2i 7i i b")
    pc = unit_connection_struct.unpack_from(raw, offset)
    offset_info(offset, pc, "uconnection: id X upperbuilding req_researches age ur0 ur1 X[8] mode0 mode1 X[7] verticalline unitcount", unit_connection_struct)
    offset += unit_connection_struct.size

    self.data["id"] = pc[0]
    #self.data[""] = pc[1]
    self.data["upper_building"] = pc[2]
    self.data["required_researches"] = pc[3]
    self.data["age"] = pc[4]
    self.data["unit_or_research0"] = pc[5]
    self.data["unit_or_research1"] = pc[6]
    #self.data[""] = pc[7:(7+8)]
    self.data["mode0"] = pc[15]
    self.data["mode1"] = pc[16]
    #self.data[""] = pc[17:(17+7)]
    self.data["vertical_line"] = pc[24]
    self.data["unit_count"] = pc[25]

    #int32_t unit[unit_count];
    unit_connection_struct = Struct(endianness + "%di" % self.data["unit_count"])
    pc = unit_connection_struct.unpack_from(raw, offset)
    offset_info(offset, pc, "uconnection: unit", unit_connection_struct, 1)
    offset += unit_connection_struct.size

    self.data["unit"] = pc

    #int32_t location_in_age;
    #int32_t required_research;
    #int32_t line_mode;
    #int32_t enabling_research;
    unit_connection_struct = Struct(endianness + "4i")
    pc = unit_connection_struct.unpack_from(raw, offset)
    offset_info(offset, pc, "uconnection: locationinage requiredresearch linemode enablingresearch", unit_connection_struct)
    offset += unit_connection_struct.size

    self.data["location_in_age"] = pc[0]
    self.data["required_research"] = pc[1]
    self.data["line_mode"] = pc[2]
    self.data["enabling_research"] = pc[3]

    return offset

def unpack(self, data):
    # structure taken from cryptfs.h in crespo source.
    s = Struct('<'+'L H H')
    ftrMagic, majorVersion, minorVersion = s.unpack_from(data)
    if minorVersion < SCRYPT_ADDED_MINOR:
        s = Struct('<'+'L H H L L L L L L L 64s L 48s 16s')
        (self.ftrMagic, self.majorVersion, self.minorVersion,
         self.ftrSize, self.flags, self.keySize, self.spare1,
         self.fsSize1, self.fsSize2, self.failedDecrypt, self.cryptoType,
         self.spare2, self.cryptoKey, self.cryptoSalt) = s.unpack_from(data)

        self.cryptoKey = self.cryptoKey[0:self.keySize]
    else:
        s = Struct('<'+'L H H L L L L L L L 64s L 48s 16s 2Q L B B B B')
        (self.ftrMagic, self.majorVersion, self.minorVersion, self.ftrSize,
         self.flags, self.keySize, self.spare1, self.fsSize1, self.fsSize2,
         self.failedDecrypt, self.cryptoType, self.spare2, self.cryptoKey,
         self.cryptoSalt, self.persistDataOffset1, self.persistDataOffset2,
         self.persistDataSize, self.kdf, self.N_factor, self.r_factor,
         self.p_factor) = s.unpack_from(data)

        self.cryptoKey = self.cryptoKey[0:self.keySize]
        self.N = 1 << self.N_factor
        self.r = 1 << self.r_factor
        self.p = 1 << self.p_factor

def read(self, raw, offset):
    self.data = dict()

    #bool exists;
    unit_header_header_struct0 = Struct(endianness + "?")
    pc = unit_header_header_struct0.unpack_from(raw, offset)
    offset += unit_header_header_struct0.size

    self.data["exists"] = pc[0]

    if self.data["exists"] == True:
        unit_header_header_struct1 = Struct(endianness + "H")
        pc = unit_header_header_struct1.unpack_from(raw, offset)
        offset += unit_header_header_struct1.size

        self.data["unit_command_count"] = pc[0]

        self.data["unit_command"] = list()
        for i in range(self.data["unit_command_count"]):
            t = UnitCommand()
            offset = t.read(raw, offset)
            self.data["unit_command"] += [t.data]

    return offset

def read(self, raw, offset): self.data = dict() self.data["terrain_border"] = list() for i in range(16): t = TerrainBorder() offset = t.read(raw, offset) self.data["terrain_border"] += [t.data] #int8_t zero[28]; #uint16_t terrain_count_additional; zero_terrain_count_struct = Struct(endianness + "28c H") pc = zero_terrain_count_struct.unpack_from(raw, offset) offset += zero_terrain_count_struct.size self.data["terrain_count_additional"] = pc[28] tmp_struct = Struct(endianness + "12722s") t = tmp_struct.unpack_from(raw, offset) offset_begin = offset offset += tmp_struct.size fname = 'raw/terrain_render_data_%d_to_%d.raw' % (offset_begin, offset) filename = file_get_path(fname, write=True) file_write(filename, t[0]) return offset
def read(self, raw, offset):
    self.data = dict()

    #int16_t[6] required_techs;
    research0_struct = Struct(endianness + "6h")
    pc = research0_struct.unpack_from(raw, offset)
    offset += research0_struct.size

    # store all six required tech ids, not just the first one
    self.data["required_techs"] = pc[0:6]

    self.data["research_ressource_cost"] = list()
    for i in range(3):
        t = ResearchRessourceCost()
        offset = t.read(raw, offset)
        self.data["research_ressource_cost"] += [t.data]

    #int16_t required_tech_count;
    #int16_t civilisation_id;
    #int16_t full_tech_mode;
    #int16_t research_location_id;
    #uint16_t language_dll_name;
    #uint16_t language_dll_description;
    #int16_t research_time;
    #int16_t tech_id;
    #int16_t tech_type;
    #int16_t icon_id;
    #int8_t button_id;
    #int32_t pointers[3];
    #uint16_t name_length;
    research1_struct = Struct(endianness + "4h 2H 4h b 3i H")
    pc = research1_struct.unpack_from(raw, offset)
    offset += research1_struct.size

    self.data["required_tech_count"] = pc[0]
    self.data["civilisation_id"] = pc[1]
    self.data["full_tech_mode"] = pc[2]
    self.data["research_location_id"] = pc[3]
    self.data["language_dll_name"] = pc[4]
    self.data["language_dll_description"] = pc[5]
    self.data["research_time"] = pc[6]
    self.data["tech_id"] = pc[7]
    self.data["tech_type"] = pc[8]
    self.data["icon_id"] = pc[9]
    self.data["button_id"] = pc[10]
    self.data["pointers"] = pc[11:(11+3)]
    self.data["name_length"] = pc[14]

    research_name_struct = Struct(endianness + "%ds" % self.data["name_length"])
    pc = research_name_struct.unpack_from(raw, offset)
    offset += research_name_struct.size

    #char name[name_length];
    self.data["name"] = zstr(pc[0])

    return offset

def parse_sol_files(sol_files):
    results = []
    for sol_file in sol_files:
        print("Reading Flash state file: {0}\n".format(sol_file))
        with open(sol_file, 'rb') as f:
            data = f.read()

        # What follows is a limited parser for Flash Local Shared Object files -
        # a more complete implementation may be found at:
        # https://pypi.python.org/pypi/PyAMF
        header = Struct('>HI10s8sI')
        magic, objlength, magic2, mjinfo, padding = header.unpack_from(data)
        offset = header.size
        assert magic == 0xbf
        assert magic2 == b'TCSO\0\x04\0\0\0\0'
        assert mjinfo == b'\0\x06mjinfo'
        assert padding == 0

        ushort = Struct('>H')
        ubyte = Struct('>B')
        while offset < len(data):
            length, = ushort.unpack_from(data, offset)
            offset += ushort.size
            name = data[offset:offset+length]
            offset += length

            amf0_type, = ubyte.unpack_from(data, offset)
            offset += ubyte.size
            # Type 2: UTF-8 String, prefixed with 2-byte length
            if amf0_type == 2:
                length, = ushort.unpack_from(data, offset)
                offset += ushort.size
                value = data[offset:offset+length]
                offset += length
            # Type 6: Undefined
            elif amf0_type == 6:
                value = None
            # Type 1: Boolean
            elif amf0_type == 1:
                value = bool(data[offset])
                offset += 1
            # Other types from the AMF0 specification are not implemented, as they
            # have not been observed in mjinfo.sol files. If required, see
            # http://download.macromedia.com/pub/labs/amf/amf0_spec_121207.pdf
            else:
                print("Unimplemented AMF0 type {} at offset={} (hex {})".format(amf0_type, offset, hex(offset)))

            trailer_byte = data[offset]
            assert trailer_byte == 0
            offset += 1

            if name == b'logstr':
                results = filter(None, value.split(b'\n'))
                results = [i.decode('ASCII') for i in results]

    return results

def _get_font_name(file_path):
    try:
        #customize path
        f = open(file_path, 'rb')
        #header
        shead = Struct('>IHHHH')
        fhead = f.read(shead.size)
        dhead = shead.unpack_from(fhead, 0)
        #font directory
        stable = Struct('>4sIII')
        ftable = f.read(stable.size * dhead[1])
        for i in range(dhead[1]):
            #directory records
            dtable = stable.unpack_from(ftable, i * stable.size)
            if dtable[0] == 'name':
                break
        assert dtable[0] == 'name'
        #name table
        f.seek(dtable[2])  #at offset
        fnametable = f.read(dtable[3])  #length
        snamehead = Struct('>HHH')  #name table head
        dnamehead = snamehead.unpack_from(fnametable, 0)
        sname = Struct('>HHHHHH')
    except:
        return {}

    NAME_ID = {
        1: 'family_name',
        4: 'full_name',
        6: 'postscript_name'
    }
    result = {}
    for i in range(dnamehead[1]):
        #name table records
        dname = sname.unpack_from(fnametable, snamehead.size + i * sname.size)
        if dname[3] in NAME_ID:
            _name = unpack_from('%is' % dname[4], fnametable, dnamehead[2] + dname[5])[0]
            try:
                if dname[2] > 0:
                    _name = _name.decode('utf-16-be')
            except:
                pass
            try:
                _name = _name or _name.decode('mbcs')
            except:
                pass
            result.update({NAME_ID[dname[3]]: _name})

    _compact_full_name = result[NAME_ID[4]].replace(' ', '')
    if NAME_ID[6] not in result or len(_compact_full_name) > len(result[NAME_ID[6]]):
        result.update({NAME_ID[6]: _compact_full_name})
    return result

def read(self, raw, offset):
    self.data = dict()

    #int16_t enabled;
    #char name0[13];
    #char name1[13];
    #int32_t ressource_id;
    #int32_t unknown;
    #int32_t unknown;
    #uint8_t color[3];
    #int8_t unknown;
    #int32_t unknown;
    #int32_t unknown;
    terrain_border_struct0 = Struct(endianness + "h 13s 13s 3i 3B b 2i")
    pc = terrain_border_struct0.unpack_from(raw, offset)
    offset += terrain_border_struct0.size

    self.data["enabled"] = pc[0]
    self.data["name0"] = zstr(pc[1])
    self.data["name1"] = zstr(pc[2])
    self.data["ressource_id"] = pc[3]
    #self.data[""] = pc[4]
    #self.data[""] = pc[5]
    self.data["color"] = pc[6:(6+3)]
    #self.data[""] = pc[9]
    #self.data[""] = pc[10]
    #self.data[""] = pc[11]

    self.data["frame_data"] = list()
    for i in range(230):
        t = FrameData()
        offset = t.read(raw, offset)
        self.data["frame_data"] += [t.data]

    #int16_t frame_count;
    #int16_t unknown;
    #int16_t unknown;
    #int16_t unknown;
    terrain_border_struct1 = Struct(endianness + "4h")
    pc = terrain_border_struct1.unpack_from(raw, offset)
    offset += terrain_border_struct1.size

    self.data["frame_count"] = pc[0]
    #self.data[""] = pc[1]
    #self.data[""] = pc[2]
    #self.data[""] = pc[3]

    return offset

def binrepr(cls, buffer): lenStruct = Struct("HHHH") (headerLen, _, pageClassLen, schemaDescLen) = lenStruct.unpack_from(buffer) if headerLen > 0 and pageClassLen > 0 and schemaDescLen > 0: return Struct("HHHH"+str(pageClassLen)+"s"+str(schemaDescLen)+"s") else: raise ValueError("Invalid header length read from storage file header")
def _unpack_symblock(self, offset):
    code = Struct('>H')
    self._fobj.seek(self._start_offset + offset)
    blk = self.sym_block_fmt.unpack_file(self._fobj)
    print blk
    self.sym_block = []
    assert blk.divider == -1
    assert blk.block_id == 1
    blk_data = self._fobj.read(blk.block_len - 6)  # 6 for len and nlayers
    layer_off = 0
    for l in range(blk.nlayer):
        layer_hdr = self.sym_layer_fmt.unpack_from(blk_data, layer_off)
        print layer_hdr
        assert layer_hdr.divider == -1
        layer_off += self.sym_layer_fmt.size
        layer_data = blk_data[layer_off:layer_off + layer_hdr.length]
        layer_off += layer_hdr.length
        data_off = 0
        layer = []
        self.sym_block.append(layer)
        while data_off < len(layer_data):
            packet_code, = code.unpack_from(layer_data, data_off)
            data_off += code.size
            print packet_code, '%x' % packet_code
            data, size = self.packet_map[packet_code](layer_data[data_off:])
            layer.append(data)
            data_off += size

def read(self, raw, offset):
    self.data = dict()

    #int16_t graphic_id;
    #int16_t unknown;
    #int16_t unknown;
    #int16_t unknown;
    #int16_t direction_x;
    #int16_t direction_y;
    #int16_t unknown;
    #int16_t unknown;
    graphic_delta_struct = Struct(endianness + "8h")
    pc = graphic_delta_struct.unpack_from(raw, offset)
    offset += graphic_delta_struct.size

    self.data["graphic_id"] = pc[0]
    #self.data[""] = pc[1]
    #self.data[""] = pc[2]
    #self.data[""] = pc[3]
    self.data["direction_x"] = pc[4]
    self.data["direction_y"] = pc[5]
    #self.data[""] = pc[6]
    #self.data[""] = pc[7]

    return offset

def unpack_records(format, data):
    """
    Yield the records contained in a binary string
    """
    record_struct = Struct(format)
    for offset in range(0, len(data), record_struct.size):
        yield record_struct.unpack_from(data, offset)

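A minimal usage sketch for the generator above (not from the original source): the '<iif' record format and the packed sample bytes are made up purely for illustration.

from struct import pack

# two fake fixed-size records: int, int, float (little-endian)
sample = pack('<iif', 1, 2, 0.5) + pack('<iif', 3, 4, 1.5)
for record in unpack_records('<iif', sample):
    print(record)   # (1, 2, 0.5) then (3, 4, 1.5)
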
def __extract_fdt_header(self):
    """Extract DTB header"""
    header = Struct(self.__fdt_header_format)
    header_entry = Struct(">I")
    data = self.infile.read(header.size)
    result = dict(zip(self.__fdt_header_names, header.unpack_from(data)))
    if result['version'] >= 2:
        data = self.infile.read(header_entry.size)
        result['boot_cpuid_phys'] = header_entry.unpack_from(data)[0]
    if result['version'] >= 3:
        data = self.infile.read(header_entry.size)
        result['size_dt_strings'] = header_entry.unpack_from(data)[0]
    if result['version'] >= 17:
        data = self.infile.read(header_entry.size)
        result['size_dt_struct'] = header_entry.unpack_from(data)[0]
    return result

def __extract_fdt_dt(self):
    """Extract tags"""
    cell = Struct(self.__fdt_dt_cell_format)
    tags = []
    self.infile.seek(self.fdt_header['off_dt_struct'])
    while True:
        data = self.infile.read(cell.size)
        if len(data) < cell.size:
            break
        tag, = cell.unpack_from(data)
        # print "*** %s" % self.__fdt_dt_tag_name.get(tag, '')
        if self.__fdt_dt_tag_name.get(tag, '') in 'node_begin':
            name = self.__extract_fdt_nodename()
            if len(name) == 0:
                name = '/'
            tags.append((tag, name))
        elif self.__fdt_dt_tag_name.get(tag, '') in ('node_end', 'nop'):
            tags.append((tag, ''))
        elif self.__fdt_dt_tag_name.get(tag, '') in 'end':
            tags.append((tag, ''))
            break
        elif self.__fdt_dt_tag_name.get(tag, '') in 'prop':
            propdata = self.__extract_fdt_prop()
            tags.append((tag, propdata))
        else:
            print("Unknown Tag %d" % tag)
    return tags

def __init__(self, fname):
    self.fname = fname
    dbg("reading blendomatic data from %s" % fname, 1, push="blendomatic")

    fname = file_get_path(fname, write=False)
    f = file_open(fname, binary=True, write=False)

    buf = f.read(Blendomatic.blendomatic_header.size)
    self.header = Blendomatic.blendomatic_header.unpack_from(buf)

    blending_mode_count, tile_count = self.header

    dbg("%d blending modes, each %d tiles" % (blending_mode_count, tile_count), 2)

    blending_mode = Struct(endianness + "I %dB" % (tile_count))

    self.blending_modes = list()
    for i in range(blending_mode_count):
        header_data = f.read(blending_mode.size)
        bmode_header = blending_mode.unpack_from(header_data)

        new_mode = BlendingMode(i, f, tile_count, bmode_header)
        self.blending_modes.append(new_mode)

    f.close()
    dbg(pop="blendomatic")

def read(self, raw, offset):
    self.data = dict()

    #int32_t id;
    #int32_t palette;
    #int32_t color;
    #int32_t unknown;
    #int32_t unknown;
    #int32_t minimap_color;
    #int32_t unknown;
    #int32_t unknown;
    #int32_t unknown;
    player_color_struct = Struct(endianness + "9i")
    pc = player_color_struct.unpack_from(raw, offset)
    offset += player_color_struct.size

    self.data["id"] = pc[0]
    self.data["palette"] = pc[1]
    self.data["color"] = pc[2]
    #self.data[""] = pc[0]
    #self.data[""] = pc[0]
    self.data["minimap_color"] = pc[5]
    #self.data[""] = pc[0]
    #self.data[""] = pc[0]

    return offset

class Block:
    def __init__(self, structure, encoding):
        self._structure = structure
        self._encoding = encoding
        # Create a new Struct object to correctly read the binary data in this
        # block; in particular, pass it along that it is little endian (<),
        # along with all expected fields.
        self._compiled = Struct("<" + "".join(
            [field.format for field in self._structure]))
        self.size = self._compiled.size

    def unpack(self, buffer, offset=0):
        # Use the Struct to read the binary data in the buffer
        # where this block appears at the given offset.
        values = self._compiled.unpack_from(buffer, offset)
        # Match up each value with the corresponding field in the block
        # and put it in a dictionary for easy reference.
        return {field.field_name: value
                for value, field in zip(values, self._structure)}

    def _unpack_from_file(self, file, offset=None):
        if offset is not None:
            # move the pointer in the file to the specified offset;
            # this is not index 0
            file.seek(offset)
        # read in the amount of data corresponding to the block size
        buffer = file.read(self.size)
        # return the values of the fields after unpacking them
        return self.unpack(buffer)

    def unpack_from_file(self, file, seek=None):
        # When more advanced behaviour is needed,
        # this method can be overridden by subclassing.
        return self._unpack_from_file(file, seek)

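A usage sketch for the Block class above, not part of the original source: it assumes a field descriptor only needs format and field_name attributes, so a namedtuple stands in for whatever field type the surrounding code actually uses, and the encoding argument is simply passed through.

from collections import namedtuple
from struct import pack

Field = namedtuple('Field', ['field_name', 'format'])

# hypothetical block layout: a 32-bit id followed by a 16-bit flags word
header_block = Block([Field('id', 'I'), Field('flags', 'H')], encoding='ascii')
print(header_block.unpack(pack('<IH', 42, 5)))   # {'id': 42, 'flags': 5}
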
def get_file_size_from_rar(first_rar_filename):
    log_name = __name__ + " [RAR]"
    RAR_BLOCK_MAIN = 0x73  # s
    RAR_BLOCK_FILE = 0x74  # t
    RAR_FILE_LARGE = 0x0100
    RAR_ID = str("Rar!\x1a\x07\x00")
    S_BLK_HDR = Struct('<HBHH')
    S_FILE_HDR = Struct('<LLBLLBBHL')
    S_LONG = Struct('<L')

    fd = xbmcvfs.File(first_rar_filename)
    if fd.read(len(RAR_ID)) == RAR_ID:
        log(log_name, "Reading file headers")
        while True:
            buf = fd.read(S_BLK_HDR.size)
            if not buf:
                return None
            t = S_BLK_HDR.unpack_from(buf)
            header_crc, header_type, header_flags, header_size = t
            pos = S_BLK_HDR.size

            # read full header
            header_data = buf + fd.read(header_size - S_BLK_HDR.size) if header_size > S_BLK_HDR.size else buf
            if len(header_data) != header_size:
                return None  # unexpected EOF?

            if header_type == RAR_BLOCK_MAIN:
                log(log_name, "Main block found")
                continue
            elif header_type == RAR_BLOCK_FILE:
                log(log_name, "File block found")
                file_size = S_FILE_HDR.unpack_from(header_data, pos)[1]
                log(log_name, "File in rar size: %s" % file_size)
                if header_flags & RAR_FILE_LARGE:
                    # Large file support
                    log(log_name, "Large file flag")
                    file_size |= S_LONG.unpack_from(header_data, pos + S_FILE_HDR.size + 4)[0] << 32
                    log(log_name, "File in rar size: %s after large file" % file_size)
                return file_size
            else:
                log(__name__, "RAR unknown header type %s" % header_type)
                return None
    else:
        return None

def unpack_records(format, data):
    '''
    read the file entirely into a byte string with a single read
    and convert it piece by piece
    '''
    record_struct = Struct(format)
    return (record_struct.unpack_from(data, offset)
            for offset in range(0, len(data), record_struct.size))

def binrepr(cls, buffer): lenStruct = Struct("H") reprLen = lenStruct.unpack_from(buffer)[0] if reprLen > 0: fmt = "H"+str(FileId.binrepr.size)+"s" filePathLen = reprLen-struct.calcsize(fmt) return Struct(fmt+str(filePathLen)+"s") else: raise ValueError("Invalid format length read from storage file serialization")
def unpack(self, data):
    # structure taken from cryptfs.h in crespo source.
    s = Struct('<'+'L H H')
    ftrMagic, majorVersion, minorVersion = s.unpack_from(data)
    if minorVersion < SCRYPT_ADDED_MINOR:
        s = Struct('<'+'L H H L L L L L L L 64s L 48s 16s')
        (self.ftrMagic, self.majorVersion, self.minorVersion,
         self.ftrSize, self.flags, self.keySize, self.spare1,
         self.fsSize1, self.fsSize2, self.failedDecrypt, self.cryptoType,
         self.spare2, self.cryptoKey, self.cryptoSalt) = s.unpack_from(data)

        self.cryptoKey = self.cryptoKey[0:self.keySize]
    elif minorVersion == SCRYPT_ADDED_MINOR:
        s = Struct('<'+'L H H L L L L L L L 64s L 48s 16s 2Q L B B B B')
        (self.ftrMagic, self.majorVersion, self.minorVersion, self.ftrSize,
         self.flags, self.keySize, self.spare1, self.fsSize1, self.fsSize2,
         self.failedDecrypt, self.cryptoType, self.spare2, self.cryptoKey,
         self.cryptoSalt, self.persistDataOffset1, self.persistDataOffset2,
         self.persistDataSize, self.kdf, self.N_factor, self.r_factor,
         self.p_factor) = s.unpack_from(data)

        self.cryptoKey = self.cryptoKey[0:self.keySize]
        self.N = 1 << self.N_factor
        self.r = 1 << self.r_factor
        self.p = 1 << self.p_factor
    else:
        s = Struct('<'+'L H H L L L L Q L 64s L 48s 16s 2Q L B B B B Q 32s 2048s L 32s')
        (self.ftrMagic, self.majorVersion, self.minorVersion, self.ftrSize,
         self.flags, self.keySize, self.crypt_type, self.fsSize,
         self.failedDecrypt, self.cryptoType, self.spare2, self.cryptoKey,
         self.cryptoSalt, self.persistDataOffset1, self.persistDataOffset2,
         self.persistDataSize, self.kdf, self.N_factor, self.r_factor,
         self.p_factor, self.encrypted_upto, self.hash_first_block,
         self.km_blob, self.km_blob_size,
         self.scrypted_intermediate_key) = s.unpack_from(data)

        self.cryptoKey = self.cryptoKey[0:self.keySize]
        self.N = 1 << self.N_factor
        self.r = 1 << self.r_factor
        self.p = 1 << self.p_factor
        self.km_blob = self.km_blob[0:self.km_blob_size]

def binrepr(cls, buffer): lenStruct = Struct("HHHH") (headerLen, _, pageClassLen, schemaDescLen) = lenStruct.unpack_from(buffer) if headerLen > 0 and pageClassLen > 0 and schemaDescLen > 0: return Struct("HHHH" + str(pageClassLen) + "s" + str(schemaDescLen) + "s") else: raise ValueError( "Invalid header length read from storage file header")
def binrepr(cls, buffer): lenStruct = Struct("H") numSlots = lenStruct.unpack_from(buffer, offset=PageHeader.size)[0] slotArrayLen = numSlots >> 3 if numSlots % 8 != 0: slotArrayLen += 1 if numSlots > 0: return Struct(SlottedPageHeader.prefixFmt+str(slotArrayLen)+"s") else: raise ValueError("Invalid number of slots in slotted page header")
def __init__(self, data, data_offset=0):
    Chunk.__init__(self, data, data_offset)
    slice_offset = data_offset + 6
    slice_chunk_struct = Struct(SliceChunk.slice_chunk_format)
    num_slices, self.flags, self.reserved = slice_chunk_struct.unpack_from(data, slice_offset)
    slice_offset += slice_chunk_struct.size
    string_size, self.name = parse_string(data, slice_offset)
    slice_offset += string_size
    self.slices = []
    for i in range(num_slices):
        slice_struct = Struct(SliceChunk.slice_format)
        slice = {}
        (
            slice['start_frame'],
            slice['x'],
            slice['y'],
            slice['width'],
            slice['height']
        ) = slice_struct.unpack_from(data, slice_offset)
        slice_offset += slice_struct.size
        if self.flags & 1 != 0:
            slice_bit_1_struct = Struct(SliceChunk.slice_bit_1_format)
            slice['center'] = {}
            (
                slice['center']['x'],
                slice['center']['y'],
                slice['center']['width'],
                slice['center']['height']
            ) = slice_bit_1_struct.unpack_from(data, slice_offset)
            slice_offset += slice_bit_1_struct.size
        if self.flags & 2 != 0:
            slice_bit_2_struct = Struct(SliceChunk.slice_bit_2_format)
            slice['pivot'] = {}
            (
                slice['pivot']['x'],
                slice['pivot']['y'],
            ) = slice_bit_2_struct.unpack_from(data, slice_offset)
            slice_offset += slice_bit_2_struct.size
        self.slices.append(slice)

def __init__(self, data, data_offset=0):
    Chunk.__init__(self, data, data_offset)
    cel_struct = Struct(CelExtraChunk.celextra_format)
    (
        self.flags,
        self.precise_x_pos,
        self.precise_y_pos,
        self.cel_width,
        self.cel_height
    ) = cel_struct.unpack_from(data, data_offset + 6)

def __init__(self, data, data_offset=0):
    Chunk.__init__(self, data, data_offset)
    packet_struct = Struct('<BB')
    color_packet_struct = Struct('<BBB')
    # the packet count is the first word of the chunk payload, which starts
    # after the 6-byte chunk header (as in the other chunk parsers)
    (self.num_packets,) = Struct('<H').unpack_from(data, data_offset + 6)
    self.packets = []
    packet_offset = data_offset + 8
    for packet_index in range(self.num_packets):
        packet = {'colors': []}
        (packet['previous_packet_skip'], num_colors) = packet_struct.unpack_from(data, packet_offset)
        packet_offset += 2
        for color in range(0, num_colors):
            (red, blue, green) = color_packet_struct.unpack_from(data, packet_offset)
            packet['colors'].append([red, blue, green])
            packet_offset += 3
        self.packets.append(packet)

def read_filename_attribute_datarun_entry(dump, offset):
    dump.seek(offset, 0)
    data = dump.read(ATTR_FN_DATARUN_ENTRY_HEADER_FORMAT.size)
    fields = ATTR_FN_DATARUN_ENTRY_HEADER_FORMAT.unpack_from(data, 0)
    attr = {'_absolute_offset': offset,
            'size': fields[0],
            'unknown0': fields[1],
            'unknown1': fields[2],
            'unknown2': fields[3],
            'header_size': fields[4],
            'header_body_size': fields[5],
            'body_size_copy': fields[6],
            'unknown3': fields[7],
            'attributeid?': fields[8]}

    attr['_body_offset'] = attr['header_size']
    dump.seek(offset + attr['_body_offset'], 0)
    data = dump.read(ATTR_FN_DATARUN_ENTRY_BODY_FORMAT.size)
    fields = ATTR_FN_DATARUN_ENTRY_BODY_FORMAT.unpack_from(data, 0)
    attr['body_size'] = fields[0]
    # NOTE: This is wrong in the thesis, logical comes before physical
    attr['physical_size'] = fields[2]
    attr['logical_size'] = fields[3]

    attr['_body_list_offset'] = attr['_body_offset'] + attr['body_size']
    dump.seek(offset + attr['_body_list_offset'], 0)
    data = dump.read(ATTR_FN_DATARUN_ENTRY_BODY_LIST_FORMAT.size)
    fields = ATTR_FN_DATARUN_ENTRY_BODY_LIST_FORMAT.unpack_from(data, 0)
    attr['body_list_size'] = fields[0]
    attr['body_list_offset_next_record'] = fields[1]
    attr['body_list_free_space'] = fields[2]
    attr['offset_pointers'] = fields[4]
    attr['num_pointers'] = fields[5]
    attr['body_list_end_struct'] = fields[6]

    if attr['num_pointers']:
        pointers_format = Struct('<' + ('L' * attr['num_pointers']))
        dump.seek(offset + attr['_body_list_offset'] + attr['offset_pointers'], 0)
        data = dump.read(pointers_format.size)
        fields = pointers_format.unpack_from(data, 0)
        attr['pointers'] = fields
        attr['pointers_data'] = []
    else:
        attr['pointers'] = None
        attr['pointers_data'] = None

    attr['_structure_size'] = attr['_body_list_offset'] + attr['body_list_size']

    if attr['pointers']:
        for ptr in attr['pointers']:
            ptr_addr = offset + attr['_body_list_offset'] + ptr
            dump.seek(ptr_addr, 0)
            data = dump.read(ATTR_FN_DATARUN_ENTRY_BODY_LIST_ENTRY_FORMAT.size)
            fields = ATTR_FN_DATARUN_ENTRY_BODY_LIST_ENTRY_FORMAT.unpack_from(data, 0)
            entry = {'_absolute_offset': ptr_addr,
                     'size': fields[0],
                     'num_blocks': fields[2],
                     'blockid': fields[3]}
            attr['pointers_data'].append(entry)

    return attr

def binrepr(cls, buffer): lenStruct = Struct("H") numSlots = lenStruct.unpack_from(buffer, offset=PageHeader.size)[0] slotArrayLen = numSlots >> 3 if numSlots % 8 != 0: slotArrayLen += 1 if numSlots > 0: return Struct(SlottedPageHeader.prefixFmt + str(slotArrayLen) + "s") else: raise ValueError("Invalid number of slots in slotted page header")
def __extract_fdt_reserve_entries(self):
    """Extract reserved memory entries"""
    header = Struct(self.__fdt_reserve_entry_format)
    entries = []
    self.infile.seek(self.fdt_header["off_mem_rsvmap"])
    while True:
        data = self.infile.read(header.size)
        result = dict(zip(self.__fdt_reserve_entry_names, header.unpack_from(data)))
        if result["address"] == 0 and result["size"] == 0:
            return entries
        entries.append(result)

def read(self, raw, offset):
    self.data = dict()

    #int16_t one;
    #int16_t id;
    #int8_t unknown;
    #int16_t type;
    #int16_t class_id;
    #int16_t unit_id;
    #int16_t unknown;
    #int16_t ressource_in;
    #int16_t sub_type;
    #int16_t ressource_out;
    #int16_t unknown;
    #float work_rate_multiplier;
    #float execution_radius;
    #float extra_range;
    #int8_t unknown;
    #float unknown;
    #int8_t unknown;
    #int8_t unknown;
    #int32_t unknown;
    #int8_t unknown;
    #int8_t unknown;
    #int8_t unknown;
    #int16_t graphic[6];
    unit_command_struct = Struct(endianness + "2h b 8h 3f b f 2b i 3b 6h")
    pc = unit_command_struct.unpack_from(raw, offset)
    offset += unit_command_struct.size

    self.data["one"] = pc[0]
    self.data["id"] = pc[1]
    #self.data[""] = pc[2]
    self.data["class_id"] = pc[3]
    self.data["unit_id"] = pc[4]
    #self.data[""] = pc[5]
    self.data["ressource_in"] = pc[6]
    self.data["ressource_out"] = pc[7]
    #self.data[""] = pc[8]
    self.data["work_rate_multiplier"] = pc[9]
    self.data["execution_radius"] = pc[10]
    self.data["extra_range"] = pc[11]
    #self.data[""] = pc[12]
    #self.data[""] = pc[13]
    #self.data[""] = pc[14]
    #self.data[""] = pc[15]
    #self.data[""] = pc[16]
    #self.data[""] = pc[17]
    #self.data[""] = pc[18]
    #self.data[""] = pc[19]
    self.data["graphic"] = pc[20:(20+6)]

    return offset

def tabdes(filename, body):
    """Deserialize file to data"""
    # XXX checksums ignored
    head = Struct("!BiHBxxxB")
    body = Struct(body)
    # foot = Struct("!4s")
    data = []
    with open(filename, "rb") as f:
        buffer = f.read()
        _, _, count, length, _ = head.unpack_from(buffer, 0)
        offset = head.size
        for i in range(count):
            row = body.unpack_from(buffer, offset)
            data.append(row)
            offset += body.size
        else:
            print("read %d rows" % len(data))
        # offset = 2 ** 16 - foot.size
        # _, foot.unpack_from(buffer, offset))
    return data

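An illustrative round trip for tabdes, not from the original source: the '!HH' row format and the 12-byte header packed below are hypothetical and only mirror what the function itself reads back (the row count in the third header field, the row length in the fourth).

import struct
import tempfile

rows = [(1, 2), (3, 4)]
blob = struct.pack("!BiHBxxxB", 0, 0, len(rows), struct.calcsize("!HH"), 0)
blob += b"".join(struct.pack("!HH", *row) for row in rows)
with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(blob)
print(tabdes(f.name, "!HH"))   # [(1, 2), (3, 4)]
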
def fromFile(cls, f):
    pos = f.tell()
    if pos == 0:
        lenStruct = Struct("H")
        headerLen = lenStruct.unpack_from(f.peek(lenStruct.size))[0]
        if headerLen > 0:
            buffer = f.read(headerLen)
            return FileHeader.unpack(buffer)
        else:
            raise ValueError("Invalid header length read from storage file header")
    else:
        raise ValueError("Cannot read file header, file positioned beyond its start.")

def __init__(self, data, data_offset=0):
    header_struct = Struct(Header.header_format)
    (self.filesize, self.magic_number, self.num_frames, self.width,
     self.height, self.color_depth, self.flags, self.palette_mask,
     self.num_colors, self.pixel_width,
     self.pixel_height) = header_struct.unpack_from(data, data_offset)

    if self.magic_number != 0xA5E0:
        raise ValueError(
            'Incorrect magic number, expected {:x}, got {:x}'.format(
                0xA5E0, self.magic_number))

def parse(self, data):
    s = Struct('<'+'L L 512s L 512s L 16s 512s L 32s')
    (self.magic_num, self.version_num, self.modulus, self.modulus_size,
     self.pub_exp, self.pub_exp_size, self.iv, self.enc_priv_exp,
     self.enc_priv_exp_size, self.hmac) = s.unpack_from(data)

    self.modulus = self.modulus[0:self.modulus_size]
    self.pub_exp = self.pub_exp[0:self.pub_exp_size]
    self.enc_priv_exp = self.enc_priv_exp[0:self.enc_priv_exp_size]

def craw(b, d, l, depth):
    S_CRAW = Struct('>LL16sHHHHHHLH32sHHHH')
    NT_CRAW = namedtuple('craw', 'w h bits')
    _, _, _, w, h, _, _, _, _, _, _, _, bits, _, _, _ = S_CRAW.unpack_from(d, 0)
    #print(S_CRAW.unpack_from(d, 0))
    _craw = NT_CRAW(w, h, bits)
    #print(_craw)
    if not options.quiet:
        print("CRAW: (0x{0:x})".format(l))
        print(' %swidth=%d, height=%d, bits=%d' % (depth * ' ', w, h, bits))
    return _craw

def _parse_footer(cls, stct: Struct, d: ByteString) -> Tuple[int, int, float, int, int]:
    """parse footer returning the data: estimated elements, elements added,
    false positive rate, hash function, number hashes, number bits"""
    e_elms, e_added, fpr = stct.unpack_from(bytearray(d))

    est_elements = e_elms
    els_added = e_added
    fpr = float(fpr)
    fpr, n_hashes, n_bits = cls._get_optimized_params(est_elements, fpr)

    return int(est_elements), int(els_added), float(fpr), int(n_hashes), int(n_bits)

def __unpack_value(self, structure, data, results, evaluator_dict, parent, fields, **kargs):
    struct = None
    value = structure.get("value", None)
    if value is not None:
        struct = Struct(self.__format.ByteOrder + value)
    if struct:
        result = struct.unpack_from(data)
        data = data[struct.size:]
    else:
        result = None,
    results = results + result
    evaluator_dict = self.__update_evaluator(evaluator_dict, parent, fields, results)
    return results, data, evaluator_dict

def __init__(self, response_bytes, as_float=False, as_unsigned=True):
    PKE, IND = unpack(">HH", response_bytes[:4])
    self.responseID = PKE >> 12
    self.responseIDstr = getEnumString(ResponseID, self.responseID)
    pnu = PKE & 0b11111111111
    ext = (IND >> 8) & 0b11111100
    try:
        self.param = pnu + pnu_offset[ext]
    except:
        self.param = 0
    PWE = response_bytes[4:]
    nPWE = str(len(PWE) // 2)  # number of 16-bit words in the PWE area

    # noResponse, xferParam16, xferParam32, xferDesc, xferArray16, xferArray32, xferNum
    pwe_interpreter = {
        ResponseID.noResponse: "",
        ResponseID.xferParam16: ">H" if as_unsigned else ">h",
        ResponseID.xferParam32: ">f" if as_float else ">I" if as_unsigned else ">i",
        ResponseID.xferDesc: ">H",
        ResponseID.xferArray16: ">" + nPWE + "H" if as_unsigned else ">" + nPWE + "h",
        ResponseID.xferArray32: ">" + nPWE + "f" if as_float else ">" + nPWE + "I" if as_unsigned else ">" + nPWE + "i",
        ResponseID.cannotExec: ">H",
        ResponseID.noPKWRights: ">H",
        ResponseID.changeReport16: ">H",
        ResponseID.changeReport32: ">H",
        ResponseID.changeReportArray16: ">H",
        ResponseID.changeReportArray32: ">H",
        ResponseID.transferText: ">H",
    }.get(self.responseID)

    pwe_struct = Struct(pwe_interpreter)
    self.bytesize = pwe_struct.size
    super(Response, self).__init__(pwe_struct.unpack_from(PWE))

def read_directory_metadata_attribute(dump, offset):
    dump.seek(offset)
    data = dump.read(ATTR_DIR_METADATA_HEADER_FORMAT.size)
    fields = ATTR_DIR_METADATA_HEADER_FORMAT.unpack_from(data, 0)
    attr = {'_absolute_offset': offset,
            'size': fields[0],
            'offset_identifier': fields[1],
            'header_rem_data': fields[2],
            'header_length': fields[4],
            'record_rem_data': fields[5],
            '_structure_size': fields[0]}

    dump.seek(offset + attr['offset_identifier'])
    data = dump.read(ATTR_DIR_METADATA_HEADER_2_FORMAT.size)
    fields = ATTR_DIR_METADATA_HEADER_2_FORMAT.unpack_from(data, 0)
    attr['type'] = fields[0]

    dump.seek(offset + attr['header_length'])
    data = dump.read(ATTR_DIR_METADATA_BODY_FORMAT.size)
    fields = ATTR_DIR_METADATA_BODY_FORMAT.unpack_from(data, 0)
    attr['_offset_body'] = attr['header_length']
    attr['body_length'] = fields[0]
    attr['offset_first_timestamp'] = fields[1]
    attr['created'] = fields[3]
    attr['modified'] = fields[4]
    attr['metadata_modified'] = fields[5]
    attr['last_accessed'] = fields[6]
    attr['nodeid'] = fields[8]

    attr['_offset_psec'] = attr['_offset_body'] + attr['body_length']
    dump.seek(offset + attr['_offset_psec'], 0)
    data = dump.read(ATTR_DIR_METADATA_PSEC_FORMAT.size)
    fields = ATTR_DIR_METADATA_PSEC_FORMAT.unpack_from(data, 0)
    attr['psec_length'] = fields[0]
    attr['offset_first_pointer'] = fields[2]
    attr['num_pointers'] = fields[3]
    attr['offset_end_pointers_area?'] = fields[4]

    if attr['num_pointers']:
        pointers_format = Struct('<' + ('L' * attr['num_pointers']))
        dump.seek(offset + attr['_offset_psec'] + attr['offset_first_pointer'], 0)
        data = dump.read(pointers_format.size)
        fields = pointers_format.unpack_from(data, 0)
        attr['pointers'] = fields
        attr['pointers_data'] = []
    else:
        attr['pointers'] = None
        attr['pointers_data'] = None

    attr['_offset_rec_area'] = attr['_offset_psec'] + attr['psec_length']
    rec_offset = attr['_offset_rec_area']

    # pointers is None when the attribute has no pointer area
    if attr['pointers']:
        for ptr in attr['pointers']:
            ptr_addr = offset + attr['_offset_psec'] + ptr
            attr['pointers_data'].append(read_directory_metadata_subattribute(dump, ptr_addr))

    return attr

class Integer(object):
    default = 0

    def __init__(self, fmt):
        self.struct = Struct(fmt)

    def encode(self, value):
        if value is None:
            value = 0
        return self.struct.pack(int(value))

    def decode(self, buf, offset):
        value, = self.struct.unpack_from(buf, offset)
        return value, offset + self.struct.size

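A small round-trip sketch for the Integer codec above (illustrative only, assuming a little-endian 32-bit format string):

codec = Integer('<i')
buf = codec.encode(7)                    # b'\x07\x00\x00\x00'
value, next_offset = codec.decode(buf, 0)
print(value, next_offset)                # 7 4
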
def unpack(self, _format):
    '''
    Unpacks data from a binary string according to the given format.

    Arguments:
        _format (str): The pack format

    Returns:
        tuple: The unpacked data fields
    '''
    compiled_format = Struct(_format)

    return compiled_format.unpack_from(self) \
        + (self[compiled_format.size:],)

def __init__(self, data, layer_index, data_offset=0):
    Chunk.__init__(self, data, data_offset)
    layer_struct = Struct(LayerChunk.layer_format)
    (
        self.flags,
        self.layer_type,
        self.layer_child_level,
        self.default_width,
        self.default_height,
        self.blend_mode,
        self.opacity
    ) = layer_struct.unpack_from(data, data_offset + 6)
    _, self.name = parse_string(data, data_offset + 6 + layer_struct.size)
    self.layer_index = layer_index

def cmp1(b, d, l, depth):
    S_CMP1 = Struct('>HHHHLLLLBBBBL')
    NT_CMP1 = namedtuple('cmp1', 'iw ih tw th d p cfa extra wl b35 hsize')
    if not options.quiet:
        print('CMP1: (0x{:x})'.format(l))
    _, size, version, _, iw, ih, tw, th, _32, _33, _34, b35, hsize = S_CMP1.unpack_from(d, 0)
    bits = int(_32)
    planes = int(_33) >> 4
    cfa = int(_33) & 0xf
    extra = int(_34) >> 4
    wavelets = int(_34) & 0xf
    cmp = NT_CMP1(iw, ih, tw, th, bits, planes, cfa, extra, wavelets, b35, hsize)
    return cmp

class WDC1SparseDataValue(WDC1ExtendedColumnValue):
    def __init__(self, column):
        super().__init__(column)

        self.unpacker = Struct('<' + column.struct_type())

    # Bytes are not needed for sparse data, sparse blocks are directly indexed
    # with the id column of the record
    def __call__(self, id_, data, bytes_):
        # Value is the id_th value in the block
        value_offset = self.column.parser().sparse_data_offset(self.column, id_)
        if value_offset == -1:
            return (self.column.default(),)

        return self.unpacker.unpack_from(data, value_offset)

def __init__(self, data, data_offset=0):
    Chunk.__init__(self, data, data_offset)
    palette_struct = Struct(FrameTagsChunk.frametag_head_format)
    (num_tags,) = palette_struct.unpack_from(data, data_offset + 6)
    self.tags = []
    tag_offset = data_offset + palette_struct.size + 6
    palette_tag_struct = Struct(FrameTagsChunk.frametag_format)
    for index in range(num_tags):
        tag = {'color': {}}
        (
            tag['from'],
            tag['to'],
            tag['loop'],
            tag['color']['red'],
            tag['color']['green'],
            tag['color']['blue']
        ) = palette_tag_struct.unpack_from(data, tag_offset)
        self.tags.append(tag)
        tag_offset += palette_tag_struct.size
        string_size, tag['name'] = parse_string(data, tag_offset)
        tag_offset += string_size

def __init__(self, data, data_offset=0):
    Chunk.__init__(self, data, data_offset)
    mask_struct = Struct(MaskChunk.mask_format)
    (
        self.x_pos,
        self.y_pos,
        self.width,
        self.height
    ) = mask_struct.unpack_from(data, data_offset + 6)
    name_offset = data_offset + 6 + mask_struct.size
    string_size, self.name = parse_string(data, name_offset)
    start_range = name_offset + string_size
    end_range = start_range + math.ceil(self.height * ((self.width + 7) / 8))
    self.bitmap = data[start_range:end_range]

def ctbo(b, d, l, depth):
    if not options.quiet:
        print('CTBO: (0x{0:x})'.format(l))
    S_CTBO_LINE = Struct('>LQQ')
    NT_CTBO_LINE = namedtuple('ctbo_line', 'index offset size')
    nbLine = getLongBE(d, 0)
    offsetList = {}
    for n in range(nbLine):
        idx, offset, size = S_CTBO_LINE.unpack_from(d, 4 + n * S_CTBO_LINE.size)
        _ctbo_line = NT_CTBO_LINE(idx, offset, size)
        if not options.quiet:
            print(' %s%x %7x %7x' % (depth * ' ', _ctbo_line.index, _ctbo_line.offset, _ctbo_line.size))
        offsetList[idx] = _ctbo_line
    return offsetList

def _load_hex(self, hex_string, hash_function=None):
    ''' placeholder for loading from hex string '''
    offset = calcsize('>QQf') * 2
    stct = Struct('>QQf')
    tmp_data = stct.unpack_from(unhexlify(hex_string[-offset:]))
    vals = self._set_optimized_params(tmp_data[0], tmp_data[2], hash_function)
    self.__hash_func = vals[0]
    self.__fpr = vals[1]
    self.__number_hashes = vals[2]
    self.__num_bits = vals[3]
    if self.__blm_type in ['regular', 'reg-ondisk']:
        self.__bloom_length = int(math.ceil(self.__num_bits / 8.0))
    else:
        self.__bloom_length = self.number_bits

    tmp_bloom = unhexlify(hex_string[:-offset])
    rep = self.__impt_type * self.bloom_length
    self._bloom = list(unpack(rep, tmp_bloom))

def __extract_fdt_prop(self):
    """Extract property"""
    prop = Struct(self.__fdt_dt_prop_format)
    pos = self.infile.tell()
    data = self.infile.read(prop.size)
    (prop_size, prop_string_pos) = prop.unpack_from(data)

    prop_start = pos + prop.size
    if self.fdt_header["version"] < 16 and prop_size >= 8:
        prop_start = ((prop_start) + ((8) - 1)) & ~((8) - 1)

    self.infile.seek(prop_start)
    value = self.infile.read(prop_size)

    align_pos = self.infile.tell()
    align_pos = ((align_pos) + ((4) - 1)) & ~((4) - 1)
    self.infile.seek(align_pos)

    return (self.__extract_fdt_string(prop_string_pos), value)

class WDC1BitPackedValue(WDC1ExtendedColumnValue):
    def __init__(self, column):
        super().__init__(column)

        self.unpacker = None
        if column.value_bit_size() != 24 and column.value_bit_size() % 8 == 0:
            struct_type = get_struct_type(column.is_float(), column.is_signed(), column.value_bit_size())
            self.unpacker = Struct('<{}'.format(struct_type))

    def __call__(self, id_, data, bytes_):
        if self.unpacker:
            value = self.unpacker.unpack_from(bytes_, 0)[0]
        else:
            value = int.from_bytes(bytes_, byteorder='little')

            if self.column.is_signed():
                value = transform_sign(value, self.element_mask, self.column.value_bit_size())

        return (value,)

def stsz(b, d, l, depth):
    S_STSZ = Struct('>BBBBLL')  #size==12
    version, f1, f2, f3, size, count = S_STSZ.unpack_from(d, 0)
    flags = f1 << 16 | f2 << 8 | f3
    size_list = []
    if size != 0:
        for s in range(count):
            size_list.append(size)
    else:
        for s in range(count):
            sample_size = getLongBE(d, 12 + s * 4)
            size_list.append(sample_size)
    if not options.quiet:
        print("stsz: version={0}, size=0x{1:x}, count={2} (0x{3:x})\n {4}".format(version, size, count, l, depth * ' '), end='')
        for s in size_list:
            print('0x%x ' % s, end='')
        print()
    return size_list

def get_minidump_create_timestamp(self, minidump_path):
    """Returns the unix timestamp of the minidump create time. It is extracted
    from the minidump header."""
    # Read the minidump's header to extract the create time stamp. More information about
    # the minidump header format can be found here: https://goo.gl/uxKZVe
    #
    # typedef struct {
    #   uint32_t  signature;
    #   uint32_t  version;
    #   uint32_t  stream_count;
    #   MDRVA     stream_directory_rva;  /* A |stream_count|-sized array of
    #                                     * MDRawDirectory structures. */
    #   uint32_t  checksum;              /* Can be 0. In fact, that's all that's
    #                                     * been found in minidump files. */
    #   uint32_t  time_date_stamp;       /* time_t */
    #   uint64_t  flags;
    # } MDRawHeader;  /* MINIDUMP_HEADER */
    s = Struct("IIIiIIQ")
    data = open(minidump_path, "rb").read(s.size)
    header = self.MINIDUMP_HEADER(*s.unpack_from(data))
    return header.time_date_stamp

def parse_sparse_block(self):
    offset = self.sparse_block_offset

    for column_idx in range(0, self.fields):
        block = {}
        column = self.column(column_idx)

        if column.field_ext_type() != COLUMN_TYPE_SPARSE:
            self.sparse_blocks.append(block)
            continue

        # TODO: Are sparse blocks always <dbc_id, value> tuples with 4 bytes for a value?
        # TODO: Do we want to preparse the sparse block? Would save an
        # unpack call at the cost of increased memory
        if column.field_block_size() % 8 != 0:
            logging.error('%s: Unknown sparse block type for column %s', self.class_name(), column)
            return False

        logging.debug('%s unpacking sparse block for %s at %d, %d entries',
                      self.full_name(), column, offset, column.field_block_size() // 8)

        unpack_full_str = '<' + ((column.field_block_size() // 8) * 'I4x')
        unpacker = Struct(unpack_full_str)

        value_index = 0
        for dbc_id in unpacker.unpack_from(self.data, offset):
            # Store <dbc_id, offset> tuples into the sparse block
            block[dbc_id] = offset + value_index * 8 + 4
            value_index += 1

        offset += column.field_block_size()

        self.sparse_blocks.append(block)

    logging.debug('Parsed sparse blocks')

    return True

def unpack(self, fmt):
    ''' Unpack values from contained buffer

    Unpacks values from ``self.buf`` and updates ``self.ptr`` to the
    position after the read data.

    Parameters
    ----------
    fmt : str
        format string as for ``unpack``

    Returns
    -------
    values : tuple
        values as unpacked from ``self.buf`` according to `fmt`
    '''
    # try and get a struct corresponding to the format string from
    # the cache
    pkst = self._cache.get(fmt)
    if pkst is None:  # struct not in cache
        # if we've not got a default endian, or the format has an
        # explicit endianness, then we make a new struct directly
        # from the format string
        if self.endian is None or fmt[0] in _ENDIAN_CODES:
            pkst = Struct(fmt)
        else:
            # we're going to modify the endianness with our
            # default.
            endian_fmt = self.endian + fmt
            pkst = Struct(endian_fmt)
            # add an entry in the cache for the modified format
            # string as well as (below) the unmodified format
            # string, in case we get a format string with the same
            # endianness as default, but specified explicitly.
            self._cache[endian_fmt] = pkst
        self._cache[fmt] = pkst
    values = pkst.unpack_from(self.buf, self.ptr)
    self.ptr += pkst.size
    return values