def __init__(self, fname, endianness="<"):
    self.endianness = endianness
    # Use a context manager so the file handle is not leaked.
    with open(fname, "rb") as f:
        self.data = f.read()
    self.superblock = RomFSSuperblock(self.data[:self.SUPERBLOCK_SIZE], self.endianness)
    self.entries = self._process_all_entries()
    if len(self.entries) != self.superblock.entry_count:
        warn("Entry count does not match the value stored in the header")
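# Usage sketch (hedged): assumes this constructor belongs to the RomFS class
# these methods are excerpted from; "rootfs.bin" is a placeholder file name.
#
#   fs = RomFS("rootfs.bin")                  # parses superblock and entries
#   print(len(fs.entries), "entries found")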
def __init__(self, data):
    self.data = bytearray(data)
    self.correct = True
    if self._get_world_at(0x0) != self.UIMAGE_MAGIC:
        warn("Wrong uImage magic number")
        self.correct = False
    self.size = self._get_world_at(self.SIZE_OFFSET)
    if len(self.data) < self.size + self.HEADER_SIZE:
        warn("Image is shorter than the size stored in the header")
        self.correct = False
    self.name = self.data[self.IMAGE_NAME_OFFSET:self.IMAGE_NAME_OFFSET + self.NAME_SIZE]
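# Usage sketch (hedged): assumes the surrounding class is the uImage header
# parser that sets .correct, .size and .name as above; "uboot.img" is a
# placeholder file name.
#
#   with open("uboot.img", "rb") as f:
#       img = uImage(f.read())
#   if img.correct:
#       print("image name:", img.name.rstrip(b"\x00").decode(errors="replace"))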
def modify_entry(self, uid, info):
    offset = self.SUPERBLOCK_SIZE + uid * self.FILE_ENTRY_SIZE
    try:
        entry = RomFSEntry(self.data[offset:offset + self.FILE_ENTRY_SIZE],
                           endianness=self.endianness)
    except ValueError:
        warn("Entry (%d) could not be read" % uid)
        return False
    if entry.uid != uid:
        warn("Entry uid does not match its index")
    # Copy over only the fields the caller actually provided.
    for param in ('offset', 'size', 'size_decompressed'):
        if hasattr(info, param):
            setattr(entry, param, getattr(info, param))
    entry.update()
    self.data[offset:offset + self.FILE_ENTRY_SIZE] = entry.data
    return True
def get_data(self, uid, uncompress=True):
    start = self.entries[uid].offset
    end = start + self.entries[uid].size
    data = self.data[start:end]
    if uncompress:
        try:
            data = lzma.decompress(data)
            if len(data) != self.entries[uid].size_decompressed:
                warn("[lzma] Wrong decompressed size! %s (%d)" % (repr(self.entries[uid]), uid))
        except KeyboardInterrupt:
            # Never swallow a user interrupt; re-raise with the original traceback.
            raise
        except Exception:
            warn("Could not uncompress! %s (%d)" % (repr(self.entries[uid]), uid))
    return data
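# Extraction sketch (hedged): dumps every regular file using get_data() and
# build_path() (referenced below); the "extracted" output directory and the
# fs variable are assumptions carried over from the constructor sketch.
#
#   import os
#   for uid, info in fs.entries.items():
#       if info.type != "data":
#           continue
#       path = os.path.join("extracted", fs.build_path(uid).lstrip(os.path.sep))
#       os.makedirs(os.path.dirname(path), exist_ok=True)
#       with open(path, "wb") as f:
#           f.write(fs.get_data(uid))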
def _process_all_entries(self):
    entries = {}
    offset = self.SUPERBLOCK_SIZE
    counter = 0
    while counter < self.superblock.entry_count:
        try:
            entry = RomFSEntry(self.data[offset:offset + self.FILE_ENTRY_SIZE],
                               endianness=self.endianness)
        except ValueError:
            warn("Entry (%d) could not be read" % counter)
            break
        if entry.uid not in entries:
            entries[entry.uid] = FileContainer()
        entries[entry.uid].offset = entry.offset
        entries[entry.uid].size = entry.size
        entries[entry.uid].type = entry.type
        entries[entry.uid].raw_type = entry.type
        entries[entry.uid].size_decompressed = entry.size_decompressed
        entries[entry.uid].ctime = entry.ctime
        entries[entry.uid].nlink = entry.nlink
        if entry.uid == 0:
            entries[entry.uid].name = os.path.sep
        if entry.type & entry.DIR_STRUCT_MASK:
            entries[entry.uid].type = "directory"
            ds = RomFSDirStruct(self.data[entry.offset:entry.offset + entry.size],
                                endianness=self.endianness)
            for (uid, name) in ds.ls:
                if uid not in entries:
                    entries[uid] = FileContainer()
                else:
                    warn("Multiple links to one file: %s %s" % (self.build_path(uid), name))  # DEBUG
                entries[uid].parent = ds.uid
                entries[uid].name = name
        else:
            entries[entry.uid].type = "data"
        offset += self.FILE_ENTRY_SIZE
        counter += 1
    return entries
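# Listing sketch (hedged): walks the parent/name links filled in above to
# print a flat listing; getattr guards are used because entries first seen
# only through a directory listing may lack some attributes.
#
#   for uid, info in fs.entries.items():
#       print("%5d %-9s %s" % (uid,
#                              getattr(info, "type", "?"),
#                              getattr(info, "name", "?")))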
def rebuild(self):
    self.data = bytearray(self.data)
    new_data_block = b''
    data_offset = self.SUPERBLOCK_SIZE + len(self.entries) * self.FILE_ENTRY_SIZE
    for uid, info in self.entries.items():
        # Guarantee alignment of each file's data.
        rem = data_offset % self.ALIGNMENT
        if rem != 0:
            padlen = self.ALIGNMENT - rem
            new_data_block += self.PAD_BYTE * padlen
            data_offset += padlen
        if hasattr(info, 'new_data'):
            data = info.new_data
            if len(data) > self.MAX_NOTCOMPRESSED:
                info.size_decompressed = len(data)
                data = lzma_compress(data)
            else:
                info.size_decompressed = 0
            del info.new_data
        else:
            # Unchanged entry: carry the stored (still compressed) bytes over.
            data = self.get_data(uid, uncompress=False)
        info.size = len(data)
        info.offset = data_offset
        self.modify_entry(uid, info)
        new_data_block += data
        data_offset += info.size
    new_size = len(new_data_block) + self.SUPERBLOCK_SIZE + len(self.entries) * self.FILE_ENTRY_SIZE
    if new_size % 32 != 0:
        # max_size should be aligned to 32-byte blocks.
        new_size += 32 - (new_size % 32)
    if new_size > self.superblock.max_size:
        warn("RomFS larger than max_size in header! Increasing max_size to %d" % new_size)
        self.superblock.max_size = new_size
        self.data[0x8:0xc] = struct.pack("%sL" % self.endianness, new_size)
    self.data = bytes(self.data[:self.SUPERBLOCK_SIZE + len(self.entries) * self.FILE_ENTRY_SIZE]) + new_data_block
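# Repack sketch (hedged): replaces one file's contents and writes the rebuilt
# image back out. The uid and file names are placeholders; new_data is the
# attribute consumed by rebuild() as implemented above.
#
#   info = fs.entries[42]                     # hypothetical uid
#   info.new_data = b"#!/bin/sh\necho patched\n"
#   fs.rebuild()
#   with open("rootfs.patched.bin", "wb") as f:
#       f.write(fs.data)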
def verify(self):
    correct = True
    if self.magic != 0x5ea3_a417:
        warn("Wrong SEAMA magic number")
        correct = False
    if len(self.surplus_data) != 0:
        warn("Surplus data found after SEAMA data length")
        correct = False
    if len(self.data) < self.data_len:
        warn("Data length is {}, but SEAMA's data length is {}".format(
            len(self.data), self.data_len))
        correct = False
    h = hashlib.md5(self.data).digest()
    if h != self.md5:
        warn("MD5 checksum does not match")
        correct = False
    return correct
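# Verification sketch (hedged): assumes the surrounding class parses a SEAMA
# header into .magic, .data_len, .md5, .data and .surplus_data before
# verify() is called; the class and file names are illustrative.
#
#   with open("firmware.seama", "rb") as f:
#       seama = SEAMA(f.read())
#   if not seama.verify():
#       print("SEAMA image failed verification")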