def index(ubifs, lnum, offset, inodes=None):
    """Walk the index gathering Inode, Dir Entry, and File nodes.

    Arguments:
    Obj:ubifs    -- UBIFS object.
    Int:lnum     -- Logical erase block number.
    Int:offset   -- Offset in logical erase block.
    Dict:inodes  -- Dict of ino/dent/file nodes keyed to inode number.

    Returns:
    Dict:inodes -- Dict of ino/dent/file nodes keyed to inode number.
        'ino'  -- Inode node.
        'data' -- List of data nodes if present.
        'dent' -- List of directory entry nodes if present.
    """
    # None sentinel replaces a mutable default argument: the original
    # `inodes={}` dict was shared across calls and silently accumulated.
    if inodes is None:
        inodes = {}
    try:
        ubifs.file.seek((ubifs.leb_size * lnum) + offset)
        buf = ubifs.file.read(UBIFS_COMMON_HDR_SZ)
        chdr = nodes.common_hdr(buf)
        log(index, '%s file addr: %s' % (chdr, ubifs.file.last_read_addr()))
        verbose_display(chdr)
        node_buf = ubifs.file.read(chdr.len - UBIFS_COMMON_HDR_SZ)
        file_offset = ubifs.file.last_read_addr()
    # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
    except Exception as e:
        error(index, 'Fatal', 'leb: %s, ubifs offset: %s, error: %s'
              % (lnum, ((ubifs.leb_size * lnum) + offset), e))
def _process_reg_file(ubifs, inode, path):
    """Reassemble a regular file's contents from its UBIFS data nodes.

    Arguments:
    Obj:ubifs  -- UBIFS object.
    Dict:inode -- Inode dict ('ino' node plus optional 'data' node list).
    Str:path   -- Path of the file (used for logging only).

    Returns:
    Bytes -- Decompressed file contents, zero-padded to the inode size.
    """
    buf = b''
    try:
        if 'data' in inode:
            compr_type = 0
            sorted_data = sorted(inode['data'], key=lambda x: x.key['khash'])
            last_khash = sorted_data[0].key['khash'] - 1
            for data in sorted_data:
                # If data nodes are missing in sequence, fill in blanks
                # with \x00 * UBIFS_BLOCK_SIZE. Computing the gap directly
                # (instead of looping until the difference equals 1) cannot
                # spin forever if two nodes ever share a khash.
                gap = data.key['khash'] - last_khash - 1
                if gap > 0:
                    buf += (b'\x00' * UBIFS_BLOCK_SIZE) * gap
                compr_type = data.compr_type
                ubifs.file.seek(data.offset)
                d = ubifs.file.read(data.compr_len)
                buf += decompress(compr_type, data.size, d)
                last_khash = data.key['khash']
            verbose_log(_process_reg_file,
                        'ino num: %s, compression: %s, path: %s'
                        % (inode['ino'].key['ino_num'], compr_type, path))
    except Exception as e:
        # Best-effort: a corrupt data node degrades to a partial file.
        error(_process_reg_file, 'Warn',
              'inode num:%s :%s' % (inode['ino'].key['ino_num'], e))

    # Pad end of file with \x00 if needed.
    if inode['ino'].size > len(buf):
        buf += b'\x00' * (inode['ino'].size - len(buf))

    return buf
def _process_reg_file(ubifs, inode, path):
    """Reassemble a regular file's contents from its UBIFS data nodes.

    Arguments:
    Obj:ubifs  -- UBIFS object.
    Dict:inode -- Inode dict ('ino' node plus optional 'data' node list).
    Str:path   -- Path of the file (used for logging only).

    Returns:
    Str -- Decompressed file contents.
    """
    buf = ''
    try:
        if 'data' in inode:
            compr_type = 0
            sorted_data = sorted(inode['data'], key=lambda x: x.key['khash'])
            last_khash = sorted_data[0].key['khash'] - 1
            for data in sorted_data:
                # If data nodes are missing in sequence, fill in blanks
                # with \x00 * UBIFS_BLOCK_SIZE. Computing the gap directly
                # (instead of looping until the difference equals 1) cannot
                # spin forever if two nodes ever share a khash.
                gap = data.key['khash'] - last_khash - 1
                if gap > 0:
                    buf += ('\x00' * UBIFS_BLOCK_SIZE) * gap
                compr_type = data.compr_type
                ubifs.file.seek(data.offset)
                d = ubifs.file.read(data.compr_len)
                buf += decompress(compr_type, data.size, d)
                last_khash = data.key['khash']
            verbose_log(
                _process_reg_file,
                'ino num: %s, compression: %s, path: %s'
                % (inode['ino'].key['ino_num'], compr_type, path))
    # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
    except Exception as e:
        error(_process_reg_file, 'Warn',
              'inode num:%s :%s' % (inode['ino'].key['ino_num'], e))
    # The original fell off the end and returned None; callers pass the
    # result to _write_reg_file and need the assembled contents.
    return buf
def index(ubifs, lnum, offset, inodes=None):
    """Walk the index gathering Inode, Dir Entry, and File nodes.

    Arguments:
    Obj:ubifs    -- UBIFS object.
    Int:lnum     -- Logical erase block number.
    Int:offset   -- Offset in logical erase block.
    Dict:inodes  -- Dict of ino/dent/file nodes keyed to inode number.

    Returns:
    Dict:inodes -- Dict of ino/dent/file nodes keyed to inode number.
        'ino'  -- Inode node.
        'data' -- List of data nodes if present.
        'dent' -- List of directory entry nodes if present.
    """
    # None sentinel replaces a mutable default argument: the original
    # `inodes={}` dict was shared across calls and silently accumulated.
    if inodes is None:
        inodes = {}
    try:
        ubifs.file.seek((ubifs.leb_size * lnum) + offset)
        buf = ubifs.file.read(UBIFS_COMMON_HDR_SZ)
        chdr = nodes.common_hdr(buf)
        log(index, '%s file addr: %s' % (chdr, ubifs.file.last_read_addr()))
        verbose_display(chdr)
        node_buf = ubifs.file.read(chdr.len - UBIFS_COMMON_HDR_SZ)
        file_offset = ubifs.file.last_read_addr()
    # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
    except Exception as e:
        error(
            index, 'Fatal',
            'leb: %s, ubifs offset: %s, error: %s'
            % (lnum, ((ubifs.leb_size * lnum) + offset), e))
def list_files(ubifs, list_path):
    """Print the directory entries found at list_path inside the UBIFS image.

    Arguments:
    Obj:ubifs     -- UBIFS object.
    Str:list_path -- Path inside the image, '/'-separated.
    """
    # Drop empty components produced by leading/trailing/double slashes.
    pnames = [part for part in list_path.split("/") if len(part) > 0]
    try:
        inodes = {}
        bad_blocks = []
        walk.index(ubifs, ubifs.master_node.root_lnum,
                   ubifs.master_node.root_offs, inodes, bad_blocks)
        if len(inodes) < 2:
            raise Exception('No inodes found')
        # Resolve the path, starting from root inode 1.
        inum = find_dir(inodes, 1, pnames, 0)
        if inum == None:
            return
        if not 'dent' in inodes[inum]:
            return
        for dent in inodes[inum]['dent']:
            print_dent(ubifs, inodes, dent, longts=False)
        if len(bad_blocks):
            error(
                list_files, 'Warning',
                'Data may be missing or corrupted, bad blocks, LEB [%s]'
                % ','.join(map(str, bad_blocks)))
    except Exception as e:
        error(list_files, 'Error', '%s' % e)
def read(self, size):
    """Read size bytes from the virtual LEB-addressed file.

    Arguments:
    Int:size -- Number of bytes to read from the current position.

    Returns:
    Bytes/Str -- The requested slice of the underlying block's data.

    Raises:
    Exception -- 'Bad Read Offset Request' when size is negative or the
                 LEB has no backing block.
    """
    # Floor division keeps the LEB number an exact int; int(a / b) goes
    # through a float and can lose precision for very large offsets.
    leb = self.tell() // self._ubi.leb_size
    offset = self.tell() % self._ubi.leb_size
    try:
        if size < 0:
            raise Exception('Bad Read Offset Request')
        # Resolve the virtual position to a physical file address; raises
        # KeyError/IndexError when the LEB has no mapped block.
        self._last_read_addr = (
            self._ubi.blocks[self._blocks[leb]].file_offset
            + self._ubi.blocks[self._blocks[leb]].ec_hdr.data_offset
            + offset)
    except Exception:
        error(self.read, 'Error', 'LEB: %s is corrupted or has no data.' % (leb))
        raise Exception('Bad Read Offset Request')
    verbose_log(self, 'read loc: %s, size: %s' % (self._last_read_addr, size))
    if leb == self._last_leb:
        # Serve repeated reads of the same LEB from the cached buffer.
        self.seek(self.tell() + size)
        return self._last_buf[offset:offset + size]
    else:
        try:
            buf = self._ubi.file.read_block_data(
                self._ubi.blocks[self._blocks[leb]])
            self._last_buf = buf
            self._last_leb = leb
            self.seek(self.tell() + size)
            return buf[offset:offset + size]
        except Exception as e:
            error(self, 'Fatal',
                  'read loc: %s, size: %s, LEB: %s, offset: %s, error: %s'
                  % (self._last_read_addr, size, leb, offset, e))
def guess_start_offset(path):
    """Scan a file for the first UBI or UBIFS magic number.

    Arguments:
    Str:path -- Path of the image file to scan.

    Returns:
    Int -- File offset of the first magic number found; fatal error if
           neither location can be decided.
    """
    file_offset = 0
    # 'with' guarantees the handle is closed on every return path; the
    # original leaked the descriptor when returning from inside the loop.
    with open(path, 'rb') as f:
        f.seek(0, 2)
        file_size = f.tell() + 1
        f.seek(0)
        for _ in range(0, file_size, FILE_CHUNK_SZ):
            buf = f.read(FILE_CHUNK_SZ)
            ubi_loc = buf.find(UBI_EC_HDR_MAGIC)
            ubifs_loc = buf.find(UBIFS_NODE_MAGIC)
            if ubi_loc == -1 and ubifs_loc == -1:
                file_offset += FILE_CHUNK_SZ
                continue
            else:
                # Push a missing magic past the end so the comparison below
                # always picks the one that was actually found.
                if ubi_loc == -1:
                    ubi_loc = file_size + 1
                elif ubifs_loc == -1:
                    ubifs_loc = file_size + 1
                if ubi_loc < ubifs_loc:
                    return file_offset + ubi_loc
                elif ubifs_loc < ubi_loc:
                    return file_offset + ubifs_loc
                else:
                    error(guess_start_offset, 'Fatal',
                          'Could not determine start offset.')
def index(ubifs, lnum, offset, inodes=None, bad_blocks=None):
    """Walk the index gathering Inode, Dir Entry, and File nodes.

    Arguments:
    Obj:ubifs       -- UBIFS object.
    Int:lnum        -- Logical erase block number.
    Int:offset      -- Offset in logical erase block.
    Dict:inodes     -- Dict of ino/dent/file nodes keyed to inode number.
    List:bad_blocks -- Accumulator of LEB numbers that failed to read.

    Returns:
    Dict:inodes -- Dict of ino/dent/file nodes keyed to inode number.
        'ino'  -- Inode node.
        'data' -- List of data nodes if present.
        'dent' -- List of directory entry nodes if present.
    """
    # None sentinels replace mutable default arguments (shared across calls);
    # callers that pass their own dict/list still see accumulated results.
    if inodes is None:
        inodes = {}
    if bad_blocks is None:
        bad_blocks = []
    try:
        # Skip LEBs already known to be unreadable.
        if len(bad_blocks):
            if lnum in bad_blocks:
                return
        ubifs.file.seek((ubifs.leb_size * lnum) + offset)
        buf = ubifs.file.read(UBIFS_COMMON_HDR_SZ)
        chdr = nodes.common_hdr(buf)
        log(index, '%s file addr: %s' % (chdr, ubifs.file.last_read_addr()))
        verbose_display(chdr)
        node_buf = ubifs.file.read(chdr.len - UBIFS_COMMON_HDR_SZ)
        file_offset = ubifs.file.last_read_addr()
    # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
    except Exception as e:
        # Exception.message was removed in Python 3; compare str(e) instead.
        if str(e) == 'Bad Read Offset Request' and settings.warn_only_block_read_errors:
            bad_blocks.append(lnum)
            return
        else:
            error(index, 'Fatal', 'LEB: %s, UBIFS offset: %s, error: %s'
                  % (lnum, ((ubifs.leb_size * lnum) + offset), e))
def __init__(self, ubi_file):
    """Parse a UBI image: extract blocks, sort them by type, derive the
    geometry from one erase-counter header, and build the contained images.

    Arguments:
    Obj:ubi_file -- UBI file object to parse.
    """
    self.__name__ = 'UBI'
    self._file = ubi_file
    self._first_peb_num = 0
    self._blocks = extract_blocks(self)
    self._block_count = len(self.blocks)
    if self._block_count <= 0:
        error(self, 'Fatal', 'No blocks found.')
    # Partition the blocks by the role they play in the image.
    layout_list, data_list, int_vol_list, unknown_list = sort.by_type(self.blocks)
    if len(layout_list) < 2:
        error(self, 'Fatal', 'Less than 2 layout blocks found.')
    self._layout_blocks_list = layout_list
    self._data_blocks_list = data_list
    self._int_vol_blocks_list = int_vol_list
    self._unknown_blocks_list = unknown_list
    # Any block's EC header carries the shared geometry values.
    sample_block = next(iter(self.blocks.values()))
    self._min_io_size = sample_block.ec_hdr.vid_hdr_offset
    self._leb_size = self.file.block_size - sample_block.ec_hdr.data_offset
    layout_pairs = layout.group_pairs(self.blocks, self.layout_blocks_list)
    layout_infos = layout.associate_blocks(self.blocks, layout_pairs,
                                           self.first_peb_num)
    self._images = [image(self.blocks, info) for info in layout_infos]
def guess_start_offset(path, guess_offset=0):
    """Find the first UBI or UBIFS magic number at or after guess_offset.

    Arguments:
    Str:path         -- Path of the image file to scan.
    Int:guess_offset -- File offset to start scanning from.

    Returns:
    Int -- File offset of the first magic number found; fatal error if
           the whole file is scanned without a decision.
    """
    file_offset = guess_offset
    # 'with' guarantees the handle is closed on every return path; the
    # original leaked the descriptor when returning from inside the loop.
    with open(path, 'rb') as f:
        f.seek(0, 2)
        file_size = f.tell() + 1
        f.seek(guess_offset)
        for _ in range(0, file_size, FILE_CHUNK_SZ):
            buf = f.read(FILE_CHUNK_SZ)
            ubi_loc = buf.find(UBI_EC_HDR_MAGIC)
            ubifs_loc = buf.find(UBIFS_NODE_MAGIC)
            if ubi_loc == -1 and ubifs_loc == -1:
                file_offset += FILE_CHUNK_SZ
                continue
            else:
                # Push a missing magic past the end so the comparison below
                # always picks the one that was actually found.
                if ubi_loc == -1:
                    ubi_loc = file_size + 1
                elif ubifs_loc == -1:
                    ubifs_loc = file_size + 1
                if ubi_loc < ubifs_loc:
                    log(guess_start_offset,
                        'Found UBI magic number at %s' % (file_offset + ubi_loc))
                    return file_offset + ubi_loc
                elif ubifs_loc < ubi_loc:
                    log(guess_start_offset,
                        'Found UBIFS magic number at %s' % (file_offset + ubifs_loc))
                    return file_offset + ubifs_loc
                else:
                    error(guess_start_offset, 'Fatal',
                          'Could not determine start offset.')
        else:
            # Loop exhausted without finding either magic number.
            error(guess_start_offset, 'Fatal',
                  'Could not determine start offset.')
def extract_dents(ubifs, inodes, dent_node, path='', perms=False):
    """Materialize the directory entry dent_node under path (directories only
    in this variant), recursing into child entries.

    Arguments:
    Obj:ubifs     -- UBIFS object.
    Dict:inodes   -- Dict of ino/dent/file nodes keyed to inode number.
    Obj:dent_node -- Directory entry node to create.
    Str:path      -- Directory to create the entry in.
    Bool:perms    -- Apply ownership/permissions to created directories.
    """
    if dent_node.inum not in inodes:
        error(extract_dents, 'Error',
              'inum: %s not found in inodes' % (dent_node.inum))
        return
    inode = inodes[dent_node.inum]
    dent_path = os.path.join(path, dent_node.name)
    if dent_node.type == UBIFS_ITYPE_DIR:
        try:
            if not os.path.exists(dent_path):
                os.mkdir(dent_path)
                log(extract_dents, 'Make Dir: %s' % (dent_path))
                if perms:
                    _set_file_perms(dent_path, inode)
        # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
        except Exception as e:
            error(extract_dents, 'Warn', 'DIR Fail: %s' % e)
        if 'dent' in inode:
            for dnode in inode['dent']:
                extract_dents(ubifs, inodes, dnode, dent_path, perms)
        # Set timestamps after children are written so they are not clobbered.
        _set_file_timestamps(dent_path, inode)
def extract_files(ubifs, out_path, perms=False):
    """Extract UBIFS contents to_path/

    Arguments:
    Obj:ubifs -- UBIFS object.
    Str:out_path -- Path to extract contents to.
    """
    try:
        inode_map = {}
        corrupt_lebs = []
        walk.index(ubifs, ubifs.master_node.root_lnum,
                   ubifs.master_node.root_offs, inode_map, corrupt_lebs)
        if len(inode_map) < 2:
            raise Exception('No inodes found')
        # Root directory is inode 1; extract every entry beneath it.
        for dent in inode_map[1]['dent']:
            extract_dents(ubifs, inode_map, dent, out_path, perms)
        if len(corrupt_lebs):
            error(
                extract_files, 'Warning',
                'Data may be missing or corrupted, bad blocks, LEB [%s]'
                % ','.join(map(str, corrupt_lebs)))
    except Exception as e:
        error(extract_files, 'Error', '%s' % e)
def __init__(self, ubi_file):
    """Parse a UBI image: extract blocks, sort them by type, derive the
    geometry from one erase-counter header, and build the contained images.

    Arguments:
    Obj:ubi_file -- UBI file object to parse.
    """
    self.__name__ = 'UBI'
    self._file = ubi_file
    self._first_peb_num = 0
    self._blocks = extract_blocks(self)
    self._block_count = len(self.blocks)
    if self._block_count <= 0:
        error(self, 'Fatal', 'No blocks found.')
    layout_list, data_list, int_vol_list, unknown_list = sort.by_type(self.blocks)
    if len(layout_list) < 2:
        error(self, 'Fatal', 'Less than 2 layout blocks found.')
    self._layout_blocks_list = layout_list
    self._data_blocks_list = data_list
    self._int_vol_blocks_list = int_vol_list
    self._unknown_blocks_list = unknown_list
    # dict.itervalues().next() is Python-2-only; next(iter(...)) works on
    # both 2 and 3 and still grabs an arbitrary block for the geometry.
    arbitrary_block = next(iter(self.blocks.values()))
    self._min_io_size = arbitrary_block.ec_hdr.vid_hdr_offset
    self._leb_size = self.file.block_size - arbitrary_block.ec_hdr.data_offset
    layout_pairs = layout.group_pairs(self.blocks, self.layout_blocks_list)
    layout_infos = layout.associate_blocks(self.blocks, layout_pairs,
                                           self.first_peb_num)
    self._images = []
    for i in range(0, len(layout_infos)):
        self._images.append(image(self.blocks, layout_infos[i]))
def list_files(ubifs, list_path):
    """Print the directory entries found at list_path inside the UBIFS image.

    Arguments:
    Obj:ubifs     -- UBIFS object.
    Str:list_path -- Path inside the image, '/'-separated.
    """
    # Keep only non-empty path components.
    components = [c for c in list_path.split("/") if len(c) > 0]
    try:
        inode_map = {}
        corrupt_lebs = []
        walk.index(ubifs, ubifs.master_node.root_lnum,
                   ubifs.master_node.root_offs, inode_map, corrupt_lebs)
        if len(inode_map) < 2:
            raise Exception('No inodes found')
        inum = find_dir(inode_map, 1, components, 0)
        if inum == None:
            return
        if not 'dent' in inode_map[inum]:
            return
        for dent in inode_map[inum]['dent']:
            print_dent(ubifs, inode_map, dent, longts=False)
        if len(corrupt_lebs):
            error(list_files, 'Warning',
                  'Data may be missing or corrupted, bad blocks, LEB [%s]'
                  % ','.join(map(str, corrupt_lebs)))
    except Exception as e:
        error(list_files, 'Error', '%s' % e)
def read(self, size):
    """Read size bytes from the virtual LEB-addressed file.

    Arguments:
    Int:size -- Number of bytes to read from the current position.

    Returns:
    Str/Bytes -- The requested slice of the underlying block's data.
    """
    buf = ''
    # Floor division keeps the LEB number an exact int; int(a / b) goes
    # through a float and can lose precision for very large offsets.
    leb = self.tell() // self._ubi.leb_size
    offset = self.tell() % self._ubi.leb_size
    self._last_read_addr = self._ubi.blocks[
        self._blocks[leb]].file_offset + self._ubi.blocks[
        self._blocks[leb]].ec_hdr.data_offset + offset
    verbose_log(self, 'read loc: %s, size: %s' % (self._last_read_addr, size))
    if leb == self._last_leb:
        # Serve repeated reads of the same LEB from the cached buffer.
        self.seek(self.tell() + size)
        return self._last_buf[offset:offset + size]
    else:
        try:
            buf = self._ubi.file.read_block_data(
                self._ubi.blocks[self._blocks[leb]])
            self._last_buf = buf
            self._last_leb = leb
            self.seek(self.tell() + size)
            return buf[offset:offset + size]
        # 'except Exception, e' is Python-2-only syntax; 'as' works everywhere.
        except Exception as e:
            error(
                self, 'Fatal',
                'read loc: %s, size: %s, LEB: %s, offset: %s, error: %s'
                % (self._last_read_addr, size, leb, offset, e))
def read(self, size):
    """Read size bytes from the current position, refusing reads that would
    run past end_offset.

    Arguments:
    Int:size -- Number of bytes to read.

    Returns:
    Bytes -- Data read from the underlying file handle.

    Raises:
    Exception -- 'Bad Read Offset Request' when the read would overrun.
    """
    stop = self.tell() + size
    if self.end_offset < stop:
        error(self.read, 'Error',
              'Block ends at %s which is greater than file size %s'
              % (stop, self.end_offset))
        raise Exception('Bad Read Offset Request')
    # Remember where this read started for later diagnostics.
    self._last_read_addr = self.tell()
    verbose_log(self, 'read loc: %s, size: %s' % (self._last_read_addr, size))
    return self._fhandle.read(size)
def __init__(self, path, block_size, start_offset=0, end_offset=None):
    """Open the UBI image file at path.

    Arguments:
    Str:path         -- Path to the image file.
    Int:block_size   -- Physical erase block (PEB) size in bytes.
    Int:start_offset -- File offset where UBI data starts.
    Int:end_offset   -- File offset where UBI data ends.
    """
    self.__name__ = 'UBI_File'
    # Stays False unless setup completes (error() may not raise).
    self.is_valid = False
    try:
        log(self, 'Open Path: %s' % path)
        self._fhandle = open(path, 'rb')
    # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
    except Exception as e:
        error(self, 'Fatal', 'Open file: %s' % e)
def extract_blocks(ubi):
    """Get a list of UBI block objects from file

    Arguments:
    Obj:ubi -- UBI object.

    Returns:
    Dict -- Of block objects keyed by PEB number.
    """
    blocks = {}
    ubi.file.seek(ubi.file.start_offset)
    peb_count = 0
    cur_offset = 0
    bad_blocks = []
    # range instead of xrange, as xrange breaks > 4GB end_offset.
    for i in range(ubi.file.start_offset, ubi.file.end_offset,
                   ubi.file.block_size):
        try:
            buf = ubi.file.read(ubi.file.block_size)
        except Exception as e:
            if settings.warn_only_block_read_errors:
                error(extract_blocks, 'Error', 'PEB: %s: %s'
                      % (ubi.first_peb_num + peb_count, str(e)))
                continue
            else:
                error(extract_blocks, 'Fatal', 'PEB: %s: %s'
                      % (ubi.first_peb_num + peb_count, str(e)))
        if buf.startswith(UBI_EC_HDR_MAGIC):
            blk = description(buf)
            blk.file_offset = i
            blk.peb_num = ubi.first_peb_num + peb_count
            blk.size = ubi.file.block_size
            blocks[blk.peb_num] = blk
            peb_count += 1
            log(extract_blocks, blk)
            verbose_log(extract_blocks,
                        'file addr: %s' % (ubi.file.last_read_addr()))
            # Collect any header CRC/validation errors for diagnostics.
            ec_hdr_errors = ''
            vid_hdr_errors = ''
            if blk.ec_hdr.errors:
                ec_hdr_errors = ','.join(blk.ec_hdr.errors)
            if blk.vid_hdr and blk.vid_hdr.errors:
                vid_hdr_errors = ','.join(blk.vid_hdr.errors)
            if ec_hdr_errors or vid_hdr_errors:
                if blk.peb_num not in bad_blocks:
                    bad_blocks.append(blk.peb_num)
                    log(extract_blocks,
                        'PEB: %s has possible issue EC_HDR [%s], VID_HDR [%s]'
                        % (blk.peb_num, ec_hdr_errors, vid_hdr_errors))
            verbose_display(blk)
        else:
            # No magic yet: remember how much leading data to skip.
            cur_offset += ubi.file.block_size
            # Floor division: plain '/' yields a float PEB number on Python 3.
            ubi.first_peb_num = cur_offset // ubi.file.block_size
            ubi.file.start_offset = cur_offset
    return blocks
def __init__(self, ubi_file):
    """Base UBI setup: extract blocks and derive geometry from one EC header.

    Arguments:
    Obj:ubi_file -- UBI file object to parse.
    """
    self.__name__ = 'UBI'
    self._file = ubi_file
    self._first_peb_num = 0
    self._blocks = extract_blocks(self)
    self._block_count = len(self.blocks)
    if self._block_count <= 0:
        error(self, 'Fatal', 'No blocks found.')
    # dict.itervalues().next() is Python-2-only; next(iter(...)) works on
    # both 2 and 3 and still grabs an arbitrary block for the geometry.
    arbitrary_block = next(iter(self.blocks.values()))
    self._min_io_size = arbitrary_block.ec_hdr.vid_hdr_offset
    self._leb_size = self.file.block_size - arbitrary_block.ec_hdr.data_offset
def __init__(self, ubi_file):
    """Base UBI setup: extract blocks and derive geometry from one EC header.

    Arguments:
    Obj:ubi_file -- UBI file object to parse.
    """
    self.__name__ = 'UBI'
    self._file = ubi_file
    self._first_peb_num = 0
    self._blocks = extract_blocks(self)
    self._block_count = len(self.blocks)
    if self._block_count <= 0:
        error(self, 'Fatal', 'No blocks found.')
    # Any block's erase-counter header carries the shared geometry values.
    sample = next(iter(self.blocks.values()))
    self._min_io_size = sample.ec_hdr.vid_hdr_offset
    self._leb_size = self.file.block_size - sample.ec_hdr.data_offset
def __init__(self, ubi, block_list):
    """Present a list of UBI blocks as one contiguous LEB-addressed file.

    Arguments:
    Obj:ubi         -- Parent UBI object.
    List:block_list -- Blocks making up this virtual file.
    """
    self.__name__ = 'leb_virtual_file'
    self.is_valid = False
    self._ubi = ubi
    self._last_read_addr = 0
    if len(block_list):
        # Order blocks by LEB number and start with an empty read cache.
        self._blocks = sort.by_leb(block_list)
        self._seek = 0
        self._last_leb = -1
        self._last_buf = ''
        self.is_valid = True
    else:
        error(self, 'Info', 'Empty block list')
def decompress(ctype, unc_len, data):
    """Decompress data.

    Arguments:
    Int:ctype   -- Compression type LZO, ZLIB (*currently unused*).
    Int:unc_len -- Uncompressed data lenth.
    Str:data    -- Data to be uncompessed.

    Returns:
    Uncompressed Data.
    """
    if ctype == UBIFS_COMPR_LZO:
        try:
            # python-lzo expects a header byte plus the big-endian
            # uncompressed length before the payload. Join bytes with
            # bytes: the original ''.join mixed str and the bytes from
            # struct.pack, which raises TypeError on Python 3.
            return lzo.decompress(
                b''.join((b'\xf0', struct.pack('>I', unc_len), data)))
        # 'except Exception, e' is Python-2-only syntax.
        except Exception as e:
            error(decompress, 'Warn', 'LZO Error: %s' % e)
def extract_files(ubifs, out_path, perms=False):
    """Extract UBIFS contents to_path/

    Arguments:
    Obj:ubifs -- UBIFS object.
    Str:out_path -- Path to extract contents to.
    """
    try:
        inodes = {}
        walk.index(ubifs, ubifs.master_node.root_lnum,
                   ubifs.master_node.root_offs, inodes)
        # Root directory is inode 1; extract every entry beneath it.
        for dent in inodes[1]['dent']:
            extract_dents(ubifs, inodes, dent, out_path, perms)
    # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
    except Exception as e:
        error(extract_files, 'Fatal', '%s' % e)
def guess_filetype(path, start_offset=0):
    """Identify whether the file at path holds a UBI or UBIFS image.

    Arguments:
    Str:path         -- Path of the file to inspect.
    Int:start_offset -- Offset at which to read the magic bytes.

    Returns:
    The matching magic constant, or None (after a fatal error report).
    """
    with open(path, 'rb') as f:
        f.seek(start_offset)
        magic = f.read(4)
        if magic == UBI_EC_HDR_MAGIC:
            ftype = UBI_EC_HDR_MAGIC
            log(guess_filetype, 'File looks like a UBI image.')
        elif magic == UBIFS_NODE_MAGIC:
            ftype = UBIFS_NODE_MAGIC
            log(guess_filetype, 'File looks like a UBIFS image.')
        else:
            ftype = None
            error(guess_filetype, 'Fatal', 'Could not determine file type.')
        return ftype
def __init__(self, path, block_size, start_offset=0, end_offset=None):
    """Open a UBI image file and compute the block-aligned read window.

    Arguments:
    Str:path         -- Path to the image file.
    Int:block_size   -- Physical erase block (PEB) size in bytes.
    Int:start_offset -- File offset where UBI data starts.
    Int:end_offset   -- File offset where UBI data ends (default: trimmed
                        to a block-size multiple of the file size).
    """
    self.__name__ = 'UBI_File'
    # Stays False unless the whole setup below completes.
    self.is_valid = False
    try:
        log(self, 'Open Path: %s' % path)
        self._fhandle = open(path, 'rb')
    except Exception as e:
        error(self, 'Fatal', 'Open file: %s' % e)
    # Seek to the end to measure the file size.
    self._fhandle.seek(0, 2)
    file_size = self.tell()
    log(self, 'File Size: %s' % file_size)
    self._start_offset = start_offset
    log(self, 'Start Offset: %s' % (self._start_offset))
    if end_offset:
        # Caller fixed the end; anything past it is the tail.
        tail = file_size - end_offset
        self._end_offset = end_offset
    else:
        # Trim the window so (end - start) is a whole number of blocks.
        tail = (file_size - start_offset) % block_size
        self._end_offset = file_size - tail
    log(self, 'End Offset: %s' % (self._end_offset))
    if tail > 0:
        log(self, 'File Tail Size: %s' % (tail))
    self._block_size = block_size
    log(self, 'Block Size: %s' % block_size)
    # Sanity checks on the requested read window.
    if start_offset > self._end_offset:
        error(self, 'Fatal', 'Start offset larger than end offset.')
    if (not end_offset is None) and (end_offset > file_size):
        error(self, 'Fatal', 'End offset larger than file size.')
    remainder = (self._end_offset - start_offset) % block_size
    if remainder != 0:
        # Misalignment is fatal unless the user opted into warnings only.
        if settings.warn_only_block_read_errors:
            error(self, 'Error', 'File read is not block aligned.')
        else:
            error(self, 'Fatal', 'File read is not block aligned.')
    # Position at the start of UBI data for the first read.
    self._fhandle.seek(self._start_offset)
    self._last_read_addr = self._fhandle.tell()
    self.is_valid = True
def extract_dents(ubifs, inodes, dent_node, path='', perms=False):
    """Materialize the directory entry dent_node under path (directories only
    in this variant), recursing into child entries.

    Arguments:
    Obj:ubifs     -- UBIFS object.
    Dict:inodes   -- Dict of ino/dent/file nodes keyed to inode number.
    Obj:dent_node -- Directory entry node to create.
    Str:path      -- Directory to create the entry in.
    Bool:perms    -- Apply ownership/permissions to created directories.
    """
    inode = inodes[dent_node.inum]
    dent_path = os.path.join(path, dent_node.name)
    if dent_node.type == UBIFS_ITYPE_DIR:
        try:
            if not os.path.exists(dent_path):
                os.mkdir(dent_path)
                log(extract_dents, 'Make Dir: %s' % (dent_path))
                if perms:
                    _set_file_perms(dent_path, inode)
        # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
        except Exception as e:
            error(extract_dents, 'Warn', 'DIR Fail: %s' % e)
        if 'dent' in inode:
            for dnode in inode['dent']:
                extract_dents(ubifs, inodes, dnode, dent_path, perms)
def read(self, size):
    """Read size bytes from the virtual LEB-addressed file.

    Arguments:
    Int:size -- Number of bytes to read from the current position.

    Returns:
    Str/Bytes -- The requested slice of the underlying block's data.
    """
    buf = ''
    # Floor division keeps the LEB number an exact int; int(a / b) goes
    # through a float and can lose precision for very large offsets.
    leb = self.tell() // self._ubi.leb_size
    offset = self.tell() % self._ubi.leb_size
    self._last_read_addr = (self._ubi.blocks[self._blocks[leb]].file_offset
                            + self._ubi.blocks[self._blocks[leb]].ec_hdr.data_offset
                            + offset)
    verbose_log(self, 'read loc: %s, size: %s' % (self._last_read_addr, size))
    if leb == self._last_leb:
        # Serve repeated reads of the same LEB from the cached buffer.
        self.seek(self.tell() + size)
        return self._last_buf[offset:offset+size]
    else:
        try:
            buf = self._ubi.file.read_block_data(
                self._ubi.blocks[self._blocks[leb]])
            self._last_buf = buf
            self._last_leb = leb
            self.seek(self.tell() + size)
            return buf[offset:offset+size]
        # 'except Exception, e' is Python-2-only syntax; 'as' works everywhere.
        except Exception as e:
            error(self, 'Fatal',
                  'read loc: %s, size: %s, LEB: %s, offset: %s, error: %s'
                  % (self._last_read_addr, size, leb, offset, e))
def __init__(self, ubi_file):
    """UBI image object: sort blocks, keep the newest layout copies, and
    build the contained images.

    Arguments:
    Obj:ubi_file -- UBI file object to parse.
    """
    super(ubi, self).__init__(ubi_file)
    # Partition the blocks by the role they play in the image.
    type_lists = sort.by_type(self.blocks)
    layout_list, data_list, int_vol_list, unknown_list = type_lists
    if len(layout_list) < 2:
        error(self, 'Fatal', 'Less than 2 layout blocks found.')
    # Keep only the most recent copy of each layout block.
    self._layout_blocks_list = layout.get_newest(self.blocks, layout_list)
    self._data_blocks_list = data_list
    self._int_vol_blocks_list = int_vol_list
    self._unknown_blocks_list = unknown_list
    pairs = layout.group_pairs(self.blocks, self.layout_blocks_list)
    infos = layout.associate_blocks(self.blocks, pairs, self.first_peb_num)
    self._images = [image(self.blocks, info) for info in infos]
def __init__(self, ubifs_file):
    """Parse the UBIFS superblock from ubifs_file and record the geometry.

    Arguments:
    Obj:ubifs_file -- Virtual file object covering the UBIFS data.
    """
    self.__name__ = 'UBIFS'
    self._file = ubifs_file
    try:
        self.file.reset()
        sb_chdr = nodes.common_hdr(self.file.read(UBIFS_COMMON_HDR_SZ))
        log(self, '%s file addr: %s' % (sb_chdr, self.file.last_read_addr()))
        verbose_display(sb_chdr)
        if sb_chdr.node_type == UBIFS_SB_NODE:
            # Superblock payload directly follows the common header.
            self.file.seek(UBIFS_COMMON_HDR_SZ)
            buf = self.file.read(UBIFS_SB_NODE_SZ)
            self._sb_node = nodes.sb_node(buf)
            # Geometry used for all later LEB address math.
            self._min_io_size = self._sb_node.min_io_size
            self._leb_size = self._sb_node.leb_size
            log(self, '%s file addr: %s'
                % (self._sb_node, self.file.last_read_addr()))
            verbose_display(self._sb_node)
        else:
            raise Exception('Wrong node type.')
    # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
    except Exception as e:
        error(self, 'Fatal', 'Super block error: %s' % e)
def __init__(self, path, block_size, start_offset=0, end_offset=None):
    """Open a UBI image file and record its read window.

    Arguments:
    Str:path         -- Path to the image file.
    Int:block_size   -- Physical erase block (PEB) size in bytes.
    Int:start_offset -- File offset where UBI data starts.
    Int:end_offset   -- File offset where UBI data ends (default: file size).
    """
    self.__name__ = 'UBI_File'
    # Stays False unless the whole setup below completes.
    self.is_valid = False
    try:
        log(self, 'Open Path: %s' % path)
        self._fhandle = open(path, 'rb')
    except Exception as e:
        error(self, 'Fatal', 'Open file: %s' % e)
    # Seek to the end to measure the file size.
    self._fhandle.seek(0,2)
    file_size = self.tell()
    log(self, 'File Size: %s' % file_size)
    self._start_offset = start_offset
    log(self, 'Start Offset: %s' % (self._start_offset))
    if end_offset:
        self._end_offset = end_offset
    else:
        # No explicit end: read to the end of the file.
        self._end_offset = file_size
    log(self, 'End Offset: %s' % (self._end_offset))
    self._block_size = block_size
    log(self, 'Block Size: %s' % block_size)
    # Sanity checks on the requested read window.
    if start_offset > self._end_offset:
        error(self, 'Fatal', 'Start offset larger than end offset.')
    if ( not end_offset is None ) and ( end_offset > file_size ):
        error(self, 'Fatal', 'End offset larger than file size.')
    # Position at the start of UBI data for the first read.
    self._fhandle.seek(self._start_offset)
    self._last_read_addr = self._fhandle.tell()
    self.is_valid = True
def extract_files(ubifs, out_path, perms=False):
    """Extract UBIFS contents to_path/

    Arguments:
    Obj:ubifs -- UBIFS object.
    Str:out_path -- Path to extract contents to.
    """
    try:
        inode_map = {}
        walk.index(ubifs, ubifs.master_node.root_lnum,
                   ubifs.master_node.root_offs, inode_map)
        # Only return if a directory cannot be extracted.
        if 'dent' not in inode_map[1].keys():
            print("Couldn't read dent for %s" % out_path)
            return
        # Root directory is inode 1; extract every entry beneath it.
        for dent in inode_map[1]['dent']:
            extract_dents(ubifs, inode_map, dent, out_path, perms)
    except Exception as e:
        error(extract_files, 'Fatal', '%s' % e)
def __init__(self, ubi_file):
    """UBI image object: sort blocks by type and build the contained images.

    Arguments:
    Obj:ubi_file -- UBI file object to parse.
    """
    super(ubi, self).__init__(ubi_file)
    # Partition the blocks by the role they play in the image.
    type_lists = sort.by_type(self.blocks)
    layout_list, data_list, int_vol_list, unknown_list = type_lists
    if len(layout_list) < 2:
        error(self, 'Fatal', 'Less than 2 layout blocks found.')
    self._layout_blocks_list = layout_list
    self._data_blocks_list = data_list
    self._int_vol_blocks_list = int_vol_list
    self._unknown_blocks_list = unknown_list
    pairs = layout.group_pairs(self.blocks, self.layout_blocks_list)
    infos = layout.associate_blocks(self.blocks, pairs, self.first_peb_num)
    self._images = [image(self.blocks, info) for info in infos]
def __init__(self, ubifs_file):
    """Parse a UBIFS image: read the superblock, then both master nodes.

    Promotes the backup master node when the first is bad; fatal only when
    neither master node is readable.

    Arguments:
    Obj:ubifs_file -- Virtual file object covering the UBIFS data.
    """
    self.__name__ = 'UBIFS'
    self._file = ubifs_file
    try:
        self.file.reset()
        sb_chdr = nodes.common_hdr(self.file.read(UBIFS_COMMON_HDR_SZ))
        log(self, '%s file addr: %s' % (sb_chdr, self.file.last_read_addr()))
        verbose_display(sb_chdr)
        if sb_chdr.node_type == UBIFS_SB_NODE:
            # Superblock payload directly follows the common header.
            self.file.seek(UBIFS_COMMON_HDR_SZ)
            buf = self.file.read(UBIFS_SB_NODE_SZ)
            self._sb_node = nodes.sb_node(buf, self.file.last_read_addr())
            # Geometry used for all later LEB address math.
            self._min_io_size = self._sb_node.min_io_size
            self._leb_size = self._sb_node.leb_size
            log(
                self, '%s file addr: %s' %
                (self._sb_node, self.file.last_read_addr()))
            verbose_display(self._sb_node)
        else:
            raise Exception('Wrong node type.')
    except Exception as e:
        error(self, 'Fatal', 'Super block error: %s' % e)
    # Two redundant master node copies live in consecutive LEBs starting
    # at UBIFS_MST_LNUM; try to read both.
    self._mst_nodes = [None, None]
    for i in range(0, 2):
        try:
            mst_offset = self.leb_size * (UBIFS_MST_LNUM + i)
            self.file.seek(mst_offset)
            mst_chdr = nodes.common_hdr(
                self.file.read(UBIFS_COMMON_HDR_SZ))
            log(
                self, '%s file addr: %s' %
                (mst_chdr, self.file.last_read_addr()))
            verbose_display(mst_chdr)
            if mst_chdr.node_type == UBIFS_MST_NODE:
                self.file.seek(mst_offset + UBIFS_COMMON_HDR_SZ)
                buf = self.file.read(UBIFS_MST_NODE_SZ)
                self._mst_nodes[i] = nodes.mst_node(
                    buf, self.file.last_read_addr())
                log(
                    self, '%s%s file addr: %s' %
                    (self._mst_nodes[i], i, self.file.last_read_addr()))
                verbose_display(self._mst_nodes[i])
            else:
                raise Exception('Wrong node type.')
        except Exception as e:
            # A bad copy is only a warning; overall validity decided below.
            error(self, 'Warn', 'Master block %s error: %s' % (i, e))
    if self._mst_nodes[0] is None and self._mst_nodes[1] is None:
        error(self, 'Fatal', 'No valid Master Node found.')
    elif self._mst_nodes[0] is None and self._mst_nodes[1] is not None:
        # Promote the backup master node when the primary failed.
        self._mst_nodes[0] = self._mst_nodes[1]
        self._mst_nodes[1] = None
        log(self, 'Swapping Master Nodes due to bad first node.')
def __init__(self, ubifs_file):
    """Parse a UBIFS image: read the superblock, then scan for both master
    nodes, stepping through successive copies until one fails to parse.

    Arguments:
    Obj:ubifs_file -- Virtual file object covering the UBIFS data.
    """
    self.__name__ = 'UBIFS'
    self._file = ubifs_file
    try:
        self.file.reset()
        sb_chdr = nodes.common_hdr(self.file.read(UBIFS_COMMON_HDR_SZ))
        log(self, '%s file addr: %s' % (sb_chdr, self.file.last_read_addr()))
        verbose_display(sb_chdr)
        if sb_chdr.node_type == UBIFS_SB_NODE:
            # Superblock payload directly follows the common header.
            self.file.seek(UBIFS_COMMON_HDR_SZ)
            buf = self.file.read(UBIFS_SB_NODE_SZ)
            self._sb_node = nodes.sb_node(buf)
            # Geometry used for all later LEB address math.
            self._min_io_size = self._sb_node.min_io_size
            self._leb_size = self._sb_node.leb_size
            log(
                self, '%s file addr: %s' %
                (self._sb_node, self.file.last_read_addr()))
            verbose_display(self._sb_node)
        else:
            raise Exception('Wrong node type.')
    except Exception as e:
        error(self, 'Fatal', 'Super block error: %s' % e)
    self._mst_nodes = [None, None]
    for i in range(0, 2):
        try:
            mst_offset = self.leb_size * (UBIFS_MST_LNUM + i)
            # Step through 0x1000-spaced copies, keeping the last readable
            # one — presumably to find the newest master node copy (TODO:
            # confirm against the UBIFS journal layout).
            while True:
                try:
                    self.file.seek(mst_offset)
                    mst_chdr = nodes.common_hdr(
                        self.file.read(UBIFS_COMMON_HDR_SZ))
                    log(
                        self, '%s file addr: %s' %
                        (mst_chdr, self.file.last_read_addr()))
                    verbose_display(mst_chdr)
                    if mst_chdr.node_type == UBIFS_MST_NODE:
                        self.file.seek(mst_offset + UBIFS_COMMON_HDR_SZ)
                        buf = self.file.read(UBIFS_MST_NODE_SZ)
                        self._mst_nodes[i] = nodes.mst_node(buf)
                        log(
                            self, '%s%s file addr: %s' %
                            (self._mst_nodes[i], i,
                             self.file.last_read_addr()))
                        verbose_display(self._mst_nodes[i])
                    else:
                        raise Exception('Wrong node type.')
                    mst_offset += 0x1000
                # The original used a bare 'except:', which also swallows
                # KeyboardInterrupt/SystemExit; Exception is the correct
                # boundary for "stop at the first unreadable copy".
                except Exception:
                    break
        except Exception as e:
            error(self, 'Fatal', 'Master block %s error: %s' % (i, e))
    if not self._mst_nodes[0] or not self._mst_nodes[1]:
        error(self, 'Fatal', 'Less than 2 Master blocks found.')
def my_extract_files(ubifs, extract_func):
    """Walk the UBIFS index and hand each root directory entry to
    extract_func via IMG_UBI.extract_dents.

    Arguments:
    Obj:ubifs         -- UBIFS object.
    Func:extract_func -- Callback invoked for each extracted entry.
    """
    try:
        inodes = {}
        bad_blocks = []
        walk.index(ubifs, ubifs.master_node.root_lnum,
                   ubifs.master_node.root_offs, inodes, bad_blocks)
        if len(inodes) < 2:
            raise Exception('No inodes found')
        index = 0
        for dent in inodes[1]['dent']:
            index = IMG_UBI.extract_dents(ubifs, inodes, dent, index, "",
                                          False, extract_func)
            index += 1
        if len(bad_blocks):
            # Report under this function's own name; the original attributed
            # these messages to the unrelated extract_files function.
            error(
                my_extract_files, 'Warning',
                'Data may be missing or corrupted, bad blocks, LEB [%s]'
                % ','.join(map(str, bad_blocks)))
    except Exception as e:
        error(my_extract_files, 'Error', '%s' % e)
def __init__(self, ubifs_file):
    """Parse the UBIFS superblock from ubifs_file and record the geometry.

    Arguments:
    Obj:ubifs_file -- Virtual file object covering the UBIFS data.
    """
    self.__name__ = 'UBIFS'
    self._file = ubifs_file
    try:
        self.file.reset()
        sb_chdr = nodes.common_hdr(self.file.read(UBIFS_COMMON_HDR_SZ))
        log(self, '%s file addr: %s' % (sb_chdr, self.file.last_read_addr()))
        verbose_display(sb_chdr)
        if sb_chdr.node_type == UBIFS_SB_NODE:
            # Superblock payload directly follows the common header.
            self.file.seek(UBIFS_COMMON_HDR_SZ)
            buf = self.file.read(UBIFS_SB_NODE_SZ)
            self._sb_node = nodes.sb_node(buf)
            # Geometry used for all later LEB address math.
            self._min_io_size = self._sb_node.min_io_size
            self._leb_size = self._sb_node.leb_size
            log(
                self, '%s file addr: %s' %
                (self._sb_node, self.file.last_read_addr()))
            verbose_display(self._sb_node)
        else:
            raise Exception('Wrong node type.')
    # 'except Exception, e' is Python-2-only syntax; 'as' works on 2.6+ and 3.
    except Exception as e:
        error(self, 'Fatal', 'Super block error: %s' % e)
def extract_files(ubifs, out_path, perms=False):
    """Extract UBIFS contents to_path/

    Arguments:
    Obj:ubifs -- UBIFS object.
    Str:out_path -- Path to extract contents to.
    """
    try:
        tree = {}
        bad = []
        walk.index(ubifs, ubifs.master_node.root_lnum,
                   ubifs.master_node.root_offs, tree, bad)
        if len(tree) < 2:
            raise Exception('No inodes found')
        # Root directory is inode 1; extract every entry beneath it.
        for dent in tree[1]['dent']:
            extract_dents(ubifs, tree, dent, out_path, perms)
        if len(bad):
            error(extract_files, 'Warning',
                  'Data may be missing or corrupted, bad blocks, LEB [%s]'
                  % ','.join(map(str, bad)))
    except Exception as e:
        error(extract_files, 'Error', '%s' % e)
def __init__(self, ubifs_file):
    """Parse a UBIFS image: read the superblock, then both master nodes.

    This variant treats any master-node failure as fatal and requires both
    copies to be present.

    Arguments:
    Obj:ubifs_file -- Virtual file object covering the UBIFS data.
    """
    self.__name__ = 'UBIFS'
    self._file = ubifs_file
    try:
        self.file.reset()
        sb_chdr = nodes.common_hdr(self.file.read(UBIFS_COMMON_HDR_SZ))
        log(self , '%s file addr: %s' % (sb_chdr, self.file.last_read_addr()))
        verbose_display(sb_chdr)
        if sb_chdr.node_type == UBIFS_SB_NODE:
            # Superblock payload directly follows the common header.
            self.file.seek(UBIFS_COMMON_HDR_SZ)
            buf = self.file.read(UBIFS_SB_NODE_SZ)
            self._sb_node = nodes.sb_node(buf)
            # Geometry used for all later LEB address math.
            self._min_io_size = self._sb_node.min_io_size
            self._leb_size = self._sb_node.leb_size
            log(self , '%s file addr: %s' % (self._sb_node, self.file.last_read_addr()))
            verbose_display(self._sb_node)
        else:
            raise Exception('Wrong node type.')
    except Exception as e:
        error(self, 'Fatal', 'Super block error: %s' % e)
    # Two redundant master node copies live in consecutive LEBs starting
    # at UBIFS_MST_LNUM; both must parse.
    self._mst_nodes = [None, None]
    for i in range(0, 2):
        try:
            mst_offset = self.leb_size * (UBIFS_MST_LNUM + i)
            self.file.seek(mst_offset)
            mst_chdr = nodes.common_hdr(self.file.read(UBIFS_COMMON_HDR_SZ))
            log(self , '%s file addr: %s' % (mst_chdr, self.file.last_read_addr()))
            verbose_display(mst_chdr)
            if mst_chdr.node_type == UBIFS_MST_NODE:
                self.file.seek(mst_offset + UBIFS_COMMON_HDR_SZ)
                buf = self.file.read(UBIFS_MST_NODE_SZ)
                self._mst_nodes[i] = nodes.mst_node(buf)
                log(self , '%s%s file addr: %s' % (self._mst_nodes[i], i, self.file.last_read_addr()))
                verbose_display(self._mst_nodes[i])
            else:
                raise Exception('Wrong node type.')
        except Exception as e:
            error(self, 'Fatal', 'Master block %s error: %s' % (i, e))
    if not self._mst_nodes[0] or not self._mst_nodes[1]:
        error(self, 'Fatal', 'Less than 2 Master blocks found.')
def __init__(self, path, block_size, start_offset=0, end_offset=None):
    """Open a UBI image file, record its read window, and truncate any
    trailing data that does not align to the block size.

    Arguments:
    Str:path         -- Path to the image file.
    Int:block_size   -- Physical erase block (PEB) size in bytes.
    Int:start_offset -- File offset where UBI data starts.
    Int:end_offset   -- File offset where UBI data ends (default: file size).
    """
    self.__name__ = 'UBI_File'
    # Stays False unless the whole setup below completes.
    self.is_valid = False
    try:
        log(self, 'Open Path: %s' % path)
        self._fhandle = open(path, 'rb')
    except Exception as e:
        error(self, 'Fatal', 'Open file: %s' % e)
    # Seek to the end to measure the file size.
    self._fhandle.seek(0,2)
    file_size = self.tell()
    log(self, 'File Size: %s' % file_size)
    self._start_offset = start_offset
    log(self, 'Start Offset: %s' % (self._start_offset))
    if end_offset:
        self._end_offset = end_offset
    else:
        # No explicit end: read to the end of the file.
        self._end_offset = file_size
    log(self, 'End Offset: %s' % (self._end_offset))
    self._block_size = block_size
    log(self, 'Block Size: %s' % block_size)
    # Sanity checks on the requested read window.
    if start_offset > self._end_offset:
        error(self, 'Fatal', 'Start offset larger than end offset.')
    if ( not end_offset is None ) and ( end_offset > file_size ):
        error(self, 'Fatal', 'End offset larger than file size.')
    # check if data aligns to blocks
    alignment = self._end_offset % self._block_size
    if ( alignment !=0 ):
        # Drop the unaligned tail so every read covers a whole block.
        old_end_offset=self._end_offset
        self._end_offset = self._end_offset - alignment
        log(self, 'End offset does not align with block size (0x%08X). '
            'Truncating %i bytes from file (was: %i bytes, now: %i bytes)'%(self._block_size,alignment,old_end_offset,self._end_offset))
    # Position at the start of UBI data for the first read.
    self._fhandle.seek(self._start_offset)
    self._last_read_addr = self._fhandle.tell()
    self.is_valid = True
def extract_dents(ubifs, inodes, dent_node, path='', perms=False):
    """Recreate the filesystem object described by *dent_node* under *path*.

    Recurses into directories and handles regular files, hard links,
    symlinks, block/char devices, FIFOs and sockets.

    Arguments:
    Obj:ubifs     -- UBIFS object.
    Dict:inodes   -- Dict of ino/dent/file nodes keyed to inode number.
    Obj:dent_node -- Directory entry node to extract.
    Str:path      -- Directory to extract into.
    Bool:perms    -- Attempt to restore ownership/permissions.
    """
    if dent_node.inum not in inodes:
        error(extract_dents, 'Error', 'inum: %s not found in inodes' % (dent_node.inum))
        return

    inode = inodes[dent_node.inum]
    dent_path = os.path.join(path, dent_node.name)

    if dent_node.type == UBIFS_ITYPE_DIR:
        try:
            if not os.path.exists(dent_path):
                os.mkdir(dent_path)
                log(extract_dents, 'Make Dir: %s' % (dent_path))
                if perms:
                    _set_file_perms(dent_path, inode)
        except Exception as e:
            error(extract_dents, 'Warn', 'DIR Fail: %s' % e)

        if 'dent' in inode:
            for dnode in inode['dent']:
                extract_dents(ubifs, inodes, dnode, dent_path, perms)

        _set_file_timestamps(dent_path, inode)

    elif dent_node.type == UBIFS_ITYPE_REG:
        try:
            if inode['ino'].nlink > 1:
                # Hard link: first sighting extracts the data, later
                # sightings just link to the first path.
                if 'hlink' not in inode:
                    inode['hlink'] = dent_path
                    buf = _process_reg_file(ubifs, inode, dent_path)
                    _write_reg_file(dent_path, buf)
                else:
                    os.link(inode['hlink'], dent_path)
                    log(extract_dents, 'Make Link: %s > %s' % (dent_path, inode['hlink']))
            else:
                buf = _process_reg_file(ubifs, inode, dent_path)
                _write_reg_file(dent_path, buf)

            _set_file_timestamps(dent_path, inode)

            if perms:
                _set_file_perms(dent_path, inode)
        except Exception as e:
            error(extract_dents, 'Warn', 'FILE Fail: %s' % e)

    elif dent_node.type == UBIFS_ITYPE_LNK:
        try:
            # probably will need to decompress ino data if > UBIFS_MIN_COMPR_LEN
            os.symlink('%s' % inode['ino'].data.decode('utf-8'), dent_path)
            log(extract_dents, 'Make Symlink: %s > %s' % (dent_path, inode['ino'].data))
        except Exception as e:
            error(extract_dents, 'Warn', 'SYMLINK Fail: %s' % e)

    elif dent_node.type in [UBIFS_ITYPE_BLK, UBIFS_ITYPE_CHR]:
        try:
            dev = struct.unpack('<II', inode['ino'].data)[0]
            # BUGFIX: was "if True:", which left the dummy-node fallback
            # unreachable; honor the same setting the sibling copy of this
            # function in this file uses.
            if not settings.use_dummy_devices:
                os.mknod(dent_path, inode['ino'].mode, dev)
                log(extract_dents, 'Make Device Node: %s' % (dent_path))
                if perms:
                    # BUGFIX: was _set_file_perms(path, inode) -- applied
                    # perms to the parent directory, not the device node.
                    _set_file_perms(dent_path, inode)
            else:
                log(extract_dents, 'Create dummy node.')
                _write_reg_file(dent_path, str(dev))
                if perms:
                    _set_file_perms(dent_path, inode)
        except Exception as e:
            error(extract_dents, 'Warn', 'DEV Fail: %s' % e)

    elif dent_node.type == UBIFS_ITYPE_FIFO:
        try:
            os.mkfifo(dent_path, inode['ino'].mode)
            # BUGFIX: was "% (path)" -- logged the parent directory
            # instead of the FIFO that was just created.
            log(extract_dents, 'Make FIFO: %s' % (dent_path))
            if perms:
                _set_file_perms(dent_path, inode)
        except Exception as e:
            error(extract_dents, 'Warn', 'FIFO Fail: %s : %s' % (dent_path, e))

    elif dent_node.type == UBIFS_ITYPE_SOCK:
        try:
            if settings.use_dummy_socket_file:
                _write_reg_file(dent_path, '')
                if perms:
                    _set_file_perms(dent_path, inode)
        except Exception as e:
            error(extract_dents, 'Warn', 'SOCK Fail: %s : %s' % (dent_path, e))
# NOTE(review): this line is a fragment -- it begins mid-way through an
# __init__'s master-node loop (its "def"/"try" are not on this span) and then
# carries the _get_file/_get_superblock property definitions. It still uses
# the Py2-only "except Exception, e" syntax -- TODO confirm target Python
# version; left byte-identical because a cut definition cannot be safely
# restyled from here.
log(self , '%s file addr: %s' % (mst_chdr, self.file.last_read_addr())) verbose_display(mst_chdr) if mst_chdr.node_type == UBIFS_MST_NODE: self.file.seek(mst_offset + UBIFS_COMMON_HDR_SZ) buf = self.file.read(UBIFS_MST_NODE_SZ) self._mst_nodes[i] = nodes.mst_node(buf) log(self , '%s%s file addr: %s' % (self._mst_nodes[i], i, self.file.last_read_addr())) verbose_display(self._mst_nodes[i]) else: raise Exception('Wrong node type.') except Exception, e: error(self, 'Fatal', 'Master block %s error: %s' % (i, e)) if not self._mst_nodes[0] or not self._mst_nodes[1]: error(self, 'Fatal', 'Less than 2 Master blocks found.') def _get_file(self): return self._file file = property(_get_file) def _get_superblock(self): """ Superblock Node Object Returns: Obj:Superblock Node """ return self._sb_node superblock_node = property(_get_superblock)
class ubi_file(object):
    """UBI image file object

    Arguments:
    Str:path         -- Path to file to parse
    Int:block_size   -- Erase block size of NAND in bytes.
    Int:start_offset -- (optional) Where to start looking in the file for
                        UBI data.
    Int:end_offset   -- (optional) Where to stop looking in the file.

    Methods:
    seek            -- Put file head to specified byte offset.
                       Int:offset
    read            -- Read specified bytes from file handle.
                       Int:size
    tell            -- Returns byte offset of current file location.
    read_block      -- Returns complete PEB data of provided block
                       description.
                       Obj:block
    read_block_data -- Returns LEB data only from provided block.
                       Obj:block
    reader          -- Generator that returns data from file.
    reset           -- Reset file position to start_offset.
    is_valid        -- If the object intialized okay.

    Handles all the actual file interactions, read, seek,
    extract blocks, etc.
    """

    def __init__(self, path, block_size, start_offset=0, end_offset=None):
        self.__name__ = 'UBI_File'
        self.is_valid = False
        try:
            log(self, 'Open Path: %s' % path)
            self._fhandle = open(path, 'rb')
        # Py3-compatible form; was the Py2-only "except Exception, e".
        except Exception as e:
            error(self, 'Fatal', 'Open file: %s' % e)

        # Seek to end of file to learn its total size.
        self._fhandle.seek(0, 2)
        file_size = self.tell()
        log(self, 'File Size: %s' % file_size)

        self._start_offset = start_offset
        log(self, 'Start Offset: %s' % (self._start_offset))

        if end_offset:
            self._end_offset = end_offset
        else:
            self._end_offset = file_size
        log(self, 'End Offset: %s' % (self._end_offset))

        self._block_size = block_size
        log(self, 'Block Size: %s' % block_size)

        if start_offset > self._end_offset:
            error(self, 'Fatal', 'Start offset larger than end offset.')

        # BUGFIX: bare "end_offset > file_size" raises TypeError on Py3
        # when end_offset is left at its None default (and on Py2 the
        # None comparison was always False anyway); guard explicitly.
        if end_offset is not None and end_offset > file_size:
            error(self, 'Fatal', 'End offset larger than file size.')

        self._fhandle.seek(self._start_offset)
        self._last_read_addr = self._fhandle.tell()
        self.is_valid = True
# NOTE(review): this line is a fragment -- it begins mid-way through a Py2-era
# copy of extract_dents (the hard-link branch; its "def" is not on this span)
# and ends mid-way through the device-node branch. It uses the Py2-only
# "except Exception, e" syntax and a dead "if True:" guard -- TODO reconcile
# with the other copies of extract_dents in this file; left byte-identical
# because a cut definition cannot be safely restyled from here.
if 'hlink' not in inode: inode['hlink'] = dent_path buf = _process_reg_file(ubifs, inode, dent_path) _write_reg_file(dent_path, buf) else: os.link(inode['hlink'] ,dent_path) log(extract_dents, 'Make Link: %s > %s' % (dent_path, inode['hlink'])) else: buf = _process_reg_file(ubifs, inode, dent_path) _write_reg_file(dent_path, buf) if perms: _set_file_perms(dent_path, inode) except Exception, e: error(extract_dents, 'Warn', 'FILE Fail: %s' % e) elif dent_node.type == UBIFS_ITYPE_LNK: try: # probably will need to decompress ino data if > UBIFS_MIN_COMPR_LEN os.symlink('%s' % inode['ino'].data, dent_path) log(extract_dents, 'Make Symlink: %s > %s' % (dent_path, inode['ino'].data)) except Exception, e: error(extract_dents, 'Warn', 'SYMLINK Fail: %s' % e) elif dent_node.type in [UBIFS_ITYPE_BLK, UBIFS_ITYPE_CHR]: try: dev = struct.unpack('<II', inode['ino'].data)[0] if True: os.mknod(dent_path, inode['ino'].mode, dev)
class ubifs():
    """UBIFS object

    Arguments:
    Str:path -- File path to UBIFS image.

    Attributes:
    Obj:file      -- File object
    Int:leb_size  -- Size of Logical Erase Blocks.
    Int:min_io    -- Size of min I/O from vid_hdr_offset.
    Obj:sb_node   -- Superblock node of UBIFS image LEB0
    Obj:mst_node  -- Master Node of UBIFS image LEB1
    Obj:mst_node2 -- Master Node 2 of UBIFS image LEB2
    """

    def __init__(self, ubifs_file):
        self.__name__ = 'UBIFS'
        self._file = ubifs_file
        # Superblock node lives at the start of LEB 0.
        try:
            self.file.reset()
            sb_chdr = nodes.common_hdr(self.file.read(UBIFS_COMMON_HDR_SZ))
            log(self, '%s file addr: %s' % (sb_chdr, self.file.last_read_addr()))
            verbose_display(sb_chdr)
            if sb_chdr.node_type == UBIFS_SB_NODE:
                self.file.seek(UBIFS_COMMON_HDR_SZ)
                buf = self.file.read(UBIFS_SB_NODE_SZ)
                self._sb_node = nodes.sb_node(buf)
                self._min_io_size = self._sb_node.min_io_size
                self._leb_size = self._sb_node.leb_size
                log(self, '%s file addr: %s' % (self._sb_node, self.file.last_read_addr()))
                verbose_display(self._sb_node)
            else:
                raise Exception('Wrong node type.')
        # Py3-compatible form; was the Py2-only "except Exception, e".
        except Exception as e:
            error(self, 'Fatal', 'Super block error: %s' % e)

        # Two redundant master-node copies in consecutive LEBs.
        self._mst_nodes = [None, None]
        # range() works on Py2 and Py3; was Py2-only xrange().
        for i in range(0, 2):
            try:
                mst_offset = self.leb_size * (UBIFS_MST_LNUM + i)
                self.file.seek(mst_offset)
                mst_chdr = nodes.common_hdr(self.file.read(UBIFS_COMMON_HDR_SZ))
                log(self, '%s file addr: %s' % (mst_chdr, self.file.last_read_addr()))
                verbose_display(mst_chdr)
                if mst_chdr.node_type == UBIFS_MST_NODE:
                    self.file.seek(mst_offset + UBIFS_COMMON_HDR_SZ)
                    buf = self.file.read(UBIFS_MST_NODE_SZ)
                    self._mst_nodes[i] = nodes.mst_node(buf)
                    log(self, '%s%s file addr: %s' % (self._mst_nodes[i], i, self.file.last_read_addr()))
                    verbose_display(self._mst_nodes[i])
                else:
                    raise Exception('Wrong node type.')
            except Exception as e:
                error(self, 'Fatal', 'Master block %s error: %s' % (i, e))
def extract_dents(ubifs, inodes, dent_node, path='', perms=False):
    """Recreate the filesystem object described by *dent_node* under *path*.

    Recurses into directories and handles regular files, hard links,
    symlinks, block/char devices, FIFOs and sockets.

    Arguments:
    Obj:ubifs     -- UBIFS object.
    Dict:inodes   -- Dict of ino/dent/file nodes keyed to inode number.
    Obj:dent_node -- Directory entry node to extract.
    Str:path      -- Directory to extract into.
    Bool:perms    -- Attempt to restore ownership/permissions.
    """
    if dent_node.inum not in inodes:
        error(extract_dents, 'Error', 'inum: %s not found in inodes' % (dent_node.inum))
        return

    inode = inodes[dent_node.inum]
    dent_path = os.path.join(path, dent_node.name)

    if dent_node.type == UBIFS_ITYPE_DIR:
        try:
            if not os.path.exists(dent_path):
                os.mkdir(dent_path)
                log(extract_dents, 'Make Dir: %s' % (dent_path))
                if perms:
                    _set_file_perms(dent_path, inode)
        except Exception as e:
            error(extract_dents, 'Warn', 'DIR Fail: %s' % e)

        if 'dent' in inode:
            for dnode in inode['dent']:
                extract_dents(ubifs, inodes, dnode, dent_path, perms)

        _set_file_timestamps(dent_path, inode)

    elif dent_node.type == UBIFS_ITYPE_REG:
        try:
            if inode['ino'].nlink > 1:
                # Hard link: first sighting extracts the data, later
                # sightings just link to the first path.
                if 'hlink' not in inode:
                    inode['hlink'] = dent_path
                    buf = _process_reg_file(ubifs, inode, dent_path)
                    _write_reg_file(dent_path, buf)
                else:
                    os.link(inode['hlink'], dent_path)
                    log(extract_dents, 'Make Link: %s > %s' % (dent_path, inode['hlink']))
            else:
                buf = _process_reg_file(ubifs, inode, dent_path)
                _write_reg_file(dent_path, buf)

            _set_file_timestamps(dent_path, inode)

            if perms:
                _set_file_perms(dent_path, inode)
        except Exception as e:
            error(extract_dents, 'Warn', 'FILE Fail: %s' % e)

    elif dent_node.type == UBIFS_ITYPE_LNK:
        try:
            # probably will need to decompress ino data if > UBIFS_MIN_COMPR_LEN
            os.symlink('%s' % inode['ino'].data.decode('utf-8'), dent_path)
            log(extract_dents, 'Make Symlink: %s > %s' % (dent_path, inode['ino'].data))
        except Exception as e:
            error(extract_dents, 'Warn', 'SYMLINK Fail: %s' % e)

    elif dent_node.type in [UBIFS_ITYPE_BLK, UBIFS_ITYPE_CHR]:
        try:
            dev = struct.unpack('<II', inode['ino'].data)[0]
            if not settings.use_dummy_devices:
                os.mknod(dent_path, inode['ino'].mode, dev)
                log(extract_dents, 'Make Device Node: %s' % (dent_path))
                if perms:
                    _set_file_perms(dent_path, inode)
            else:
                log(extract_dents, 'Create dummy device.')
                _write_reg_file(dent_path, str(dev))
                if perms:
                    _set_file_perms(dent_path, inode)
        except Exception as e:
            error(extract_dents, 'Warn', 'DEV Fail: %s' % e)

    elif dent_node.type == UBIFS_ITYPE_FIFO:
        try:
            os.mkfifo(dent_path, inode['ino'].mode)
            # BUGFIX: was "% (path)" -- logged the parent directory
            # instead of the FIFO that was just created.
            log(extract_dents, 'Make FIFO: %s' % (dent_path))
            if perms:
                _set_file_perms(dent_path, inode)
        except Exception as e:
            error(extract_dents, 'Warn', 'FIFO Fail: %s : %s' % (dent_path, e))

    elif dent_node.type == UBIFS_ITYPE_SOCK:
        try:
            if settings.use_dummy_socket_file:
                # NOTE(review): passes a str here while _process_reg_file
                # elsewhere produces bytes -- confirm _write_reg_file's
                # open mode handles both.
                _write_reg_file(dent_path, '')
                if perms:
                    _set_file_perms(dent_path, inode)
        except Exception as e:
            error(extract_dents, 'Warn', 'SOCK Fail: %s : %s' % (dent_path, e))
def index(ubifs, lnum, offset, inodes=None, bad_blocks=None):
    """Walk the index gathering Inode, Dir Entry, and File nodes.

    Arguments:
    Obj:ubifs       -- UBIFS object.
    Int:lnum        -- Logical erase block number.
    Int:offset      -- Offset in logical erase block.
    Dict:inodes     -- Dict of ino/dent/file nodes keyed to inode number
                       (populated in place; recursion passes it down).
    List:bad_blocks -- LEB numbers that failed to read (appended in place).

    Populates inodes in place:
        'ino'  -- Inode node.
        'data' -- List of data nodes if present.
        'dent' -- List of directory entry nodes if present.
    """
    # BUGFIX: the defaults were mutable ({} / []), so state leaked between
    # separate top-level walks; use None sentinels instead. Recursive calls
    # always pass both explicitly, so behavior within one walk is unchanged.
    if inodes is None:
        inodes = {}
    if bad_blocks is None:
        bad_blocks = []

    try:
        # Skip LEBs that previously failed to read.
        if lnum in bad_blocks:
            return

        ubifs.file.seek((ubifs.leb_size * lnum) + offset)
        buf = ubifs.file.read(UBIFS_COMMON_HDR_SZ)
        chdr = nodes.common_hdr(buf)
        log(index, '%s file addr: %s' % (chdr, ubifs.file.last_read_addr()))
        verbose_display(chdr)
        node_buf = ubifs.file.read(chdr.len - UBIFS_COMMON_HDR_SZ)
        file_offset = ubifs.file.last_read_addr()
    except Exception as e:
        # Optionally treat unreadable blocks as warnings and remember them.
        if str(e) == 'Bad Read Offset Request' and settings.warn_only_block_read_errors:
            bad_blocks.append(lnum)
            return
        else:
            error(index, 'Fatal', 'LEB: %s, UBIFS offset: %s, error: %s' % (lnum, ((ubifs.leb_size * lnum) + offset), e))

    if chdr.node_type == UBIFS_IDX_NODE:
        try:
            idxn = nodes.idx_node(node_buf)
        except Exception as e:
            if settings.warn_only_block_read_errors:
                error(index, 'Error', 'Problem at file address: %s extracting idx_node: %s' % (file_offset, e))
                return
            else:
                error(index, 'Fatal', 'Problem at file address: %s extracting idx_node: %s' % (file_offset, e))

        log(index, '%s file addr: %s' % (idxn, file_offset))
        verbose_display(idxn)

        # Recurse into every branch of the index node.
        branch_idx = 0
        for branch in idxn.branches:
            verbose_log(index, '-------------------')
            log(index, '%s file addr: %s' % (branch, file_offset + UBIFS_IDX_NODE_SZ + (branch_idx * UBIFS_BRANCH_SZ)))
            verbose_display(branch)
            index(ubifs, branch.lnum, branch.offs, inodes, bad_blocks)
            branch_idx += 1

    elif chdr.node_type == UBIFS_INO_NODE:
        try:
            inon = nodes.ino_node(node_buf)
        except Exception as e:
            if settings.warn_only_block_read_errors:
                error(index, 'Error', 'Problem at file address: %s extracting ino_node: %s' % (file_offset, e))
                return
            else:
                error(index, 'Fatal', 'Problem at file address: %s extracting ino_node: %s' % (file_offset, e))

        ino_num = inon.key['ino_num']
        log(index, '%s file addr: %s, ino num: %s' % (inon, file_offset, ino_num))
        verbose_display(inon)

        if ino_num not in inodes:
            inodes[ino_num] = {}
        inodes[ino_num]['ino'] = inon

    elif chdr.node_type == UBIFS_DATA_NODE:
        try:
            datn = nodes.data_node(node_buf, (ubifs.leb_size * lnum) + UBIFS_COMMON_HDR_SZ + offset + UBIFS_DATA_NODE_SZ)
        except Exception as e:
            if settings.warn_only_block_read_errors:
                error(index, 'Error', 'Problem at file address: %s extracting data_node: %s' % (file_offset, e))
                return
            else:
                error(index, 'Fatal', 'Problem at file address: %s extracting data_node: %s' % (file_offset, e))

        ino_num = datn.key['ino_num']
        log(index, '%s file addr: %s, ino num: %s' % (datn, file_offset, ino_num))
        verbose_display(datn)

        if ino_num not in inodes:
            inodes[ino_num] = {}
        if 'data' not in inodes[ino_num]:
            inodes[ino_num]['data'] = []
        inodes[ino_num]['data'].append(datn)

    elif chdr.node_type == UBIFS_DENT_NODE:
        try:
            dn = nodes.dent_node(node_buf)
        except Exception as e:
            if settings.warn_only_block_read_errors:
                error(index, 'Error', 'Problem at file address: %s extracting dent_node: %s' % (file_offset, e))
                return
            else:
                error(index, 'Fatal', 'Problem at file address: %s extracting dent_node: %s' % (file_offset, e))

        ino_num = dn.key['ino_num']
        log(index, '%s file addr: %s, ino num: %s' % (dn, file_offset, ino_num))
        verbose_display(dn)

        if ino_num not in inodes:
            inodes[ino_num] = {}
        if 'dent' not in inodes[ino_num]:
            inodes[ino_num]['dent'] = []
        inodes[ino_num]['dent'].append(dn)
# NOTE(review): this line holds a fragment (a commented-out guard plus a
# "return {...}" that belongs to a key-parsing function whose "def" is not
# visible here) collapsed together with decompress(). decompress() still uses
# the Py2-only "except Exception, e" syntax, the third-party lzo module, and
# str (not bytes) literals for the LZO header -- TODO confirm target Python
# version before reformatting; left byte-identical because the leading return
# fragment's indentation context cannot be reconstructed from here.
#if key_type < UBIFS_KEY_TYPES_CNT: return {'type':key_type, 'ino_num':ino_num, 'khash': khash} def decompress(ctype, unc_len, data): """Decompress data. Arguments: Int:ctype -- Compression type LZO, ZLIB (*currently unused*). Int:unc_len -- Uncompressed data lenth. Str:data -- Data to be uncompessed. Returns: Uncompressed Data. """ if ctype == UBIFS_COMPR_LZO: try: return lzo.decompress(''.join(('\xf0', struct.pack('>I', unc_len), data))) except Exception, e: error(decompress, 'Warn', 'LZO Error: %s' % e) elif ctype == UBIFS_COMPR_ZLIB: try: return zlib.decompress(data, -11) except Exception, e: error(decompress, 'Warn', 'ZLib Error: %s' % e) else: return data
# NOTE(review): this line is a fragment -- it begins mid-way through an
# __init__'s master-node loop (its "def"/"for"/"try" are not on this span)
# and then carries the _get_file/_get_superblock property definitions. It
# still uses the Py2-only "except Exception, e" syntax -- TODO confirm target
# Python version; left byte-identical because a cut definition cannot be
# safely restyled from here.
if mst_chdr.node_type == UBIFS_MST_NODE: self.file.seek(mst_offset + UBIFS_COMMON_HDR_SZ) buf = self.file.read(UBIFS_MST_NODE_SZ) self._mst_nodes[i] = nodes.mst_node(buf) log( self, '%s%s file addr: %s' % (self._mst_nodes[i], i, self.file.last_read_addr())) verbose_display(self._mst_nodes[i]) else: raise Exception('Wrong node type.') except Exception, e: error(self, 'Fatal', 'Master block %s error: %s' % (i, e)) if not self._mst_nodes[0] or not self._mst_nodes[1]: error(self, 'Fatal', 'Less than 2 Master blocks found.') def _get_file(self): return self._file file = property(_get_file) def _get_superblock(self): """ Superblock Node Object Returns: Obj:Superblock Node """ return self._sb_node superblock_node = property(_get_superblock)