def __init__(self, ubi_file):
    """Build a UBI image object from an open UBI file object.

    Extracts all erase blocks, sorts them by type, derives geometry
    (min I/O size, LEB size) and assembles one image object per layout
    volume found.
    """
    self.__name__ = 'UBI'
    self._file = ubi_file
    self._first_peb_num = 0
    # Scan the file and collect every PEB (physical erase block).
    self._blocks = extract_blocks(self)
    self._block_count = len(self.blocks)

    if self._block_count <= 0:
        error(self, 'Fatal', 'No blocks found.')

    # Partition blocks into layout / data / internal-volume / unknown.
    layout_list, data_list, int_vol_list, unknown_list = sort.by_type(self.blocks)

    # UBI keeps two redundant copies of the volume layout.
    if len(layout_list) < 2:
        error(self, 'Fatal', 'Less than 2 layout blocks found.')

    self._layout_blocks_list = layout_list
    self._data_blocks_list = data_list
    self._int_vol_blocks_list = int_vol_list
    self._unknown_blocks_list = unknown_list

    # Geometry is identical for all blocks, so any block will do.
    arbitrary_block = self.blocks.itervalues().next()
    self._min_io_size = arbitrary_block.ec_hdr.vid_hdr_offset
    self._leb_size = self.file.block_size - arbitrary_block.ec_hdr.data_offset

    # Pair up layout blocks per volume, then associate data blocks.
    layout_pairs = layout.group_pairs(self.blocks, self.layout_blocks_list)
    layout_infos = layout.associate_blocks(self.blocks, layout_pairs, self.first_peb_num)

    # One image object per layout-volume description.
    self._images = []
    for i in range(0, len(layout_infos)):
        self._images.append(image(self.blocks, layout_infos[i]))
def group_pairs(blocks, layout_blocks_list): """Sort a list of layout blocks into pairs Arguments: List:blocks -- List of block objects List:layout_blocks -- List of layout block indexes Returns: List -- Layout block pair indexes grouped in a list """ try: layouts_grouped = [[blocks[layout_blocks_list[0]].peb_num]] for l in layout_blocks_list[1:]: for lnd in layouts_grouped: if blocks[l].vtbl_recs[0].name == blocks[lnd[0]].vtbl_recs[0].name: lnd.append(blocks[l].peb_num) break else: layouts_grouped.append([blocks[l].peb_num]) log(group_pairs, layouts_grouped) return layouts_grouped except Exception, e: error(group_pairs, 'Fatal', e)
def _process_reg_file(ubifs, inode, path):
    """Reassemble a regular file's raw contents from its UBIFS data nodes.

    Arguments:
    Obj:ubifs  -- UBIFS object (provides the backing virtual file).
    Dict:inode -- Inode record; 'data' holds the file's data nodes.
    Str:path   -- Destination path (used for logging only here).

    NOTE(review): this excerpt never returns buf -- presumably the
    function continues beyond this chunk; confirm against full source.
    """
    try:
        buf = ''
        if 'data' in inode:
            compr_type = 0
            # Data nodes are keyed by block index (khash); process in order.
            sorted_data = sorted(inode['data'], key=lambda x: x.key['khash'])
            last_khash = sorted_data[0].key['khash'] - 1
            for data in sorted_data:
                # If data nodes are missing in sequence, fill in blanks
                # with \x00 * UBIFS_BLOCK_SIZE
                if data.key['khash'] - last_khash != 1:
                    while 1 != (data.key['khash'] - last_khash):
                        buf += '\x00' * UBIFS_BLOCK_SIZE
                        last_khash += 1

                compr_type = data.compr_type
                # Read the compressed payload straight from the image.
                ubifs.file.seek(data.offset)
                d = ubifs.file.read(data.compr_len)
                buf += decompress(compr_type, data.size, d)
                last_khash = data.key['khash']

            verbose_log(
                _process_reg_file, 'ino num: %s, compression: %s, path: %s' %
                (inode['ino'].key['ino_num'], compr_type, path))

    except Exception, e:
        error(_process_reg_file, 'Warn', 'inode num:%s :%s' %
              (inode['ino'].key['ino_num'], e))
def group_pairs(blocks, layout_blocks_list):
    """Sort a list of layout blocks into pairs

    Arguments:
    List:blocks            -- List of block objects
    List:layout_blocks     -- List of layout block indexes

    Returns:
    List -- Layout block pair indexes grouped in a list
    """
    try:
        # Seed grouping with the first layout block's PEB number.
        layouts_grouped = [[blocks[layout_blocks_list[0]].peb_num]]

        for l in layout_blocks_list[1:]:
            # Group by volume-table record name: layout blocks of the
            # same volume pair up together.
            for lnd in layouts_grouped:
                if blocks[l].vtbl_recs[0].name == blocks[
                        lnd[0]].vtbl_recs[0].name:
                    lnd.append(blocks[l].peb_num)
                    break
            else:
                # for/else: no existing group matched; start a new one.
                layouts_grouped.append([blocks[l].peb_num])

        log(group_pairs, layouts_grouped)
        return layouts_grouped

    except Exception, e:
        error(group_pairs, 'Fatal', e)
def guess_start_offset(path):
    """Scan a file in chunks for the first UBI or UBIFS magic string.

    Arguments:
    Str:path -- Path to the file to scan.

    Returns:
    Int -- Absolute byte offset of the first UBI/UBIFS signature found.
    """
    file_offset = 0
    f = open(path, 'rb')
    try:
        f.seek(0, 2)
        # +1 so range() still issues a final read covering a short tail.
        file_size = f.tell() + 1
        f.seek(0)

        for _ in range(0, file_size, FILE_CHUNK_SZ):
            buf = f.read(FILE_CHUNK_SZ)
            ubi_loc = buf.find(UBI_EC_HDR_MAGIC)
            ubifs_loc = buf.find(UBIFS_NODE_MAGIC)

            if ubi_loc == -1 and ubifs_loc == -1:
                file_offset += FILE_CHUNK_SZ
                continue
            else:
                # Treat a missing magic as "past end of file" so the
                # comparison below picks the one that was actually found.
                if ubi_loc == -1:
                    ubi_loc = file_size + 1
                elif ubifs_loc == -1:
                    ubifs_loc = file_size + 1

                if ubi_loc < ubifs_loc:
                    return file_offset + ubi_loc
                elif ubifs_loc < ubi_loc:
                    return file_offset + ubifs_loc
                else:
                    error(guess_start_offset, 'Fatal',
                          'Could not determine start offset.')
    finally:
        # Fix: the original only closed the handle when no magic was ever
        # found -- every successful return leaked the file descriptor.
        f.close()
def read(self, size):
    """Read *size* bytes from the virtual LEB stream at the current position.

    Caches the most recently read block so sequential reads within one
    LEB hit memory instead of re-reading the image file.

    NOTE(review): the returned slice is taken from a single block buffer,
    so a read spanning a LEB boundary returns fewer than *size* bytes --
    presumably callers never read across LEBs; confirm against callers.
    """
    buf = ''
    # Translate the linear position into (LEB index, offset within LEB).
    leb = int(self.tell() / self._ubi.leb_size)
    offset = self.tell() % self._ubi.leb_size

    # Physical file address of the data being read (for logging/errors).
    self._last_read_addr = self._ubi.blocks[
        self._blocks[leb]].file_offset + self._ubi.blocks[
            self._blocks[leb]].ec_hdr.data_offset + offset

    verbose_log(self, 'read loc: %s, size: %s' % (self._last_read_addr, size))

    if leb == self._last_leb:
        # Same LEB as the previous read: serve from the cached buffer.
        self.seek(self.tell() + size)
        return self._last_buf[offset:offset + size]
    else:
        try:
            buf = self._ubi.file.read_block_data(
                self._ubi.blocks[self._blocks[leb]])
            # Remember this block for subsequent reads.
            self._last_buf = buf
            self._last_leb = leb
            self.seek(self.tell() + size)
            return buf[offset:offset + size]
        except Exception, e:
            error(
                self, 'Fatal',
                'read loc: %s, size: %s, LEB: %s, offset: %s, error: %s' %
                (self._last_read_addr, size, leb, offset, e))
def create_output_dir(outpath): if not os.path.exists(outpath): try: os.makedirs(outpath) log(create_output_dir, 'Created output path: %s' % outpath) except Exception, e: error(create_output_dir, 'Fatal', '%s' % e)
def index(ubifs, lnum, offset, inodes={}): """Walk the index gathering Inode, Dir Entry, and File nodes. Arguments: Obj:ubifs -- UBIFS object. Int:lnum -- Logical erase block number. Int:offset -- Offset in logical erase block. Dict:inodes -- Dict of ino/dent/file nodes keyed to inode number. Returns: Dict:inodes -- Dict of ino/dent/file nodes keyed to inode number. 'ino' -- Inode node. 'data' -- List of data nodes if present. 'dent' -- List of directory entry nodes if present. """ try: ubifs.file.seek((ubifs.leb_size * lnum) + offset) buf = ubifs.file.read(UBIFS_COMMON_HDR_SZ) chdr = nodes.common_hdr(buf) log(index, '%s file addr: %s' % (chdr, ubifs.file.last_read_addr())) verbose_display(chdr) node_buf = ubifs.file.read(chdr.len - UBIFS_COMMON_HDR_SZ) file_offset = ubifs.file.last_read_addr() except Exception, e: error( index, 'Fatal', 'leb: %s, ubifs offset: %s, error: %s' % (lnum, ((ubifs.leb_size * lnum) + offset), e))
def index(ubifs, lnum, offset, inodes={}): """Walk the index gathering Inode, Dir Entry, and File nodes. Arguments: Obj:ubifs -- UBIFS object. Int:lnum -- Logical erase block number. Int:offset -- Offset in logical erase block. Dict:inodes -- Dict of ino/dent/file nodes keyed to inode number. Returns: Dict:inodes -- Dict of ino/dent/file nodes keyed to inode number. 'ino' -- Inode node. 'data' -- List of data nodes if present. 'dent' -- List of directory entry nodes if present. """ try: ubifs.file.seek((ubifs.leb_size * lnum) + offset) buf = ubifs.file.read(UBIFS_COMMON_HDR_SZ) chdr = nodes.common_hdr(buf) log(index , '%s file addr: %s' % (chdr, ubifs.file.last_read_addr())) verbose_display(chdr) node_buf = ubifs.file.read(chdr.len - UBIFS_COMMON_HDR_SZ) file_offset = ubifs.file.last_read_addr() except Exception, e: error(index, 'Fatal', 'buf read, %s' % (e))
def guess_start_offset(path):
    """Scan a file in chunks for the first UBI or UBIFS magic string.

    Arguments:
    Str:path -- Path to the file to scan.

    Returns:
    Int -- Absolute byte offset of the first UBI/UBIFS signature found.

    NOTE(review): f.close() is only reached when no magic is ever found;
    every successful return leaks the file handle -- consider try/finally.
    """
    file_offset = 0
    f = open(path, 'rb')
    f.seek(0,2)
    # +1 so range() still issues a final read covering a short tail.
    file_size = f.tell()+1
    f.seek(0)

    for _ in range(0, file_size, FILE_CHUNK_SZ):
        buf = f.read(FILE_CHUNK_SZ)
        ubi_loc = buf.find(UBI_EC_HDR_MAGIC)
        ubifs_loc = buf.find(UBIFS_NODE_MAGIC)

        if ubi_loc == -1 and ubifs_loc == -1:
            file_offset += FILE_CHUNK_SZ
            continue
        else:
            # Treat a missing magic as "past end of file" so the
            # comparison below picks the one actually found.
            if ubi_loc == -1:
                ubi_loc = file_size + 1
            elif ubifs_loc == -1:
                ubifs_loc = file_size + 1

            if ubi_loc < ubifs_loc:
                return file_offset + ubi_loc
            elif ubifs_loc < ubi_loc:
                return file_offset + ubifs_loc
            else:
                error(guess_start_offset, 'Fatal',
                      'Could not determine start offset.')
    f.close()
def __init__(self, path, block_size, start_offset=0, end_offset=None): self.__name__ = 'UBI_File' self.is_valid = False try: log(self, 'Open Path: %s' % path) self._fhandle = open(path, 'rb') except Exception, e: error(self, 'Fatal', 'Open file: %s' % e)
def create_output_dir(outpath): if os.path.exists(outpath): if os.listdir(outpath): error(create_output_dir, 'Fatal', 'Output directory is not empty. %s' % outpath) else: try: os.makedirs(outpath) log(create_output_dir, 'Created output path: %s' % outpath) except Exception, e: error(create_output_dir, 'Fatal', '%s' % e)
def __init__(self, ubi, block_list):
    """Present a list of UBI blocks as one contiguous virtual file."""
    self.__name__ = 'leb_virtual_file'
    self.is_valid = False
    self._ubi = ubi
    self._last_read_addr = 0

    if len(block_list):
        # Order blocks by LEB number so linear reads walk them in sequence.
        self._blocks = sort.by_leb(block_list)
        self._seek = 0
        # -1 means "no block cached yet" for the read() fast path.
        self._last_leb = -1
        self._last_buf = ''
        self.is_valid = True
    else:
        error(self, 'Info', 'Empty block list')
def getReportTypes(self, id):
    """Fetch and cache report-type metadata for a journal event id.

    Populates codexEmitter.reporttypes keyed by journalID on first use;
    calls for an already-cached id are no-ops.
    """
    if not codexEmitter.reporttypes.get(id):
        url = "{}/reporttypes?journalID={}&_limit=1000".format(self.getUrl(), id)
        debug(url)
        # Fix: the original re-built an identical URL string inline for the
        # request instead of reusing the one just constructed and logged,
        # inviting the two copies to drift apart.
        r = requests.get(url)
        if r.status_code == requests.codes.ok:
            for exc in r.json():
                codexEmitter.reporttypes["{}".format(exc["journalID"])] = {
                    "endpoint": exc["endpoint"],
                    "location": exc["location"],
                    "type": exc["type"]
                }
        else:
            error("error in getReportTypes")
def enter(self, event): type = event.widget["text"] # clear it if it exists for col in self.tooltipcol1: col["text"] = "" try: col.grid() col.grid_remove() except: error("Col1 grid_remove error") for col in self.tooltipcol2: col["text"] = "" try: col.grid() col.grid_remove() except: error("Col2 grid_remove error") poicount = 0 # need to initialise if not exists if len(self.tooltipcol1) == 0: self.tooltipcol1.append(tk.Label(self.tooltiplist, text="")) self.tooltipcol2.append(tk.Label(self.tooltiplist, text="")) for poi in self.poidata: if poi.get("hud_category") == type: ## add a new label if it dont exist if len(self.tooltipcol1) == poicount: self.tooltipcol1.append(tk.Label(self.tooltiplist, text=poi.get("english_name"))) self.tooltipcol2.append(tk.Label(self.tooltiplist, text=poi.get("body"))) else: ## just set the label self.tooltipcol1[poicount]["text"] = poi.get("english_name") self.tooltipcol2[poicount]["text"] = poi.get("body") # remember to grid them self.tooltipcol1[poicount].grid(row=poicount, column=0, columnspan=1, sticky="NSEW") self.tooltipcol2[poicount].grid(row=poicount, column=1, sticky="NSEW") poicount = poicount + 1 if poicount == 0: self.tooltipcol1[poicount]["text"] = CodexTypes.tooltips.get(type) self.tooltipcol1[poicount].grid(row=poicount, column=0, columnspan=2) self.tooltipcol2[poicount].grid_remove() # self.tooltip.grid(sticky="NSEW") self.tooltiplist.grid(sticky="NSEW")
def extract_files(ubifs, out_path, perms=False): """Extract UBIFS contents to_path/ Arguments: Obj:ubifs -- UBIFS object. Str:out_path -- Path to extract contents to. """ try: inodes = {} walk.index(ubifs, ubifs.master_node.root_lnum, ubifs.master_node.root_offs, inodes) for dent in inodes[1]['dent']: extract_dents(ubifs, inodes, dent, out_path, perms) except Exception, e: error(extract_files, 'Fatal', '%s' % e)
def decompress(ctype, unc_len, data): """Decompress data. Arguments: Int:ctype -- Compression type LZO, ZLIB (*currently unused*). Int:unc_len -- Uncompressed data lenth. Str:data -- Data to be uncompessed. Returns: Uncompressed Data. """ if ctype == UBIFS_COMPR_LZO: try: return lzo.decompress(''.join(('\xf0', struct.pack('>I', unc_len), data))) except Exception, e: error(decompress, 'Warn', 'LZO Error: %s' % e)
def guess_filetype(path, start_offset=0):
    """Classify an image file by the 4-byte magic at start_offset.

    Returns the matching magic constant (UBI or UBIFS), or None after
    reporting a fatal error when neither signature matches.
    """
    with open(path, 'rb') as fh:
        fh.seek(start_offset)
        magic = fh.read(4)

    if magic == UBI_EC_HDR_MAGIC:
        log(guess_filetype, 'File looks like a UBI image.')
        return UBI_EC_HDR_MAGIC

    if magic == UBIFS_NODE_MAGIC:
        log(guess_filetype, 'File looks like a UBIFS image.')
        return UBIFS_NODE_MAGIC

    error(guess_filetype, 'Fatal', 'Could not determine file type.')
    return None
def decompress(ctype, unc_len, data):
    """Decompress data.

    Arguments:
    Int:ctype    -- Compression type LZO, ZLIB (*currently unused*).
    Int:unc_len  -- Uncompressed data lenth.
    Str:data     -- Data to be uncompessed.

    Returns:
    Uncompressed Data.

    NOTE(review): this copy only handles LZO and implicitly returns None
    for every other ctype, unlike the complete variants elsewhere in this
    codebase that also handle ZLIB and pass plain data through -- confirm
    which version is authoritative.
    """
    if ctype == UBIFS_COMPR_LZO:
        try:
            # python-lzo expects a lzo1x header: magic byte + big-endian
            # uncompressed length, which UBIFS strips from stored data.
            return lzo.decompress(''.join(
                ('\xf0', struct.pack('>I', unc_len), data)))
        except Exception, e:
            error(decompress, 'Warn', 'LZO Error: %s' % e)
def extract_dents(ubifs, inodes, dent_node, path='', perms=False):
    """Recursively extract a directory entry and its children to *path*.

    Arguments:
    Obj:ubifs      -- UBIFS object.
    Dict:inodes    -- Inode records keyed by inode number.
    Obj:dent_node  -- Directory entry node to extract.
    Str:path       -- Destination directory on the host filesystem.
    Bool:perms     -- Apply original ownership/permissions when True.

    NOTE(review): this excerpt only shows the directory branch; handling
    of regular files/links presumably continues beyond this chunk.
    """
    inode = inodes[dent_node.inum]
    dent_path = os.path.join(path, dent_node.name)

    if dent_node.type == UBIFS_ITYPE_DIR:
        try:
            if not os.path.exists(dent_path):
                os.mkdir(dent_path)
                log(extract_dents, 'Make Dir: %s' % (dent_path))

                if perms:
                    _set_file_perms(dent_path, inode)
        except Exception, e:
            error(extract_dents, 'Warn', 'DIR Fail: %s' % e)

        if 'dent' in inode:
            # Recurse into the directory's own entries.
            for dnode in inode['dent']:
                extract_dents(ubifs, inodes, dnode, dent_path, perms)
def read(self, size):
    """Read *size* bytes from the virtual LEB stream at the current position.

    Caches the most recently read block so sequential reads within one
    LEB are served from memory.

    NOTE(review): the slice comes from a single block buffer, so a read
    spanning a LEB boundary returns fewer than *size* bytes -- presumably
    callers never read across LEBs; confirm against callers.
    """
    buf = ''
    # Translate linear position into (LEB index, offset within LEB).
    leb = int(self.tell() / self._ubi.leb_size)
    offset = self.tell() % self._ubi.leb_size

    # Physical file address of the data being read (for logging/errors).
    self._last_read_addr = self._ubi.blocks[self._blocks[leb]].file_offset + self._ubi.blocks[self._blocks[leb]].ec_hdr.data_offset + offset

    verbose_log(self, 'read loc: %s, size: %s' % (self._last_read_addr, size))

    if leb == self._last_leb:
        # Cache hit: same LEB as the previous read.
        self.seek(self.tell() + size)
        return self._last_buf[offset:offset+size]
    else:
        try:
            buf = self._ubi.file.read_block_data(self._ubi.blocks[self._blocks[leb]])
            # Remember this block for subsequent reads.
            self._last_buf = buf
            self._last_leb = leb
            self.seek(self.tell() + size)
            return buf[offset:offset+size]
        except Exception, e:
            error(self, 'Fatal', 'read loc: %s, size: %s, LEB: %s, offset: %s, error: %s' % (self._last_read_addr, size, leb, offset, e))
def __init__(self, ubifs_file):
    """Parse the UBIFS superblock from the given virtual file object."""
    self.__name__ = 'UBIFS'
    self._file = ubifs_file
    try:
        self.file.reset()
        # Every UBIFS node begins with a common header; read and check it.
        sb_chdr = nodes.common_hdr(self.file.read(UBIFS_COMMON_HDR_SZ))
        log(self , '%s file addr: %s' % (sb_chdr, self.file.last_read_addr()))
        verbose_display(sb_chdr)

        if sb_chdr.node_type == UBIFS_SB_NODE:
            self.file.seek(UBIFS_COMMON_HDR_SZ)
            buf = self.file.read(UBIFS_SB_NODE_SZ)
            self._sb_node = nodes.sb_node(buf)
            # Filesystem geometry comes from the superblock node.
            self._min_io_size = self._sb_node.min_io_size
            self._leb_size = self._sb_node.leb_size
            log(self , '%s file addr: %s' % (self._sb_node, self.file.last_read_addr()))
            verbose_display(self._sb_node)
        else:
            raise Exception('Wrong node type.')

    except Exception, e:
        error(self, 'Fatal', 'Super block error: %s' % e)
def run(self):
    """Send this codex entry to the Canonn gSubmitCodex endpoint."""
    debug("sending gSubmitCodex")
    url = "https://us-central1-canonn-api-236217.cloudfunctions.net/submitCodex?cmdrName={}".format(self.cmdr)
    url = url + "&system={}".format(self.system)
    url = url + "&body={}".format(self.body)
    url = url + "&x={}".format(self.x)
    url = url + "&y={}".format(self.y)
    url = url + "&z={}".format(self.z)
    url = url + "&latitude={}".format(self.lat)
    url = url + "&longitude={}".format(self.lon)
    url = url + "&entryid={}".format(self.entry.get("EntryID"))
    url = url + "&name={}".format(self.entry.get("Name").encode('utf8'))
    url = url + "&name_localised={}".format(self.entry.get("Name_Localised").encode('utf8'))
    url = url + "&category={}".format(self.entry.get("Category").encode('utf8'))
    url = url + "&category_localised={}".format(self.entry.get("Category_Localised").encode('utf8'))
    url = url + "&sub_category={}".format(self.entry.get("SubCategory").encode('utf8'))
    url = url + "&sub_category_localised={}".format(self.entry.get("SubCategory_Localised").encode('utf8'))
    # Fix: "&reg" had been mangled into the "(R)" symbol (HTML-entity
    # conversion), corrupting the two region query parameter names.
    url = url + "&region_name={}".format(self.entry.get("Region").encode('utf8'))
    url = url + "&region_name_localised={}".format(self.entry.get("Region_Localised").encode('utf8'))
    url = url + "&is_beta={}".format(self.is_beta)
    debug(url)

    r = requests.get(url)
    if not r.status_code == requests.codes.ok:
        error("gSubmitCodex {} ".format(url))
        error(r.status_code)
        error(r.json())
def edsmGetSystem(cls, system):
    """Look up galactic coordinates for *system* via EDSM, with caching."""
    if not system:
        error("system is null")
        return

    if system in cls.systemCache:
        # Cache hit: coordinates were fetched earlier this session.
        return cls.systemCache[system]

    url = ('https://www.edsm.net/api-v1/system?systemName=' +
           quote_plus(system) + '&showCoordinates=1')
    response = requests.get(url)
    coords = response.json()["coords"]
    result = (coords["x"], coords["y"], coords["z"])
    cls.systemCache[system] = result
    return result
def __init__(self, ubifs_file):
    """Parse the UBIFS superblock from the given virtual file object."""
    self.__name__ = 'UBIFS'
    self._file = ubifs_file
    try:
        self.file.reset()
        # Every UBIFS node begins with a common header; read and check it.
        sb_chdr = nodes.common_hdr(self.file.read(UBIFS_COMMON_HDR_SZ))
        log(self, '%s file addr: %s' % (sb_chdr, self.file.last_read_addr()))
        verbose_display(sb_chdr)

        if sb_chdr.node_type == UBIFS_SB_NODE:
            self.file.seek(UBIFS_COMMON_HDR_SZ)
            buf = self.file.read(UBIFS_SB_NODE_SZ)
            self._sb_node = nodes.sb_node(buf)
            # Filesystem geometry comes from the superblock node.
            self._min_io_size = self._sb_node.min_io_size
            self._leb_size = self._sb_node.leb_size
            log(
                self, '%s file addr: %s' %
                (self._sb_node, self.file.last_read_addr()))
            verbose_display(self._sb_node)
        else:
            raise Exception('Wrong node type.')

    except Exception, e:
        error(self, 'Fatal', 'Super block error: %s' % e)
if 'hlink' not in inode: inode['hlink'] = dent_path buf = _process_reg_file(ubifs, inode, dent_path) _write_reg_file(dent_path, buf) else: os.link(inode['hlink'] ,dent_path) log(extract_dents, 'Make Link: %s > %s' % (dent_path, inode['hlink'])) else: buf = _process_reg_file(ubifs, inode, dent_path) _write_reg_file(dent_path, buf) if perms: _set_file_perms(dent_path, inode) except Exception, e: error(extract_dents, 'Warn', 'FILE Fail: %s' % e) elif dent_node.type == UBIFS_ITYPE_LNK: try: # probably will need to decompress ino data if > UBIFS_MIN_COMPR_LEN os.symlink('%s' % inode['ino'].data, dent_path) log(extract_dents, 'Make Symlink: %s > %s' % (dent_path, inode['ino'].data)) except Exception, e: error(extract_dents, 'Warn', 'SYMLINK Fail: %s : %s' % (inode['ino'].data, dent_path)) elif dent_node.type in [UBIFS_ITYPE_BLK, UBIFS_ITYPE_CHR]: try: dev = struct.unpack('<II', inode['ino'].data)[0] if True: os.mknod(dent_path, inode['ino'].mode, dev) log(extract_dents, 'Make Device Node: %s' % (dent_path))
class ubi_file(object):
    """UBI image file object

    Arguments:
    Str:path         -- Path to file to parse
    Int:block_size   -- Erase block size of NAND in bytes.
    Int:start_offset -- (optional) Where to start looking in the file for
                        UBI data.
    Int:end_offset   -- (optional) Where to stop looking in the file.

    Methods:
    seek            -- Put file head to specified byte offset.
        Int:offset
    read            -- Read specified bytes from file handle.
        Int:size
    tell            -- Returns byte offset of current file location.
    read_block      -- Returns complete PEB data of provided block
                       description.
        Obj:block
    read_block_data -- Returns LEB data only from provided block.
        Obj:block
    reader          -- Generator that returns data from file.
    reset           -- Reset file position to start_offset.
    is_valid        -- If the object intialized okay.

    Handles all the actual file interactions, read, seek,
    extract blocks, etc.
    """

    def __init__(self, path, block_size, start_offset=0, end_offset=None):
        self.__name__ = 'UBI_File'
        self.is_valid = False
        try:
            log(self, 'Open Path: %s' % path)
            self._fhandle = open(path, 'rb')
        except Exception, e:
            error(self, 'Fatal', 'Open file: %s' % e)

        # Determine total file size by seeking to the end.
        self._fhandle.seek(0, 2)
        file_size = self.tell()
        log(self, 'File Size: %s' % file_size)

        self._start_offset = start_offset
        log(self, 'Start Offset: %s' % (self._start_offset))

        # Default the scan window end to the end of the file.
        if end_offset:
            self._end_offset = end_offset
        else:
            self._end_offset = file_size
        log(self, 'End Offset: %s' % (self._end_offset))

        self._block_size = block_size
        log(self, 'Block Size: %s' % block_size)

        # Sanity-check the requested window against the file size.
        # NOTE(review): when end_offset is None, `None > file_size` is
        # False on Python 2, so the second check is skipped by design.
        if start_offset > self._end_offset:
            error(self, 'Fatal', 'Start offset larger than end offset.')
        if end_offset > file_size:
            error(self, 'Fatal', 'End offset larger than file size.')

        self._fhandle.seek(self._start_offset)
        self._last_read_addr = self._fhandle.tell()
        self.is_valid = True
#if key_type < UBIFS_KEY_TYPES_CNT: return {'type':key_type, 'ino_num':ino_num, 'khash': khash} def decompress(ctype, unc_len, data): """Decompress data. Arguments: Int:ctype -- Compression type LZO, ZLIB (*currently unused*). Int:unc_len -- Uncompressed data lenth. Str:data -- Data to be uncompessed. Returns: Uncompressed Data. """ if ctype == UBIFS_COMPR_LZO: try: return lzo.decompress(''.join(('\xf0', struct.pack('>I', unc_len), data))) except Exception, e: error(decompress, 'Warn', 'LZO Error: %s' % e) elif ctype == UBIFS_COMPR_ZLIB: try: return zlib.decompress(data, -11) except Exception, e: error(decompress, 'Warn', 'ZLib Error: %s' % e) else: return data
if mst_chdr.node_type == UBIFS_MST_NODE: self.file.seek(mst_offset + UBIFS_COMMON_HDR_SZ) buf = self.file.read(UBIFS_MST_NODE_SZ) self._mst_nodes[i] = nodes.mst_node(buf) log( self, '%s%s file addr: %s' % (self._mst_nodes[i], i, self.file.last_read_addr())) verbose_display(self._mst_nodes[i]) else: raise Exception('Wrong node type.') except Exception, e: error(self, 'Fatal', 'Master block %s error: %s' % (i, e)) if not self._mst_nodes[0] or not self._mst_nodes[1]: error(self, 'Fatal', 'Less than 2 Master blocks found.') def _get_file(self): return self._file file = property(_get_file) def _get_superblock(self): """ Superblock Node Object Returns: Obj:Superblock Node """ return self._sb_node superblock_node = property(_get_superblock)
log(self , '%s file addr: %s' % (mst_chdr, self.file.last_read_addr())) verbose_display(mst_chdr) if mst_chdr.node_type == UBIFS_MST_NODE: self.file.seek(mst_offset + UBIFS_COMMON_HDR_SZ) buf = self.file.read(UBIFS_MST_NODE_SZ) self._mst_nodes[i] = nodes.mst_node(buf) log(self , '%s%s file addr: %s' % (self._mst_nodes[i], i, self.file.last_read_addr())) verbose_display(self._mst_nodes[i]) else: raise Exception('Wrong node type.') except Exception, e: error(self, 'Fatal', 'Master block %s error: %s' % (i, e)) if not self._mst_nodes[0] or not self._mst_nodes[1]: error(self, 'Fatal', 'Less than 2 Master blocks found.') def _get_file(self): return self._file file = property(_get_file) def _get_superblock(self): """ Superblock Node Object Returns: Obj:Superblock Node """ return self._sb_node superblock_node = property(_get_superblock)
class ubifs():
    """UBIFS object

    Arguments:
    Str:path -- File path to UBIFS image.

    Attributes:
    Obj:file      -- File object
    Int:leb_size  -- Size of Logical Erase Blocks.
    Int:min_io    -- Size of min I/O from vid_hdr_offset.
    Obj:sb_node   -- Superblock node of UBIFS image LEB0
    Obj:mst_node  -- Master Node of UBIFS image LEB1
    Obj:mst_node2 -- Master Node 2 of UBIFS image LEB2
    """

    def __init__(self, ubifs_file):
        self.__name__ = 'UBIFS'
        self._file = ubifs_file
        try:
            self.file.reset()
            # Superblock is at the start of LEB 0, behind a common header.
            sb_chdr = nodes.common_hdr(self.file.read(UBIFS_COMMON_HDR_SZ))
            log(self, '%s file addr: %s' % (sb_chdr, self.file.last_read_addr()))
            verbose_display(sb_chdr)

            if sb_chdr.node_type == UBIFS_SB_NODE:
                self.file.seek(UBIFS_COMMON_HDR_SZ)
                buf = self.file.read(UBIFS_SB_NODE_SZ)
                self._sb_node = nodes.sb_node(buf)
                # Filesystem geometry comes from the superblock node.
                self._min_io_size = self._sb_node.min_io_size
                self._leb_size = self._sb_node.leb_size
                log(
                    self, '%s file addr: %s' %
                    (self._sb_node, self.file.last_read_addr()))
                verbose_display(self._sb_node)
            else:
                raise Exception('Wrong node type.')
        except Exception, e:
            error(self, 'Fatal', 'Super block error: %s' % e)

        # Two redundant master nodes follow, one per LEB starting at
        # UBIFS_MST_LNUM.
        self._mst_nodes = [None, None]
        for i in xrange(0, 2):
            try:
                mst_offset = self.leb_size * (UBIFS_MST_LNUM + i)
                self.file.seek(mst_offset)
                mst_chdr = nodes.common_hdr(
                    self.file.read(UBIFS_COMMON_HDR_SZ))
                log(
                    self, '%s file addr: %s' %
                    (mst_chdr, self.file.last_read_addr()))
                verbose_display(mst_chdr)

                if mst_chdr.node_type == UBIFS_MST_NODE:
                    self.file.seek(mst_offset + UBIFS_COMMON_HDR_SZ)
                    buf = self.file.read(UBIFS_MST_NODE_SZ)
                    self._mst_nodes[i] = nodes.mst_node(buf)
                    log(
                        self, '%s%s file addr: %s' %
                        (self._mst_nodes[i], i, self.file.last_read_addr()))
                    verbose_display(self._mst_nodes[i])
                else:
                    raise Exception('Wrong node type.')
            except Exception, e:
                error(self, 'Fatal', 'Master block %s error: %s' % (i, e))
khash = lkey #if key_type < UBIFS_KEY_TYPES_CNT: return {'type': key_type, 'ino_num': ino_num, 'khash': khash} def decompress(ctype, unc_len, data): """Decompress data. Arguments: Int:ctype -- Compression type LZO, ZLIB (*currently unused*). Int:unc_len -- Uncompressed data lenth. Str:data -- Data to be uncompessed. Returns: Uncompressed Data. """ if ctype == UBIFS_COMPR_LZO: try: return lzo.decompress(''.join( ('\xf0', struct.pack('>I', unc_len), data))) except Exception, e: error(decompress, 'Warn', 'LZO Error: %s' % e) elif ctype == UBIFS_COMPR_ZLIB: try: return zlib.decompress(data, -11) except Exception, e: error(decompress, 'Warn', 'ZLib Error: %s' % e) else: return data
inode['hlink'] = dent_path buf = _process_reg_file(ubifs, inode, dent_path) _write_reg_file(dent_path, buf) else: os.link(inode['hlink'], dent_path) log(extract_dents, 'Make Link: %s > %s' % (dent_path, inode['hlink'])) else: buf = _process_reg_file(ubifs, inode, dent_path) _write_reg_file(dent_path, buf) if perms: _set_file_perms(dent_path, inode) except Exception, e: error(extract_dents, 'Warn', 'FILE Fail: %s' % e) elif dent_node.type == UBIFS_ITYPE_LNK: try: # probably will need to decompress ino data if > UBIFS_MIN_COMPR_LEN os.symlink('%s' % inode['ino'].data, dent_path) log(extract_dents, 'Make Symlink: %s > %s' % (dent_path, inode['ino'].data)) except Exception, e: error(extract_dents, 'Warn', 'SYMLINK Fail: %s' % e) elif dent_node.type in [UBIFS_ITYPE_BLK, UBIFS_ITYPE_CHR]: try: dev = struct.unpack('<II', inode['ino'].data)[0] if True: