class HFSVolume(object):

    def __init__(self, pytsk_image, offset=0):
        self.img = pytsk_image
        self.offset = offset

        try:
            data = self.read(0, 0x1000)
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature in (0x482B, 0x4858)  # 'H+' (HFS+) or 'HX' (HFSX)
        except AssertionError:
            raise ValueError("Not an HFS+ image")
        #self.is_hfsx = self.header.signature == 0x4858
        self.blockSize = self.header.blockSize
        self.allocationFile = HFSFile(self, self.header.allocationFile, kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile, kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile, kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile, kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (1 << kHFSVolumeJournaledBit)

    def read(self, offset, size):
        return self.img.read(self.offset + offset, size)

    def volumeID(self):
        return struct.pack(">LL", self.header.finderInfo[6], self.header.finderInfo[7])

    def isBlockInUse(self, block):
        thisByte = self.allocationBitmap[block // 8]
        return (thisByte & (1 << (7 - (block % 8)))) != 0

    def unallocatedBlocks(self):
        for i in range(self.header.totalBlocks):
            if not self.isBlockInUse(i):
                yield i, self.read(i * self.blockSize, self.blockSize)

    def getExtentsOverflowForFile(self, fileID, startBlock, forkType=kForkTypeData):
        return self.extentsTree.searchExtents(fileID, forkType, startBlock)

    def getXattr(self, fileID, name):
        return self.xattrTree.searchXattr(fileID, name)

    def getFileByPath(self, path):
        return self.catalogTree.getRecordFromPath(path)

    def getFinderDateAdded(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return v.data.ExtendedFileInfo.finderDateAdded
        elif k and v.recordType == kHFSPlusFolderRecord:
            return v.data.ExtendedFolderInfo.finderDateAdded
        return 0

    def listFolderContents(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if not k or v.recordType != kHFSPlusFolderRecord:
            return
        for k, v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                print(v.data.folderID, getString(k) + "/")
            elif v.recordType == kHFSPlusFileRecord:
                print(v.data.fileID, getString(k))

    def listFinderData(self, path):
        '''Returns a dictionary of Finder data for the given path'''
        finder_data = {}
        k, v = self.catalogTree.getRecordFromPath(path)
        date_added = 0
        if k and v.recordType == kHFSPlusFileRecord:
            date_added = v.data.ExtendedFileInfo.finderDateAdded
            if v.data.FileInfo.fileType: finder_data['fileType'] = v.data.FileInfo.fileType
            if v.data.FileInfo.fileCreator: finder_data['fileCreator'] = v.data.FileInfo.fileCreator
            if v.data.FileInfo.finderFlags: finder_data['finderFlags'] = v.data.FileInfo.finderFlags
            if v.data.ExtendedFileInfo.extendedFinderFlags: finder_data['extendedFinderFlags'] = v.data.ExtendedFileInfo.extendedFinderFlags
        elif k and v.recordType == kHFSPlusFolderRecord:
            date_added = v.data.ExtendedFolderInfo.finderDateAdded
            if v.data.FolderInfo.finderFlags: finder_data['FinderFlags'] = v.data.FolderInfo.finderFlags
            if v.data.ExtendedFolderInfo.extendedFinderFlags: finder_data['extendedFinderFlags'] = v.data.ExtendedFolderInfo.extendedFinderFlags
        if date_added: finder_data['DateAdded'] = date_added
        return finder_data
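
    # A brief usage sketch for the listing helpers above; 'vol' is assumed to be an
    # HFSVolume instance and the paths are placeholders, not part of this module:
    #
    #   vol.listFolderContents('/Users')            # prints CNID and name of each child
    #   info = vol.listFinderData('/Users/Shared')  # dict that may contain 'finderFlags',
    #                                               # 'extendedFinderFlags', 'DateAdded'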
    def getCnidForPath(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if not v:
            raise ValueError("Path not found")
        if k and v.recordType == kHFSPlusFileRecord:
            return v.data.fileID
        elif k and v.recordType == kHFSPlusFolderThreadRecord:
            return v.data.folderID

    def getXattrsByPath(self, path):
        file_id = self.getCnidForPath(path)
        return self.xattrTree.getAllXattrs(file_id)

    def getXattrByPath(self, path, name):
        file_id = self.getCnidForPath(path)
        return self.getXattr(file_id, name)

    '''
    Compression type in Xattr as per apple:
    Source: https://opensource.apple.com/source/copyfile/copyfile-138/copyfile.c.auto.html

    case 3:  /* zlib-compressed data in xattr */
    case 4:  /* 64k chunked zlib-compressed data in resource fork */
    case 7:  /* LZVN-compressed data in xattr */
    case 8:  /* 64k chunked LZVN-compressed data in resource fork */
    case 9:  /* uncompressed data in xattr (similar to but not identical to CMP_Type1) */
    case 10: /* 64k chunked uncompressed data in resource fork */
    case 11: /* LZFSE-compressed data in xattr */
    case 12: /* 64k chunked LZFSE-compressed data in resource fork */
        /* valid compression type, we want to copy. */
        break;

    case 5: /* specifies de-dup within the generation store. Don't copy decmpfs xattr. */
        copyfile_debug(3, "compression_type <5> on attribute com.apple.decmpfs for src file %s is not copied.",
                       s->src ? s->src : "(null string)");
        continue;

    case 6: /* unused */
    '''

    def readFile(self, path, output_file=None):
        '''Reads the file specified by 'path'; copies it into output_file if provided, and returns the data as bytes'''
        k, v = self.catalogTree.getRecordFromPath(path)
        if not v:
            raise ValueError("File not found")
        data = b''
        assert v.recordType == kHFSPlusFileRecord
        xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)
            log.debug("decmpfs.compression_type={}".format(str(decmpfs.compression_type)))
            if decmpfs.compression_type == 1:
                data = xattr[16:]
                if output_file: output_file.write(data)
            elif decmpfs.compression_type == 3:
                if decmpfs.uncompressed_size == len(xattr) - 16:
                    data = xattr[16:]
                else:
                    data = zlib.decompress(xattr[16:])
                if output_file: output_file.write(data)
            elif decmpfs.compression_type == 4:
                f = HFSCompressedResourceFork(self, v.data.resourceFork, v.data.fileID, decmpfs.compression_type, decmpfs.uncompressed_size)
                data = f.readAllBuffer(True, output_file)
            elif decmpfs.compression_type in [7, 11]:
                if xattr[16] == 0x06:  # perhaps even 0xF?
                    data = xattr[17:]  # tested OK
                else:  # tested OK
                    uncompressed_size = struct.unpack('<I', xattr[8:12])[0]
                    compressed_size = len(xattr) - 16
                    compressed_stream = xattr[16:]
                    data = lzvn_decompress(compressed_stream, compressed_size, uncompressed_size)
                if output_file: output_file.write(data)
            elif decmpfs.compression_type in [8, 12]:  # tested for type 8, OK
                f = HFSCompressedResourceFork(self, v.data.resourceFork, v.data.fileID, decmpfs.compression_type, decmpfs.uncompressed_size)
                data = f.readAllBuffer(True, output_file)
                if output_file: output_file.write(data)
        else:
            f = HFSFile(self, v.data.dataFork, v.data.fileID)
            data = f.readAllBuffer(True, output_file)
        return data
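
    # Sketch of probing the decmpfs handling above directly; 'vol' is assumed to be an
    # HFSVolume instance and the path is a placeholder. The 16-byte decmpfs header parsed
    # by HFSPlusDecmpfs carries the magic, compression_type and uncompressed_size; any
    # xattr-resident payload starts at offset 16, hence the xattr[16:] slices in readFile.
    #
    #   k, v = vol.getFileByPath('/some/compressed/file')
    #   xattr = vol.getXattr(v.data.fileID, "com.apple.decmpfs")
    #   if xattr:
    #       decmpfs = HFSPlusDecmpfs.parse(xattr)
    #       print(decmpfs.compression_type, decmpfs.uncompressed_size)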
    def readJournal(self):
        jb = self.read(self.header.journalInfoBlock * self.blockSize, self.blockSize)
        jib = JournalInfoBlock.parse(jb)
        return self.read(jib.offset, jib.size)

    def GetFileMACTimesFromFileRecord(self, v):
        times = {'c_time': None, 'm_time': None, 'cr_time': None, 'a_time': None}
        catalog_file = v.data
        times['c_time'] = CommonFunctions.ReadMacHFSTime(catalog_file.attributeModDate)
        times['m_time'] = CommonFunctions.ReadMacHFSTime(catalog_file.contentModDate)
        times['cr_time'] = CommonFunctions.ReadMacHFSTime(catalog_file.createDate)
        times['a_time'] = CommonFunctions.ReadMacHFSTime(catalog_file.accessDate)
        return times

    def GetFileMACTimes(self, file_path):
        '''
        Returns dictionary {c_time, m_time, cr_time, a_time} where
        cr_time = created time and c_time = last time inode/mft modified
        '''
        k, v = self.catalogTree.getRecordFromPath(file_path)
        if k and v.recordType in (kHFSPlusFileRecord, kHFSPlusFolderRecord):
            return self.GetFileMACTimesFromFileRecord(v)
        raise ValueError("Path not found or not file/folder!")

    def IsValidFilePath(self, path):
        '''Check if a file path is valid, does not check for folders!'''
        k, v = self.catalogTree.getRecordFromPath(path)
        if not v:
            return False
        return v.recordType == kHFSPlusFileRecord  # TODO: Check for hard links, sym links?

    def IsValidFolderPath(self, path):
        '''Check if a folder path is valid'''
        k, v = self.catalogTree.getRecordFromPath(path)
        if not v:
            return False
        return v.recordType == kHFSPlusFolderRecord  # TODO: Check for hard links, sym links?

    def IsSymbolicLink(self, path):
        '''Check if a path points to a file/folder or symbolic link'''
        mode = self.GetFileMode(path)
        if mode:
            return (mode & S_IFLNK) == S_IFLNK
        return False

    def GetFileSizeFromFileRecord(self, v):
        xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)
            return decmpfs.uncompressed_size  # TODO: verify for all cases!
        else:
            return v.data.dataFork.logicalSize

    def GetFileSize(self, path):
        '''For a given file path, gets logical file size'''
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return self.GetFileSizeFromFileRecord(v)
        else:
            raise ValueError("Path not found")

    def GetUserAndGroupID(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType in (kHFSPlusFileRecord, kHFSPlusFolderRecord):
            return (v.data.HFSPlusBSDInfo.ownerID, v.data.HFSPlusBSDInfo.groupID)
        else:
            raise ValueError("Path not found")

    def GetFileMode(self, path):
        '''Returns the file or folder's fileMode'''
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v and v.recordType in (kHFSPlusFileRecord, kHFSPlusFolderRecord):
            return v.data.HFSPlusBSDInfo.fileMode
        else:
            raise ValueError("Path not found or not a file/folder")
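
# A minimal end-to-end usage sketch, assuming pytsk3 is installed; the image path,
# partition offset and file path below are placeholders, not values from this module.
if __name__ == '__main__':
    import pytsk3
    img = pytsk3.Img_Info('/path/to/hfs_image.dd')  # any object exposing read(offset, size) works
    vol = HFSVolume(img, offset=0)                  # byte offset of the HFS+ partition within the image
    print(vol.GetFileMACTimes('/private/var/log/system.log'))
    data = vol.readFile('/private/var/log/system.log')
    print('Read {} bytes'.format(len(data)))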