Example #1
    def __init__(self, bdev):
        self.bdev = bdev

        try:
            data = self.bdev.readBlock(0)
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except:
            raise
            #raise Exception("Not an HFS+ image")

        self.blockSize = self.header.blockSize
        self.bdev.setBlockSize(self.blockSize)

        #if os.path.getsize(filename) < self.header.totalBlocks * self.blockSize:
        #    print "WARNING: HFS image appears to be truncated"

        self.allocationFile = HFSFile(self, self.header.allocationFile,
                                      kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile,
                                   kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile,
                                   kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile,
                                 kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile, self)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (
            1 << kHFSVolumeJournaledBit)
Example #2
    def __init__(self, bdev):
        self.bdev = bdev

        self.bdev.seek(0x400)
        self.header = HFSPlusVolumeHeader.parse_stream(self.bdev)
        assert self.header.signature == 0x4858 or self.header.signature == 0x482B

        self.blockSize = self.header.blockSize

        #if os.path.getsize(filename) < self.header.totalBlocks * self.blockSize:
        #    print "WARNING: HFS image appears to be truncated"

        self.allocationFile = HFSFile(self, self.header.allocationFile, kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile, kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile, kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile, kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile, self)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (1 << kHFSVolumeJournaledBit)

        k,v = self.catalogTree.search((kHFSRootFolderID, ""))
        self.volumename = getString(v.data)
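All of these constructors validate the same two signatures: 0x482B (b'H+', HFS Plus) and 0x4858 (b'HX', the case-sensitive HFSX variant), read big-endian from the volume header at offset 0x400. A standalone sketch of just that check; the helper name is illustrative and not taken from the examples:

import struct

def looks_like_hfsplus(data):
    # data: at least the first 0x402 bytes of the volume/image.
    signature = struct.unpack(">H", data[0x400:0x402])[0]
    return signature in (0x482B, 0x4858)  # b'H+' (HFS+) or b'HX' (HFSX)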
Example #3
    def __init__(self, pytsk_image, offset=0):
        self.img = pytsk_image
        self.offset = offset

        try:
            data = self.read(0, 0x1000)
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except:
            raise Exception("Not an HFS+ image")
        #self.is_hfsx = self.header.signature == 0x4858
        self.blockSize = self.header.blockSize
        self.allocationFile = HFSFile(self, self.header.allocationFile,
                                      kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile,
                                   kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile,
                                   kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile,
                                 kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (
            1 << kHFSVolumeJournaledBit)
Example #4
    def __init__(self, filename, write=False, offset=0):
        flag = os.O_RDONLY if not write else os.O_RDWR
        if sys.platform == "win32":
            flag = flag | os.O_BINARY
        self.fd = os.open(filename, flag)
        self.offset = offset
        self.writeFlag = write

        try:
            data = self.read(0, 0x1000)
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except:
            raise Exception("Not an HFS+ image")

        self.blockSize = self.header.blockSize

        if os.path.getsize(filename) < self.header.totalBlocks * self.blockSize:
            print "WARNING: image appears to be truncated"

        self.allocationFile = HFSFile(self, self.header.allocationFile, kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile, kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile, kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile, kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (1 << kHFSVolumeJournaledBit)
Example #5
    def __init__(self, bdev):
        self.bdev = bdev

        try:
            data = self.bdev.readBlock(0)
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except:
            raise
            #raise Exception("Not an HFS+ image")

        self.blockSize = self.header.blockSize
        self.bdev.setBlockSize(self.blockSize)

        #if os.path.getsize(filename) < self.header.totalBlocks * self.blockSize:
        #    print "WARNING: HFS image appears to be truncated"

        self.allocationFile = HFSFile(self, self.header.allocationFile, kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile, kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile, kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile, kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile, self)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (1 << kHFSVolumeJournaledBit)
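The bdev-based constructors above (Examples #1, #2 and #5) expect a block-device-like object exposing readBlock(), setBlockSize() and, for the parse_stream variant, seek()/read(). A minimal file-backed stand-in, purely illustrative and not part of any example:

class FileBlockDevice(object):
    # Minimal stand-in for the bdev object used above; the default block size
    # only needs to cover the 0x400-0x800 header range read by readBlock(0).
    def __init__(self, path, blockSize=0x1000):
        self.f = open(path, "rb")
        self.blockSize = blockSize

    def setBlockSize(self, blockSize):
        self.blockSize = blockSize

    def seek(self, offset):
        self.f.seek(offset)

    def read(self, size):
        return self.f.read(size)

    def readBlock(self, blockNum):
        self.f.seek(blockNum * self.blockSize)
        return self.f.read(self.blockSize)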
Example #6
    def __init__(self, filename, write=False, offset=0):
        flag = os.O_RDONLY if not write else os.O_RDWR
        if sys.platform == 'win32':
            flag = flag | os.O_BINARY
        self.fd = os.open(filename, flag)
        self.offset = offset
        self.writeFlag = write

        try:
            data = self.read(0, 0x1000)
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except:
            raise Exception("Not an HFS+ image")

        self.blockSize = self.header.blockSize

        if os.path.getsize(filename) < self.header.totalBlocks * self.blockSize:
            print "WARNING: image appears to be truncated"

        self.allocationFile = HFSFile(self, self.header.allocationFile,
                                      kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile,
                                   kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile,
                                   kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile,
                                 kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (
            1 << kHFSVolumeJournaledBit)
Example #7
class HFSVolume(object):
    def __init__(self, filename, write=False, offset=0):
        flag = os.O_RDONLY if not write else os.O_RDWR
        if sys.platform == 'win32':
            flag = flag | os.O_BINARY
        self.fd = os.open(filename, flag)
        self.offset = offset
        self.writeFlag = write

        try:
            data = self.read(0, 0x1000)
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except:
            raise Exception("Not an HFS+ image")

        self.blockSize = self.header.blockSize

        if os.path.getsize(filename) < self.header.totalBlocks * self.blockSize:
            print "WARNING: image appears to be truncated"

        self.allocationFile = HFSFile(self, self.header.allocationFile,
                                      kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile,
                                   kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile,
                                   kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile,
                                 kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (
            1 << kHFSVolumeJournaledBit)

    def read(self, offset, size):
        os.lseek(self.fd, self.offset + offset, os.SEEK_SET)
        return os.read(self.fd, size)

    def write(self, offset, data):
        if self.writeFlag:  #fail silently for testing
            os.lseek(self.fd, self.offset + offset, os.SEEK_SET)
            return os.write(self.fd, data)

    def writeBlock(self, lba, block):
        return self.write(lba * self.blockSize, block)

    def volumeID(self):
        return struct.pack(">LL", self.header.finderInfo[6],
                           self.header.finderInfo[7])

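    # Allocation-file bitmap layout assumed by the next two methods: one bit per
    # allocation block, most significant bit of each byte first, so block n is
    # byte n / 8, bit (7 - n % 8); a set bit means the block is in use.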
    def isBlockInUse(self, block):
        thisByte = ord(self.allocationBitmap[block / 8])
        return (thisByte & (1 << (7 - (block % 8)))) != 0

    def unallocatedBlocks(self):
        for i in xrange(self.header.totalBlocks):
            if not self.isBlockInUse(i):
                yield i, self.read(i * self.blockSize, self.blockSize)

    def getExtentsOverflowForFile(self,
                                  fileID,
                                  startBlock,
                                  forkType=kForkTypeData):
        return self.extentsTree.searchExtents(fileID, forkType, startBlock)

    def getXattr(self, fileID, name):
        return self.xattrTree.searchXattr(fileID, name)

    def getFileByPath(self, path):
        return self.catalogTree.getRecordFromPath(path)

    def listFolderContents(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if not k or v.recordType != kHFSPlusFolderRecord:
            return
        for k, v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                print v.data.folderID, getString(k) + "/"
            elif v.recordType == kHFSPlusFileRecord:
                print v.data.fileID, getString(k)

    def listXattrs(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return self.xattrTree.getAllXattrs(v.data.fileID)
        elif k and v.recordType == kHFSPlusFolderThreadRecord:
            return self.xattrTree.getAllXattrs(v.data.folderID)

    def readFile(self, path, returnString=False):
        k, v = self.catalogTree.getRecordFromPath(path)
        if not v:
            print "File %s not found" % path
            return
        assert v.recordType == kHFSPlusFileRecord
        xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)

            if decmpfs.compression_type == 1:
                return xattr[16:]
            elif decmpfs.compression_type == 3:
                if decmpfs.uncompressed_size == len(xattr) - 16:
                    return xattr[16:]
                return zlib.decompress(xattr[16:])
            elif decmpfs.compression_type == 4:
                f = HFSCompressedResourceFork(self, v.data.resourceFork,
                                              v.data.fileID)
                return f.readAllBuffer()

        f = HFSFile(self, v.data.dataFork, v.data.fileID)
        if returnString:
            return f.readAllBuffer()
        else:
            f.readAll(os.path.basename(path))

    def readJournal(self):
        jb = self.read(self.header.journalInfoBlock * self.blockSize,
                       self.blockSize)
        jib = JournalInfoBlock.parse(jb)
        return self.read(jib.offset, jib.size)
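A hypothetical usage sketch for the class above; the image path and file path are illustrative, not taken from the example:

vol = HFSVolume("hfs_image.dd", write=False, offset=0)  # raw HFS+ partition image
vol.listFolderContents("/")                             # prints IDs and names
hosts = vol.readFile("/private/etc/hosts", returnString=True)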
Example #8
class HFSVolume(object):
    def __init__(self, filename, write=False, offset=0):
        flag = os.O_RDONLY if not write else os.O_RDWR
        if sys.platform == "win32":
            flag = flag | os.O_BINARY
        self.fd = os.open(filename, flag)
        self.offset = offset
        self.writeFlag = write

        try:
            data = self.read(0, 0x1000)
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except:
            raise Exception("Not an HFS+ image")

        self.blockSize = self.header.blockSize

        if os.path.getsize(filename) < self.header.totalBlocks * self.blockSize:
            print "WARNING: image appears to be truncated"

        self.allocationFile = HFSFile(self, self.header.allocationFile, kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile, kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile, kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile, kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (1 << kHFSVolumeJournaledBit)

    def read(self, offset, size):
        os.lseek(self.fd, self.offset + offset, os.SEEK_SET)
        return os.read(self.fd, size)

    def write(self, offset, data):
        if self.writeFlag:  # fail silently for testing
            os.lseek(self.fd, self.offset + offset, os.SEEK_SET)
            return os.write(self.fd, data)

    def writeBlock(self, lba, block):
        return self.write(lba * self.blockSize, block)

    def volumeID(self):
        return struct.pack(">LL", self.header.finderInfo[6], self.header.finderInfo[7])

    def isBlockInUse(self, block):
        thisByte = ord(self.allocationBitmap[block / 8])
        return (thisByte & (1 << (7 - (block % 8)))) != 0

    def unallocatedBlocks(self):
        for i in xrange(self.header.totalBlocks):
            if not self.isBlockInUse(i):
                yield i, self.read(i * self.blockSize, self.blockSize)

    def getExtentsOverflowForFile(self, fileID, startBlock, forkType=kForkTypeData):
        return self.extentsTree.searchExtents(fileID, forkType, startBlock)

    def getXattr(self, fileID, name):
        return self.xattrTree.searchXattr(fileID, name)

    def getFileByPath(self, path):
        return self.catalogTree.getRecordFromPath(path)

    def listFolderContents(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if not k or v.recordType != kHFSPlusFolderRecord:
            return
        for k, v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                print v.data.folderID, getString(k) + "/"
            elif v.recordType == kHFSPlusFileRecord:
                print v.data.fileID, getString(k)

    def listXattrs(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return self.xattrTree.getAllXattrs(v.data.fileID)
        elif k and v.recordType == kHFSPlusFolderThreadRecord:
            return self.xattrTree.getAllXattrs(v.data.folderID)

    def readFile(self, path, returnString=False):
        k, v = self.catalogTree.getRecordFromPath(path)
        if not v:
            print "File %s not found" % path
            return
        assert v.recordType == kHFSPlusFileRecord
        xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)

            if decmpfs.compression_type == 1:
                return xattr[16:]
            elif decmpfs.compression_type == 3:
                if decmpfs.uncompressed_size == len(xattr) - 16:
                    return xattr[16:]
                return zlib.decompress(xattr[16:])
            elif decmpfs.compression_type == 4:
                f = HFSCompressedResourceFork(self, v.data.resourceFork, v.data.fileID)
                return f.readAllBuffer()

        f = HFSFile(self, v.data.dataFork, v.data.fileID)
        if returnString:
            return f.readAllBuffer()
        else:
            f.readAll(os.path.basename(path))

    def readJournal(self):
        jb = self.read(self.header.journalInfoBlock * self.blockSize, self.blockSize)
        jib = JournalInfoBlock.parse(jb)
        return self.read(jib.offset, jib.size)
Example #9
class HFSVolume(object):
    def __init__(self, bdev):
        self.bdev = bdev

        self.bdev.seek(0x400)
        self.header = HFSPlusVolumeHeader.parse_stream(self.bdev)
        assert self.header.signature == 0x4858 or self.header.signature == 0x482B

        self.blockSize = self.header.blockSize

        #if os.path.getsize(filename) < self.header.totalBlocks * self.blockSize:
        #    print "WARNING: HFS image appears to be truncated"

        self.allocationFile = HFSFile(self, self.header.allocationFile, kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile, kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile, kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile, kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile, self)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (1 << kHFSVolumeJournaledBit)

        k,v = self.catalogTree.search((kHFSRootFolderID, ""))
        self.volumename = getString(v.data)

    def readBlock(self, b):
        self.bdev.seek(b * self.blockSize)
        return self.bdev.read(self.blockSize)

    def writeBlock(self, lba, data):
        raise NotImplementedError
        # return self.bdev.writeBlock(lba, data)

    def volumeID(self):
        return struct.pack(">LL", self.header.finderInfo[6], self.header.finderInfo[7])

    def isBlockInUse(self, block):
        thisByte = ord(self.allocationBitmap[block / 8])
        return (thisByte & (1 << (7 - (block % 8)))) != 0

    # def unallocatedBlocks(self):
    #     for i in xrange(self.header.totalBlocks):
    #         if not self.isBlockInUse(i):
    #             yield i, self.read(i*self.blockSize, self.blockSize)

    def getExtentsOverflowForFile(self, fileID, startBlock, forkType=kForkTypeData):
        return self.extentsTree.searchExtents(fileID, forkType, startBlock)

    def getXattr(self, fileID, name):
        return self.xattrTree.searchXattr(fileID, name)

    def getFileByPath(self, path):
        return self.catalogTree.getRecordFromPath(path)

    def getFileIDByPath(self, path):
        key, record = self.catalogTree.getRecordFromPath(path)
        if not record:
            return
        if record.recordType == kHFSPlusFolderRecord:
            return record.data.folderID
        return record.data.fileID

    def listFolderContents(self, path):
        k,v = self.catalogTree.getRecordFromPath(path)
        if not k or v.recordType != kHFSPlusFolderRecord:
            return
        for k,v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                #.HFS+ Private Directory Data\r
                print v.data.folderID, getString(k).replace("\r","") + "/"
            elif v.recordType == kHFSPlusFileRecord:
                print v.data.fileID, getString(k)

    def ls(self, path):
        k,v = self.catalogTree.getRecordFromPath(path)
        return self._ls(k, v)

    def _ls(self, k, v):
        res = {}

        if not k or v.recordType != kHFSPlusFolderRecord:
            return None
        for k,v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                #.HFS+ Private Directory Data\r
                res[getString(k).replace("\r","") + "/"] =  v.data
            elif v.recordType == kHFSPlusFileRecord:
                if is_hardlink(v.data):
                    #print "hardlink iNode%d" % v.data.HFSPlusBSDInfo.special.iNodeNum
                    k2,v2 = self.catalogTree.getRecordFromPath("/\x00\x00\x00\x00HFS+ Private Data/iNode%d" % v.data.HFSPlusBSDInfo.special.iNodeNum)
                    res[getString(k)] = v2.data
                else:
                    res[getString(k)] = v.data
        return res

    def listXattrs(self, path):
        k,v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return self.xattrTree.getAllXattrs(v.data.fileID)
        elif k and v.recordType == kHFSPlusFolderThreadRecord:
            return self.xattrTree.getAllXattrs(v.data.folderID)

    def readCompressedFile(self, record, xattr, output):
        decmpfs = HFSPlusDecmpfs.parse(xattr)
        data = None
        if decmpfs.compression_type == 1:
            output.write(xattr[16:])
        elif decmpfs.compression_type == 3:
            if decmpfs.uncompressed_size == len(xattr) - 16:
                output.write(xattr[16:])
            elif xattr[16] == "\xFF":
                output.write(xattr[17:])
            else:
                output.write(zlib.decompress(xattr[16:]))
        elif decmpfs.compression_type == 4:
            f = HFSCompressedResourceFork(self, record.data.resourceFork, record.data.fileID)
            f.readAllBuffer(output)

    def readFileByRecord(self, key, record, output):
        assert record.recordType == kHFSPlusFileRecord
        xattr = self.getXattr(record.data.fileID, "com.apple.decmpfs")
        if xattr:
            self.readCompressedFile(record, xattr, output)
        else:
            f = HFSFile(self, record.data.dataFork, record.data.fileID)
            f.readAll(output)
        return True


    def _readFile(self, path, output):
        k,v = self.catalogTree.getRecordFromPath(path)
        if not v:
            print "File %s not found" % path
            return
        assert v.recordType == kHFSPlusFileRecord
        return self.readFileByRecord(k, v, output)

    def readFile(self, path, outdir="./", returnString=False):
        if returnString:
            return self.readFileToString(path)
        outputfile = os.path.join(outdir,os.path.basename(path))
        f = open(outputfile, "wb")
        res = self._readFile(path, f)
        f.close()
        if not res:
            os.unlink(outputfile)
        return res

    def readFileToString(self, path):
        sio = cStringIO.StringIO()
        self._readFile(path, sio)
        return sio.getvalue()

    def readJournal(self):
        #jb = self.read(self.header.journalInfoBlock * self.blockSize, self.blockSize)
        #jib = JournalInfoBlock.parse(jb)
        #return self.read(jib.offset,jib.size)
        return self.readFile("/.journal", returnString=True)

    def listAllFileIds(self):
        self.fileids={}
        self.catalogTree.traverseLeafNodes(callback=self.grabFileId)
        return self.fileids

    def grabFileId(self, k,v):
        if v.recordType == kHFSPlusFileRecord:
            self.fileids[v.data.fileID] = True

    def getFileRecordForFileID(self, fileID):
        k,v = self.catalogTree.searchByCNID(fileID)
        return v

    def getFullPath(self, fileID):
        k,v = self.catalogTree.search((fileID, ""))
        if not k:
            print "File ID %d not found" % fileID
            return ""
        if fileID == kHFSRootFolderID:
            return "/"
        p = getString(v.data)
        while k:
            k,v = self.catalogTree.search((v.data.parentID, ""))
            if k.parentID == kHFSRootFolderID:
                break
            p = getString(v.data) + "/" + p

        return "/" + p

    def getFileRecordForPath(self, path):
        k,v = self.catalogTree.getRecordFromPath(path)
        if not k:
            return
        return v.data

    def getAllExtents(self, hfsplusfork, fileID):
        b = 0
        extents = []
        for extent in hfsplusfork.HFSPlusExtentDescriptor:
            extents.append(extent)
            b += extent.blockCount
        while b != hfsplusfork.totalBlocks:
            k,v = self.getExtentsOverflowForFile(fileID, b)
            if not v:
                print "extents overflow missing, startblock=%d" % b
                break
            for extent in v:
                extents.append(extent)
                b += extent.blockCount
        return extents

    def dohashFiles(self, k,v):
        if v.recordType == kHFSPlusFileRecord and not is_symlink(v.data):
            filename = getString(k)
            f = HFSFile(self, v.data.dataFork, v.data.fileID)
            print filename, hashlib.sha1(f.readAllBuffer()).hexdigest()

    def hashFiles(self):
        self.catalogTree.traverseLeafNodes(callback=self.dohashFiles)
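The xattr[16:] slices used throughout these examples skip the fixed 16-byte decmpfs header that HFSPlusDecmpfs.parse decodes. A standalone sketch of that parse, assuming the standard decmpfs_disk_header layout (little-endian magic, compression type and uncompressed size); the function name is illustrative:

import struct

def parse_decmpfs(xattr):
    # 4-byte magic (stored bytes read as b'fpmc'), uint32 compression_type,
    # uint64 uncompressed_size, all little-endian; the payload starts at offset 16.
    magic, compression_type, uncompressed_size = struct.unpack("<4sIQ", xattr[:16])
    if magic != b"fpmc":
        raise ValueError("not a com.apple.decmpfs attribute")
    return compression_type, uncompressed_size, xattr[16:]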
Example #10
class HFSVolume(object):
    def __init__(self, pytsk_image, offset=0):
        self.img = pytsk_image
        self.offset = offset

        try:
            data = self.read(0, 0x1000)
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except:
            raise Exception("Not an HFS+ image")
        #self.is_hfsx = self.header.signature == 0x4858
        self.blockSize = self.header.blockSize
        self.allocationFile = HFSFile(self, self.header.allocationFile,
                                      kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile,
                                   kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile,
                                   kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile,
                                 kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (
            1 << kHFSVolumeJournaledBit)

    def read(self, offset, size):
        #return self.read_correct(self.img, self.offset + offset, size)
        return self.img.read(self.offset + offset, size)

    def volumeID(self):
        return struct.pack(">LL", self.header.finderInfo[6],
                           self.header.finderInfo[7])

    def isBlockInUse(self, block):
        thisByte = ord(self.allocationBitmap[block / 8])
        return (thisByte & (1 << (7 - (block % 8)))) != 0

    def unallocatedBlocks(self):
        for i in xrange(self.header.totalBlocks):
            if not self.isBlockInUse(i):
                yield i, self.read(i * self.blockSize, self.blockSize)

    def getExtentsOverflowForFile(self,
                                  fileID,
                                  startBlock,
                                  forkType=kForkTypeData):
        return self.extentsTree.searchExtents(fileID, forkType, startBlock)

    def getXattr(self, fileID, name):
        return self.xattrTree.searchXattr(fileID, name)

    def getFileByPath(self, path):
        return self.catalogTree.getRecordFromPath(path)

    def getFinderDateAdded(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return v.data.ExtendedFileInfo.finderDateAdded
        elif k and v.recordType == kHFSPlusFolderRecord:
            return v.data.ExtendedFolderInfo.finderDateAdded
        return 0

    def listFolderContents(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if not k or v.recordType != kHFSPlusFolderRecord:
            return
        for k, v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                print(v.data.folderID, getString(k) + "/")
            elif v.recordType == kHFSPlusFileRecord:
                print(v.data.fileID, getString(k))

    def listFinderData(self, path):
        '''Returns finder data'''
        finder_data = {}
        k, v = self.catalogTree.getRecordFromPath(path)
        date_added = 0
        if k and v.recordType == kHFSPlusFileRecord:
            date_added = v.data.ExtendedFileInfo.finderDateAdded
            if v.data.FileInfo.fileType:
                finder_data['fileType'] = v.data.FileInfo.fileType
            if v.data.FileInfo.fileCreator:
                finder_data['fileCreator'] = v.data.FileInfo.fileCreator
            if v.data.FileInfo.finderFlags:
                finder_data['finderFlags'] = v.data.FileInfo.finderFlags
            if v.data.ExtendedFileInfo.extendedFinderFlags:
                finder_data['extendedFinderFlags'] = v.data.ExtendedFileInfo.extendedFinderFlags
        elif k and v.recordType == kHFSPlusFolderRecord:
            date_added = v.data.ExtendedFolderInfo.finderDateAdded
            if v.data.FolderInfo.finderFlags:
                finder_data['FinderFlags'] = v.data.FolderInfo.finderFlags
            if v.data.ExtendedFolderInfo.extendedFinderFlags:
                finder_data['extendedFinderFlags'] = v.data.ExtendedFolderInfo.extendedFinderFlags
        if date_added: finder_data['DateAdded'] = date_added

        return finder_data

    def listXattrs(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return self.xattrTree.getAllXattrs(v.data.fileID)
        elif k and v.recordType == kHFSPlusFolderThreadRecord:
            return self.xattrTree.getAllXattrs(v.data.folderID)

    '''	Compression type in Xattr as per apple:
        Source: https://opensource.apple.com/source/copyfile/copyfile-138/copyfile.c.auto.html
        case 3:  /* zlib-compressed data in xattr */
        case 4:  /* 64k chunked zlib-compressed data in resource fork */
        case 7:  /* LZVN-compressed data in xattr */
        case 8:  /* 64k chunked LZVN-compressed data in resource fork */
        case 9:  /* uncompressed data in xattr (similar to but not identical to CMP_Type1) */
        case 10: /* 64k chunked uncompressed data in resource fork */
        case 11: /* LZFSE-compressed data in xattr */
        case 12: /* 64k chunked LZFSE-compressed data in resource fork */
            /* valid compression type, we want to copy. */
            break;
        case 5: /* specifies de-dup within the generation store. Don't copy decmpfs xattr. */
            copyfile_debug(3, "compression_type <5> on attribute com.apple.decmpfs for src file %s is not copied.",
                    s->src ? s->src : "(null string)");
            continue;
        case 6: /* unused */
    '''
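    # Of the types listed above, the readFile() below handles 1, 3, 4, 7, 8, 11
    # and 12; any other type falls through to the plain data-fork read at the end.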

    def readFile(self, path, returnString=False):
        '''Reads file specified by 'path' and copies it out or returns as string'''
        k, v = self.catalogTree.getRecordFromPath(path)
        if not v:
            log.error("File {} not found".format(path))
            return None
        assert v.recordType == kHFSPlusFileRecord
        xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)
            log.debug("decmpfs.compression_type={}".format(
                str(decmpfs.compression_type)))
            if decmpfs.compression_type == 1:
                data = xattr[16:]
                return data if returnString else self.writeout_file(
                    os.path.basename(path), data)
            elif decmpfs.compression_type == 3:
                data = None
                if decmpfs.uncompressed_size == len(xattr) - 16:
                    data = xattr[16:]
                else:
                    data = zlib.decompress(xattr[16:])
                return data if returnString else self.writeout_file(
                    os.path.basename(path), data)
            elif decmpfs.compression_type == 4:
                f = HFSCompressedResourceFork(self, v.data.resourceFork,
                                              v.data.fileID,
                                              decmpfs.compression_type,
                                              decmpfs.uncompressed_size)
                data = f.readAllBuffer()
                return data if returnString else self.writeout_file(
                    os.path.basename(path), data)
            elif decmpfs.compression_type in [7, 11]:
                data = xattr[16:]
                if xattr[16] == b'\x06':  # perhaps even 0xF?
                    data = xattr[17:]  #tested OK
                else:  #tested OK
                    uncompressed_size = struct.unpack('<I', xattr[8:12])[0]
                    compressed_size = len(xattr) - 16
                    compressed_stream = xattr[16:]
                    data = lzvn_decompress(compressed_stream, compressed_size,
                                           uncompressed_size)
                return data if returnString else self.writeout_file(
                    os.path.basename(path), data)
            elif decmpfs.compression_type in [8, 12]:
                # tested for type 8 , OK
                f = HFSCompressedResourceFork(self, v.data.resourceFork,
                                              v.data.fileID,
                                              decmpfs.compression_type,
                                              decmpfs.uncompressed_size)
                data = f.readAllBuffer()  # inefficient?
                return data if returnString else self.writeout_file(
                    os.path.basename(path), data)

        f = HFSFile(self, v.data.dataFork, v.data.fileID)
        if returnString:
            return f.readAllBuffer()
        else:
            f.copyOutFile(os.path.basename(path))

    def writeout_file(self, outputfile, data):
        with open(outputfile, "wb") as f:
            f.write(data)

    def readJournal(self):
        jb = self.read(self.header.journalInfoBlock * self.blockSize,
                       self.blockSize)
        jib = JournalInfoBlock.parse(jb)
        return self.read(jib.offset, jib.size)
Example #11
class HFSVolume(object):
    def __init__(self, bdev):
        self.bdev = bdev

        try:
            data = self.bdev.readBlock(0)
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except:
            raise
            #raise Exception("Not an HFS+ image")

        self.blockSize = self.header.blockSize
        self.bdev.setBlockSize(self.blockSize)

        #if os.path.getsize(filename) < self.header.totalBlocks * self.blockSize:
        #    print "WARNING: HFS image appears to be truncated"

        self.allocationFile = HFSFile(self, self.header.allocationFile, kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile, kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile, kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile, kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile, self)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (1 << kHFSVolumeJournaledBit)

    def readBlock(self, b):
        return self.bdev.readBlock(b)

    def writeBlock(self, lba, data):
        return self.bdev.writeBlock(lba, data)

    def volumeID(self):
        return struct.pack(">LL", self.header.finderInfo[6], self.header.finderInfo[7])

    def isBlockInUse(self, block):
        thisByte = ord(self.allocationBitmap[block / 8])
        return (thisByte & (1 << (7 - (block % 8)))) != 0

    def unallocatedBlocks(self):
        for i in xrange(self.header.totalBlocks):
            if not self.isBlockInUse(i):
                yield i, self.read(i*self.blockSize, self.blockSize)

    def getExtentsOverflowForFile(self, fileID, startBlock, forkType=kForkTypeData):
        return self.extentsTree.searchExtents(fileID, forkType, startBlock)

    def getXattr(self, fileID, name):
        return self.xattrTree.searchXattr(fileID, name)

    def getFileByPath(self, path):
        return self.catalogTree.getRecordFromPath(path)

    def getFileIDByPath(self, path):
        key, record = self.catalogTree.getRecordFromPath(path)
        if not record:
            return
        if record.recordType == kHFSPlusFolderRecord:
            return record.data.folderID
        return record.data.fileID
    
    def listFolderContents(self, path):
        k,v = self.catalogTree.getRecordFromPath(path)
        if not k or v.recordType != kHFSPlusFolderRecord:
            return
        for k,v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                #.HFS+ Private Directory Data\r
                print v.data.folderID, getString(k).replace("\r","") + "/"
            elif v.recordType == kHFSPlusFileRecord:
                print v.data.fileID, getString(k)

    def ls(self, path):
        k,v = self.catalogTree.getRecordFromPath(path)
        return self._ls(k, v)
    
    def _ls(self, k, v):
        res = {}
        
        if not k or v.recordType != kHFSPlusFolderRecord:
            return None
        for k,v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                #.HFS+ Private Directory Data\r
                res[getString(k).replace("\r","") + "/"] =  v.data 
            elif v.recordType == kHFSPlusFileRecord:
                res[getString(k)] = v.data
        return res
    
    def listXattrs(self, path):
        k,v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return self.xattrTree.getAllXattrs(v.data.fileID)
        elif k and v.recordType == kHFSPlusFolderThreadRecord:
            return self.xattrTree.getAllXattrs(v.data.folderID)

    def readFileByRecord(self, record):
        assert record.recordType == kHFSPlusFileRecord
        xattr = self.getXattr(record.data.fileID, "com.apple.decmpfs")
        data = None
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)
            if decmpfs.compression_type == 1:
                return xattr[16:]
            elif decmpfs.compression_type == 3:
                if decmpfs.uncompressed_size == len(xattr) - 16:
                    return xattr[16:]
                return zlib.decompress(xattr[16:])
            elif decmpfs.compression_type == 4:
                f = HFSCompressedResourceFork(self, record.data.resourceFork, record.data.fileID)
                data = f.readAllBuffer()
            return data

        f = HFSFile(self, record.data.dataFork, record.data.fileID)
        return f.readAllBuffer()

    #TODO: returnString compress
    def readFile(self, path, outFolder="./", returnString=False):
        k,v = self.catalogTree.getRecordFromPath(path)
        if not v:
            print "File %s not found" % path
            return
        assert v.recordType == kHFSPlusFileRecord
        xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)

            if decmpfs.compression_type == 1:
                return xattr[16:]
            elif decmpfs.compression_type == 3:
                if decmpfs.uncompressed_size == len(xattr) - 16:
                    z = xattr[16:]
                else:
                    z = zlib.decompress(xattr[16:])
                open(outFolder + os.path.basename(path), "wb").write(z)
                return
            elif decmpfs.compression_type == 4:
                f = HFSCompressedResourceFork(self, v.data.resourceFork, v.data.fileID)
                z = f.readAllBuffer()
                open(outFolder + os.path.basename(path), "wb").write(z)
                return z

        f = HFSFile(self, v.data.dataFork, v.data.fileID)
        if returnString:
            return f.readAllBuffer()
        else:
            f.readAll(outFolder + os.path.basename(path))

    def readJournal(self):
        #jb = self.read(self.header.journalInfoBlock * self.blockSize, self.blockSize)
        #jib = JournalInfoBlock.parse(jb)
        #return self.read(jib.offset,jib.size)
        return self.readFile("/.journal", returnString=True)

    def listAllFileIds(self):
        self.fileids={}
        self.catalogTree.traverseLeafNodes(callback=self.grabFileId)
        return self.fileids
    
    def grabFileId(self, k,v):
        if v.recordType == kHFSPlusFileRecord:
            self.fileids[v.data.fileID] = True

    def getFileRecordForFileID(self, fileID):
        k,v = self.catalogTree.searchByCNID(fileID)
        return v
    
    def getFullPath(self, fileID):
        k,v = self.catalogTree.search((fileID, ""))
        if not k:
            print "File ID %d not found" % fileID
            return ""
        p = getString(v.data)
        while k:
            k,v = self.catalogTree.search((v.data.parentID, ""))
            if k.parentID == kHFSRootFolderID:
                break
            p = getString(v.data) + "/" + p
            
        return "/" + p
    
    def getFileRecordForPath(self, path):
        k,v = self.catalogTree.getRecordFromPath(path)
        if not k:
            return
        return v.data

    def getAllExtents(self, hfsplusfork, fileID):
        b = 0
        extents = []
        for extent in hfsplusfork.HFSPlusExtentDescriptor:
            extents.append(extent)
            b += extent.blockCount
        while b != hfsplusfork.totalBlocks:
            k,v = self.getExtentsOverflowForFile(fileID, b)
            if not v:
                print "extents overflow missing, startblock=%d" % b
                break
            for extent in v:
                extents.append(extent)
                b += extent.blockCount
        return extents

    def dohashFiles(self, k,v):
        if v.recordType == kHFSPlusFileRecord:
            filename = getString(k)
            f = HFSFile(self, v.data.dataFork, v.data.fileID)
            print filename, hashlib.sha1(f.readAllBuffer()).hexdigest()
            
    def hashFiles(self):
        self.catalogTree.traverseLeafNodes(callback=self.dohashFiles)
Example #12
class HFSVolume(object):
    def __init__(self, bdev):
        self.bdev = bdev

        try:
            data = self.bdev.readBlock(0)
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except:
            raise
            #raise Exception("Not an HFS+ image")

        self.blockSize = self.header.blockSize
        self.bdev.setBlockSize(self.blockSize)

        #if os.path.getsize(filename) < self.header.totalBlocks * self.blockSize:
        #    print "WARNING: HFS image appears to be truncated"

        self.allocationFile = HFSFile(self, self.header.allocationFile,
                                      kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile,
                                   kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile,
                                   kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile,
                                 kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile, self)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (
            1 << kHFSVolumeJournaledBit)

    def readBlock(self, b):
        return self.bdev.readBlock(b)

    def writeBlock(self, lba, data):
        return self.bdev.writeBlock(lba, data)

    def volumeID(self):
        return struct.pack(">LL", self.header.finderInfo[6],
                           self.header.finderInfo[7])

    def isBlockInUse(self, block):
        thisByte = ord(self.allocationBitmap[block / 8])
        return (thisByte & (1 << (7 - (block % 8)))) != 0

    def unallocatedBlocks(self):
        for i in xrange(self.header.totalBlocks):
            if not self.isBlockInUse(i):
                yield i, self.read(i * self.blockSize, self.blockSize)

    def getExtentsOverflowForFile(self,
                                  fileID,
                                  startBlock,
                                  forkType=kForkTypeData):
        return self.extentsTree.searchExtents(fileID, forkType, startBlock)

    def getXattr(self, fileID, name):
        return self.xattrTree.searchXattr(fileID, name)

    def getFileByPath(self, path):
        return self.catalogTree.getRecordFromPath(path)

    def getFileIDByPath(self, path):
        key, record = self.catalogTree.getRecordFromPath(path)
        if not record:
            return
        if record.recordType == kHFSPlusFolderRecord:
            return record.data.folderID
        return record.data.fileID

    def listFolderContents(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if not k or v.recordType != kHFSPlusFolderRecord:
            return
        for k, v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                #.HFS+ Private Directory Data\r
                print v.data.folderID, getString(k).replace("\r", "") + "/"
            elif v.recordType == kHFSPlusFileRecord:
                print v.data.fileID, getString(k)

    def ls(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        return self._ls(k, v)

    def _ls(self, k, v):
        res = {}

        if not k or v.recordType != kHFSPlusFolderRecord:
            return None
        for k, v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                #.HFS+ Private Directory Data\r
                res[getString(k).replace("\r", "") + "/"] = v.data
            elif v.recordType == kHFSPlusFileRecord:
                res[getString(k)] = v.data
        return res

    def listXattrs(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return self.xattrTree.getAllXattrs(v.data.fileID)
        elif k and v.recordType == kHFSPlusFolderThreadRecord:
            return self.xattrTree.getAllXattrs(v.data.folderID)

    def readFileByRecord(self, record):
        assert record.recordType == kHFSPlusFileRecord
        xattr = self.getXattr(record.data.fileID, "com.apple.decmpfs")
        data = None
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)
            if decmpfs.compression_type == 1:
                return xattr[16:]
            elif decmpfs.compression_type == 3:
                if decmpfs.uncompressed_size == len(xattr) - 16:
                    return xattr[16:]
                return zlib.decompress(xattr[16:])
            elif decmpfs.compression_type == 4:
                f = HFSCompressedResourceFork(self, record.data.resourceFork,
                                              record.data.fileID)
                data = f.readAllBuffer()
            return data

        f = HFSFile(self, record.data.dataFork, record.data.fileID)
        return f.readAllBuffer()

    #TODO: returnString compress
    def readFile(self, path, outFolder="./", returnString=False):
        k, v = self.catalogTree.getRecordFromPath(path)
        if not v:
            print "File %s not found" % path
            return
        assert v.recordType == kHFSPlusFileRecord
        xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)

            if decmpfs.compression_type == 1:
                return xattr[16:]
            elif decmpfs.compression_type == 3:
                if decmpfs.uncompressed_size == len(xattr) - 16:
                    z = xattr[16:]
                else:
                    z = zlib.decompress(xattr[16:])
                open(outFolder + os.path.basename(path), "wb").write(z)
                return
            elif decmpfs.compression_type == 4:
                f = HFSCompressedResourceFork(self, v.data.resourceFork,
                                              v.data.fileID)
                z = f.readAllBuffer()
                open(outFolder + os.path.basename(path), "wb").write(z)
                return z

        f = HFSFile(self, v.data.dataFork, v.data.fileID)
        if returnString:
            return f.readAllBuffer()
        else:
            f.readAll(outFolder + os.path.basename(path))

    def readJournal(self):
        #jb = self.read(self.header.journalInfoBlock * self.blockSize, self.blockSize)
        #jib = JournalInfoBlock.parse(jb)
        #return self.read(jib.offset,jib.size)
        return self.readFile("/.journal", returnString=True)

    def listAllFileIds(self):
        self.fileids = {}
        self.catalogTree.traverseLeafNodes(callback=self.grabFileId)
        return self.fileids

    def grabFileId(self, k, v):
        if v.recordType == kHFSPlusFileRecord:
            self.fileids[v.data.fileID] = True

    def getFileRecordForFileID(self, fileID):
        k, v = self.catalogTree.searchByCNID(fileID)
        return v

    def getFullPath(self, fileID):
        k, v = self.catalogTree.search((fileID, ""))
        if not k:
            print "File ID %d not found" % fileID
            return ""
        p = getString(v.data)
        while k:
            k, v = self.catalogTree.search((v.data.parentID, ""))
            if k.parentID == kHFSRootFolderID:
                break
            p = getString(v.data) + "/" + p

        return "/" + p

    def getFileRecordForPath(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if not k:
            return
        return v.data

    def getAllExtents(self, hfsplusfork, fileID):
        b = 0
        extents = []
        for extent in hfsplusfork.HFSPlusExtentDescriptor:
            extents.append(extent)
            b += extent.blockCount
        while b != hfsplusfork.totalBlocks:
            k, v = self.getExtentsOverflowForFile(fileID, b)
            if not v:
                print "extents overflow missing, startblock=%d" % b
                break
            for extent in v:
                extents.append(extent)
                b += extent.blockCount
        return extents

    def dohashFiles(self, k, v):
        if v.recordType == kHFSPlusFileRecord:
            filename = getString(k)
            f = HFSFile(self, v.data.dataFork, v.data.fileID)
            print filename, hashlib.sha1(f.readAllBuffer()).hexdigest()

    def hashFiles(self):
        self.catalogTree.traverseLeafNodes(callback=self.dohashFiles)
Example #13
class HFSVolume(object):
    def __init__(self, pytsk_image, offset=0):
        self.img = pytsk_image
        self.offset = offset

        try:
            data = self.read(0, 0x1000)
            self.header = HFSPlusVolumeHeader.parse(data[0x400:0x800])
            assert self.header.signature == 0x4858 or self.header.signature == 0x482B
        except:
            raise Exception("Not an HFS+ image")
        #self.is_hfsx = self.header.signature == 0x4858
        self.blockSize = self.header.blockSize
        self.allocationFile = HFSFile(self, self.header.allocationFile,
                                      kHFSAllocationFileID)
        self.allocationBitmap = self.allocationFile.readAllBuffer()
        self.extentsFile = HFSFile(self, self.header.extentsFile,
                                   kHFSExtentsFileID)
        self.extentsTree = ExtentsOverflowTree(self.extentsFile)
        self.catalogFile = HFSFile(self, self.header.catalogFile,
                                   kHFSCatalogFileID)
        self.xattrFile = HFSFile(self, self.header.attributesFile,
                                 kHFSAttributesFileID)
        self.catalogTree = CatalogTree(self.catalogFile)
        self.xattrTree = AttributesTree(self.xattrFile)

        self.hasJournal = self.header.attributes & (
            1 << kHFSVolumeJournaledBit)

    def read(self, offset, size):
        return self.img.read(self.offset + offset, size)

    def volumeID(self):
        return struct.pack(">LL", self.header.finderInfo[6],
                           self.header.finderInfo[7])

    def isBlockInUse(self, block):
        thisByte = ord(self.allocationBitmap[block / 8])
        return (thisByte & (1 << (7 - (block % 8)))) != 0

    def unallocatedBlocks(self):
        for i in xrange(self.header.totalBlocks):
            if not self.isBlockInUse(i):
                yield i, self.read(i * self.blockSize, self.blockSize)

    def getExtentsOverflowForFile(self,
                                  fileID,
                                  startBlock,
                                  forkType=kForkTypeData):
        return self.extentsTree.searchExtents(fileID, forkType, startBlock)

    def getXattr(self, fileID, name):
        return self.xattrTree.searchXattr(fileID, name)

    def getFileByPath(self, path):
        return self.catalogTree.getRecordFromPath(path)

    def getFinderDateAdded(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return v.data.ExtendedFileInfo.finderDateAdded
        elif k and v.recordType == kHFSPlusFolderRecord:
            return v.data.ExtendedFolderInfo.finderDateAdded
        return 0

    def listFolderContents(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if not k or v.recordType != kHFSPlusFolderRecord:
            return
        for k, v in self.catalogTree.getFolderContents(v.data.folderID):
            if v.recordType == kHFSPlusFolderRecord:
                print(v.data.folderID, getString(k) + "/")
            elif v.recordType == kHFSPlusFileRecord:
                print(v.data.fileID, getString(k))

    def listFinderData(self, path):
        '''Returns finder data'''
        finder_data = {}
        k, v = self.catalogTree.getRecordFromPath(path)
        date_added = 0
        if k and v.recordType == kHFSPlusFileRecord:
            date_added = v.data.ExtendedFileInfo.finderDateAdded
            if v.data.FileInfo.fileType:
                finder_data['fileType'] = v.data.FileInfo.fileType
            if v.data.FileInfo.fileCreator:
                finder_data['fileCreator'] = v.data.FileInfo.fileCreator
            if v.data.FileInfo.finderFlags:
                finder_data['finderFlags'] = v.data.FileInfo.finderFlags
            if v.data.ExtendedFileInfo.extendedFinderFlags:
                finder_data['extendedFinderFlags'] = v.data.ExtendedFileInfo.extendedFinderFlags
        elif k and v.recordType == kHFSPlusFolderRecord:
            date_added = v.data.ExtendedFolderInfo.finderDateAdded
            if v.data.FolderInfo.finderFlags:
                finder_data['FinderFlags'] = v.data.FolderInfo.finderFlags
            if v.data.ExtendedFolderInfo.extendedFinderFlags:
                finder_data[
                    'extendedFinderFlags'] = v.data.ExtendedFolderInfo.extendedFinderFlags
        if date_added: finder_data['DateAdded'] = date_added

        return finder_data

    def listXattrs(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return self.xattrTree.getAllXattrs(v.data.fileID)
        elif k and v.recordType == kHFSPlusFolderRecord:
            return self.xattrTree.getAllXattrs(v.data.folderID)

    ''' Compression type in xattr, per Apple:
        Source: https://opensource.apple.com/source/copyfile/copyfile-138/copyfile.c.auto.html
        case 3:  /* zlib-compressed data in xattr */
        case 4:  /* 64k chunked zlib-compressed data in resource fork */
        case 7:  /* LZVN-compressed data in xattr */
        case 8:  /* 64k chunked LZVN-compressed data in resource fork */
        case 9:  /* uncompressed data in xattr (similar to but not identical to CMP_Type1) */
        case 10: /* 64k chunked uncompressed data in resource fork */
        case 11: /* LZFSE-compressed data in xattr */
        case 12: /* 64k chunked LZFSE-compressed data in resource fork */
            /* valid compression type, we want to copy. */
            break;
        case 5: /* specifies de-dup within the generation store. Don't copy decmpfs xattr. */
            copyfile_debug(3, "compression_type <5> on attribute com.apple.decmpfs for src file %s is not copied.",
                    s->src ? s->src : "(null string)");
            continue;
        case 6: /* unused */
    '''
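
    # For reference, the com.apple.decmpfs xattr starts with a 16-byte
    # little-endian header: magic (uint32), compression_type (uint32),
    # uncompressed_size (uint64); the payload follows at offset 16.
    # A minimal stand-alone sketch of decoding it (hypothetical helper --
    # HFSPlusDecmpfs.parse used below already does this job):
    #
    #   import struct
    #   def parse_decmpfs_header(xattr_bytes):
    #       magic, ctype, usize = struct.unpack('<IIQ', xattr_bytes[0:16])
    #       return magic, ctype, usize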

    def readFile(self, path, output_file=None):
        '''Reads the file at 'path'; copies its contents into output_file if one is provided, otherwise returns the data'''
        k, v = self.catalogTree.getRecordFromPath(path)
        if not v:
            raise ValueError("File not found")
        data = b''
        assert v.recordType == kHFSPlusFileRecord
        xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)
            log.debug("decmpfs.compression_type={}".format(
                str(decmpfs.compression_type)))
            if decmpfs.compression_type == 1:
                data = xattr[16:]
                if output_file: output_file.write(data)
            elif decmpfs.compression_type == 3:
                if decmpfs.uncompressed_size == len(xattr) - 16:
                    data = xattr[16:]
                else:
                    data = zlib.decompress(xattr[16:])
                if output_file: output_file.write(data)
            elif decmpfs.compression_type == 4:
                f = HFSCompressedResourceFork(self, v.data.resourceFork,
                                              v.data.fileID,
                                              decmpfs.compression_type,
                                              decmpfs.uncompressed_size)
                data = f.readAllBuffer(True, output_file)
            elif decmpfs.compression_type in [7, 11]:  # LZVN payload in the xattr
                if xattr[16] == 0x06:  # 0x06 marks an uncompressed payload (perhaps even 0xF?)
                    data = xattr[17:]  # tested OK
                else:  # tested OK
                    uncompressed_size = struct.unpack('<I', xattr[8:12])[0]
                    compressed_size = len(xattr) - 16
                    compressed_stream = xattr[16:]
                    data = lzvn_decompress(compressed_stream, compressed_size,
                                           uncompressed_size)
                if output_file: output_file.write(data)
            elif decmpfs.compression_type in [8, 12]:
                # tested for type 8 , OK
                f = HFSCompressedResourceFork(self, v.data.resourceFork,
                                              v.data.fileID,
                                              decmpfs.compression_type,
                                              decmpfs.uncompressed_size)
                data = f.readAllBuffer(True, output_file)
                if output_file: output_file.write(data)
        else:
            f = HFSFile(self, v.data.dataFork, v.data.fileID)
            data = f.readAllBuffer(True, output_file)
        return data
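
    # Example usage (hypothetical names; assumes 'vol' is an instance of this
    # volume class and the path exists on the image):
    #
    #   with open('hosts_copy', 'wb') as out:
    #       vol.readFile('/private/etc/hosts', out)
    #   # or, to work with the contents in memory:
    #   data = vol.readFile('/private/etc/hosts')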

    def readJournal(self):
        '''Returns the raw journal buffer, located via the journal info block'''
        jb = self.read(self.header.journalInfoBlock * self.blockSize,
                       self.blockSize)
        jib = JournalInfoBlock.parse(jb)
        return self.read(jib.offset, jib.size)

    def GetFileMACTimesFromFileRecord(self, v):
        times = {
            'c_time': None,
            'm_time': None,
            'cr_time': None,
            'a_time': None
        }
        catalog_file = v.data
        times['c_time'] = CommonFunctions.ReadMacHFSTime(
            catalog_file.attributeModDate)
        times['m_time'] = CommonFunctions.ReadMacHFSTime(
            catalog_file.contentModDate)
        times['cr_time'] = CommonFunctions.ReadMacHFSTime(
            catalog_file.createDate)
        times['a_time'] = CommonFunctions.ReadMacHFSTime(
            catalog_file.accessDate)
        return times

    def GetFileMACTimes(self, file_path):
        '''
           Returns a dictionary {c_time, m_time, cr_time, a_time}
           where cr_time is the creation time and c_time is the last
           time the record's metadata (attributes) was modified
        '''
        k, v = self.catalogTree.getRecordFromPath(file_path)
        if k and v.recordType in (kHFSPlusFileRecord, kHFSPlusFolderRecord):
            return self.GetFileMACTimesFromFileRecord(v)
        raise Exception("Path not found or not file/folder!")

    def IsValidFilePath(self, path):
        '''Check if a file path is valid, does not check for folders!'''
        k, v = self.catalogTree.getRecordFromPath(path)
        if not v:
            return False
        return v.recordType == kHFSPlusFileRecord  # TODO: check for hard links, symlinks?

    def IsValidFolderPath(self, path):
        '''Check if a folder path is valid'''
        k, v = self.catalogTree.getRecordFromPath(path)
        if not v:
            return False
        return v.recordType == kHFSPlusFolderRecord  # TODO: check for hard links, symlinks?

    def GetFileSizeFromFileRecord(self, v):
        xattr = self.getXattr(v.data.fileID, "com.apple.decmpfs")
        if xattr:
            decmpfs = HFSPlusDecmpfs.parse(xattr)
            return decmpfs.uncompressed_size  #TODO verify for all cases!
        else:
            return v.data.dataFork.logicalSize

    def GetFileSize(self, path):
        '''For a given file path, gets logical file size'''
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType == kHFSPlusFileRecord:
            return self.GetFileSizeFromFileRecord(v)
        else:
            raise Exception("Path not found")

    def GetUserAndGroupID(self, path):
        k, v = self.catalogTree.getRecordFromPath(path)
        if k and v.recordType in (kHFSPlusFileRecord, kHFSPlusFolderRecord):
            return (v.data.HFSPlusBSDInfo.ownerID,
                    v.data.HFSPlusBSDInfo.groupID)
        else:
            raise Exception("Path not found")