class NTFSFilesystem(object):
    def __init__(self, volume, cluster_size=None):
        oem_id = volume[3:7]
        assert oem_id == 'NTFS', 'Wrong OEM signature'
        super(NTFSFilesystem, self).__init__()
        self._volume = volume
        self._cluster_size = cluster_size
        vbr = self._vbr = NTFSVBR(volume)
        self._cluster_size = cluster_size = (
            cluster_size or
            vbr.bytes_per_sector() * vbr.sectors_per_cluster())
        self._clusters = ClusterAccessor(volume, cluster_size)
        self._logger = logging.getLogger("NTFSFilesystem")

        # balance memory usage with performance
        try:
            b = self.get_mft_buffer()
            # test we can access last MFT byte, demonstrating we can
            # reach all runs
            _ = b[-1]
        except OverrunBufferException as e:
            g_logger.warning(
                "failed to read MFT from image, will fall back to MFTMirr: %s", e)
            try:
                b = self.get_mftmirr_buffer()
                # test we can access last MFTMirr byte, demonstrating
                # we can reach all runs
                _ = b[-1]
            except OverrunBufferException as e:
                g_logger.error("failed to read MFTMirr from image: %s", e)
                raise CorruptNTFSFilesystemError(
                    "failed to read MFT or MFTMirr from image")

        if len(b) > 1024 * 1024 * 500:
            self._mft_data = b
        else:
            # note optimization: copy entire mft buffer from
            # NonResidentNTFSAttribute to avoid getslice lookups
            self._mft_data = b[:]

        self._enumerator = MFTEnumerator(self._mft_data)

        # test there's at least some user content (aside from root), or we'll
        # assume something's up
        try:
            _ = self.get_record(INODE_FIRST_USER)
        except OverrunBufferException:
            g_logger.error("overrun reading first user MFT record")
            raise CorruptNTFSFilesystemError(
                "failed to read first user record (MFT not large enough)")

    def get_attribute_data(self, attribute):
        if attribute.non_resident() == 0:
            return attribute.value()
        else:
            return NonResidentAttributeData(self._clusters,
                                            attribute.runlist())

    def get_mft_record(self):
        mft_lcn = self._vbr.mft_lcn()
        g_logger.debug("mft: %x", mft_lcn * 4096)
        mft_chunk = self._clusters[mft_lcn]
        mft_record = MFTRecord(mft_chunk, 0, None, inode=INODE_MFT)
        return mft_record

    def get_mft_buffer(self):
        mft_lcn = self._vbr.mft_lcn()
        g_logger.debug("mft: %x", mft_lcn * 4096)
        mft_chunk = self._clusters[mft_lcn]
        mft_record = MFTRecord(mft_chunk, 0, None, inode=INODE_MFT)
        mft_data_attribute = mft_record.data_attribute()
        return self.get_attribute_data(mft_data_attribute)

    def get_mftmirr_buffer(self):
        g_logger.debug("mft mirr: %s", hex(self._vbr.mftmirr_lcn() * 4096))
        mftmirr_chunk = self._clusters[self._vbr.mftmirr_lcn()]
        mftmirr_mft_record = MFTRecord(mftmirr_chunk,
                                       INODE_MFTMIRR * MFT_RECORD_SIZE,
                                       None, inode=INODE_MFTMIRR)
        mftmirr_data_attribute = mftmirr_mft_record.data_attribute()
        return self.get_attribute_data(mftmirr_data_attribute)

    def get_root_directory(self):
        return NTFSDirectory(self, self._enumerator.get_record(INODE_ROOT))

    def get_record(self, record_number):
        g_logger.debug("get_record: %d", record_number)
        return self._enumerator.get_record(record_number)

    def get_record_path(self, record):
        return self._enumerator.get_path(record)

    def get_record_parent(self, record):
        """
        @raises NoParentError: on various error conditions
        """
        if record.mft_record_number() == 5:
            raise NoParentError("Root directory has no parent")

        fn = record.filename_information()
        if not fn:
            raise NoParentError("File has no filename attribute")

        parent_record_num = MREF(fn.mft_parent_reference())
        parent_seq_num = MSEQNO(fn.mft_parent_reference())

        try:
            parent_record = self._enumerator.get_record(parent_record_num)
        except (OverrunBufferException, InvalidRecordException):
            raise NoParentError("Invalid parent MFT record")

        if parent_record.sequence_number() != parent_seq_num:
            raise NoParentError(
                "Invalid parent MFT record (bad sequence number)")

        return NTFSDirectory(self, parent_record)

    def get_record_children(self, record):
        # we use a map here to de-dup entries with different filename types
        # such as 8.3, POSIX, or Windows, but the same ultimate MFT reference
        ret = {}  # type: dict(int, MFTRecord)
        if not record.is_directory():
            return ret.values()

        # TODO: cleanup the duplication here
        try:
            indx_alloc_attr = record.attribute(ATTR_TYPE.INDEX_ALLOCATION)
            indx_alloc = INDEX_ALLOCATION(
                self.get_attribute_data(indx_alloc_attr), 0)
            # g_logger.debug("INDEX_ALLOCATION len: %s", hex(len(indx_alloc)))
            # g_logger.debug("alloc:\n%s", indx_alloc.get_all_string(indent=2))
            indx = indx_alloc

            for block in indx.blocks():
                for entry in block.index().entries():
                    ref = MREF(entry.header().mft_reference())
                    if ref == INODE_ROOT and \
                       entry.filename_information().filename() == ".":
                        continue
                    ret[ref] = self._enumerator.get_record(ref)
        except AttributeNotFoundError:
            indx_root_attr = record.attribute(ATTR_TYPE.INDEX_ROOT)
            indx_root = INDEX_ROOT(self.get_attribute_data(indx_root_attr), 0)
            indx = indx_root

            for entry in indx.index().entries():
                ref = MREF(entry.header().mft_reference())
                if ref == INODE_ROOT and \
                   entry.filename_information().filename() == ".":
                    continue
                ret[ref] = self._enumerator.get_record(ref)

        return ret.values()
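

# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal example of driving NTFSFilesystem: it assumes the caller already
# has the NTFS partition available as a sliceable buffer (bytes/str or an
# mmap-style object) that begins at the volume boot record. The function name,
# the read-whole-partition approach, and the default partition offset are
# assumptions for illustration only; large images should use a memory-mapped
# volume helper rather than reading everything into memory.
def example_list_root_children(image_path, partition_offset=0):
    with open(image_path, "rb") as f:
        f.seek(partition_offset)
        volume = f.read()  # whole partition in memory; fine for small test images

    fs = NTFSFilesystem(volume)

    # fetch the root directory's MFT record and enumerate its children,
    # printing the reconstructed path of each entry
    root_record = fs.get_record(INODE_ROOT)
    for child in fs.get_record_children(root_record):
        print(fs.get_record_path(child))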
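

# --- Usage sketch (illustrative addition, not part of the original module) ---
# Reading a file's unnamed $DATA attribute by MFT record number, using only
# the methods defined above. Resident data comes back as the raw attribute
# value; non-resident data is exposed through NonResidentAttributeData, which
# supports len() and slicing (see the MFT handling in __init__). Note that
# slicing the full non-resident buffer may include cluster slack past the
# file's logical size; trimming to the true size is left to the caller.
def example_read_record_data(fs, record_number):
    record = fs.get_record(record_number)
    data_attribute = record.data_attribute()
    if data_attribute is None:
        # record has no $DATA attribute (e.g. a directory)
        return ""
    data = fs.get_attribute_data(data_attribute)
    # materialize the buffer; for large non-resident files consider reading
    # in slices instead of copying everything at once
    return data[:]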