def __init__(self, buf, offset, parent):
    """Parse an UNKNOWN0 shell item at `offset` within `buf`."""
    debug("SHITEM_UNKNOWNENTRY0 @ %s." % (hex(offset)))
    super(SHITEM_UNKNOWNENTRY0, self).__init__(buf, offset, parent)
    self.declare_field("word", "size", 0x0)
    # Only the 0x20-byte variant is known to carry a GUID at offset 0xE.
    if self.size() == 0x20:
        self.declare_field("guid", "guid", 0xE)
def __init__(self, buf, offset, parent):
    """Parse an MFT ATTRIBUTE record at `offset` within `buf`."""
    super(Attribute, self).__init__(buf, offset)
    debug("ATTRIBUTE @ %s." % (hex(offset)))
    # Common attribute header, fields laid out sequentially.
    for ftype, fname in (("dword", "type"),
                         ("dword", "size"),
                         ("byte", "non_resident"),
                         ("byte", "name_length"),
                         ("word", "name_offset"),
                         ("word", "flags"),
                         ("word", "instance")):
        self.declare_field(ftype, fname)
    if self.non_resident() > 0:
        # Non-resident form: runlist metadata and on-disk sizes.
        self.declare_field("qword", "lowest_vcn", 0x10)
        for ftype, fname in (("qword", "highest_vcn"),
                             ("word", "runlist_offset"),
                             ("byte", "compression_unit"),
                             ("byte", "reserved1"),
                             ("byte", "reserved2"),
                             ("byte", "reserved3"),
                             ("byte", "reserved4"),
                             ("byte", "reserved5"),
                             ("qword", "allocated_size"),
                             ("qword", "data_size"),
                             ("qword", "initialized_size"),
                             ("qword", "compressed_size")):
            self.declare_field(ftype, fname)
    else:
        # Resident form: the value is stored inline in the record.
        self.declare_field("dword", "value_length", 0x10)
        self.declare_field("word", "value_offset")
        self.declare_field("byte", "value_flags")
        self.declare_field("byte", "reserved")
        self.declare_field("binary", "value",
                           self.value_offset(), self.value_length())
def __init__(self, buf, offset, parent):
    """Parse a network share shell item: flags, path, then description."""
    debug("SHITEM_NETWORKSHAREENTRY @ %s." % (hex(offset)))
    super(SHITEM_NETWORKSHAREENTRY, self).__init__(buf, offset, parent)
    self.declare_field("byte", "flags", 0x4)
    self.declare_field("string", "path", 0x5)
    # description immediately follows the NUL-terminated path string
    self.declare_field("string", "description", 0x5 + len(self.path()) + 1)
def mft_record_build_path(self, record, cycledetector=None):
    """Build the full path of `record` by walking parent references upward.

    `cycledetector` maps already-visited record numbers to True so that
    directory cycles terminate with a "<CYCLE>" marker instead of
    recursing forever.
    """
    if cycledetector is None:
        cycledetector = {}
    rec_num = record.mft_record_number() & 0xFFFFFFFFFFFF
    # Record number 5 is the volume root directory.
    if rec_num == 0x0005:
        return self.prefix if self.prefix else "\\."
    fn = record.filename_information()
    if not fn:
        return "\\??"
    parent_record_num = fn.mft_parent_reference() & 0xFFFFFFFFFFFF
    parent_buf = self.mft_get_record_buf(parent_record_num)
    if parent_buf == array.array("B", ""):
        return "\\??\\" + fn.filename()
    parent = MFTRecord(parent_buf, 0, False)
    # Sequence mismatch: the parent record slot was reused, so this
    # filename's true parent is gone — treat it as orphaned.
    if parent.sequence_number() != fn.mft_parent_reference() >> 48:
        return "\\$OrphanFiles\\" + fn.filename()
    if rec_num in cycledetector:
        debug("Cycle detected")
        return (self.prefix + "\\<CYCLE>") if self.prefix else "\\<CYCLE>"
    cycledetector[rec_num] = True
    return self.mft_record_build_path(parent, cycledetector) + \
        "\\" + fn.filename()
def get_all_shellbags(reg):
    """
    Given a python-registry Registry object, look for and return a
    list of shellbag items. A shellbag item is a dict with the keys
    (mtime, atime, crtime, path).
    Arguments:
    - `reg`: A python-registry Registry object.
    Throws:
    """
    shellbags = []
    paths = [
        # xp
        "Software\\Microsoft\\Windows\\Shell",
        "Software\\Microsoft\\Windows\\ShellNoRoam",
        # win7
        "Local Settings\\Software\\Microsoft\\Windows\\ShellNoRoam",
        "Local Settings\\Software\\Microsoft\\Windows\\Shell",
    ]
    for path in paths:
        try:
            debug("Processing: %s" % (path))
            shell_key = reg.open(path)
            discovered = get_shellbags(shell_key)
            debug("Found %s new shellbags" % (len(discovered)))
            shellbags.extend(discovered)
        except Registry.RegistryKeyNotFoundException:
            # This hive simply lacks that location; try the next one.
            pass
    return shellbags
def _calculate_mftoffset(self):
    """Locate the MFT via the boot sector and cache its absolute byte offset."""
    with open(self.filename, "rb") as f:
        f.seek(self.offset)
        f.seek(0x30, 1)  # relative seek to the MFT-cluster field
        raw = f.read(8)
        relative_mft_cluster = struct.unpack_from("<Q", raw, 0)[0]
        self.mftoffset = self.offset + relative_mft_cluster * self.clustersize
        debug("MFT offset is %s" % (hex(self.mftoffset)))
def __init__(self, buf, offset, parent):
    """Parse a STANDARD_INFORMATION attribute value."""
    debug("STANDARD INFORMATION ATTRIBUTE at %s." % (hex(offset)))
    super(StandardInformation, self).__init__(buf, offset)
    self.declare_field("filetime", "created_time", 0x0)
    for timestamp in ("modified_time", "changed_time", "accessed_time"):
        self.declare_field("filetime", timestamp)
    self.declare_field("dword", "attributes")
    self.declare_field("binary", "reserved",
                       self.current_field_offset(), 0xC)
def __init__(self, buf, offset, parent):
    """Parse an index node header and expose its entry list buffer."""
    debug("INDEX NODE HEADER at %s." % (hex(offset)))
    super(NTATTR_STANDARD_INDEX_HEADER, self).__init__(buf, offset)
    self.declare_field("dword", "entry_list_start", 0x0)
    self.declare_field("dword", "entry_list_end")
    self.declare_field("dword", "entry_list_allocation_end")
    self.declare_field("dword", "flags")
    # the raw bytes spanning the allocated entry list
    self.declare_field("binary", "list_buffer",
                       self.entry_list_start(),
                       self.entry_list_allocation_end() - self.entry_list_start())
def __init__(self, buf, offset, parent):
    """Parse an INDX record header and apply its update-sequence fixups."""
    debug("INDEX RECORD HEADER at %s." % (hex(offset)))
    super(IndexRecordHeader, self).__init__(buf, offset, parent)
    self.declare_field("dword", "magic", 0x0)
    for ftype, fname in (("word", "usa_offset"),
                         ("word", "usa_count"),
                         ("qword", "lsn"),
                         ("qword", "vcn")):
        self.declare_field(ftype, fname)
    # the node header begins immediately after the fixed fields
    self._node_header_offset = self.current_field_offset()
    self.fixup(self.usa_count(), self.usa_offset())
def items(self):
    """Generate shell items parsed back-to-back from this list's buffer.

    Parsing stops at the first zero-sized entry. Truncated file entries
    fall back to the FILEENTRY_FRAGMENT parser.
    """
    cursor = self.offset()
    while True:
        size = self.unpack_word(cursor)
        if size == 0:
            return
        # UNKNOWN1
        item_type = self.unpack_byte(cursor + 2)
        if item_type in (SHITEMTYPE.FILE_ENTRY0,
                         SHITEMTYPE.FILE_ENTRY1,
                         SHITEMTYPE.FILE_ENTRY2):
            try:
                item = SHITEM_FILEENTRY(self._buf, cursor, self)
            except OverrunBufferException:
                # entry is cut short; parse only the leading fragment
                item = FILEENTRY_FRAGMENT(self._buf, cursor, self, 0x4)
        elif item_type == SHITEMTYPE.FOLDER_ENTRY:
            item = SHITEM_FOLDERENTRY(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.VOLUME_NAME:
            item = SHITEM_VOLUMEENTRY(self._buf, cursor, self)
        elif item_type in (SHITEMTYPE.NETWORK_VOLUME_NAME0,
                           SHITEMTYPE.NETWORK_VOLUME_NAME1,
                           SHITEMTYPE.NETWORK_VOLUME_NAME2,
                           SHITEMTYPE.NETWORK_VOLUME_NAME3):
            item = SHITEM_NETWORKVOLUMEENTRY(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.NETWORK_SHARE:
            item = SHITEM_NETWORKSHAREENTRY(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.URI:
            item = SHITEM_URIENTRY(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.CONTROL_PANEL:
            item = SHITEM_CONTROLPANELENTRY(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.UNKNOWN0:
            item = SHITEM_UNKNOWNENTRY0(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.UNKNOWN2:
            item = SHITEM_UNKNOWNENTRY2(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.UNKNOWN3:
            item = SHITEM_UNKNOWNENTRY3(self._buf, cursor, self)
        else:
            # unrecognized tag: fall back to the generic base parser
            debug("Unknown type: %s" % hex(item_type))
            item = SHITEM(self._buf, cursor, self)
        yield item
        cursor += item.size()
def __init__(self, buf, offset, parent):
    """Parse an ITEMPOS file entry (as found in Bags ItemPos* values).

    Extension fields are version-gated; `_off_long_name_size` and
    `_off_long_name` are recorded (or set False) for later accessors.
    """
    debug("ITEMPOS_FILEENTRY @ %s." % (hex(offset)))
    super(ITEMPOS_FILEENTRY, self).__init__(buf, offset, parent)
    self.declare_field("word", "size", 0x0)  # override
    self.declare_field("word", "flags", 0x2)
    if self.flags() & 0xFF == 0xC3:
        # network share type, printers, etc
        self.declare_field("string", "long_name", 0x5)
        return
    cursor = 4
    self.declare_field("dword", "filesize", cursor)
    cursor += 4
    self.declare_field("dosdate", "m_date", cursor)
    cursor += 4
    self.declare_field("word", "fileattrs", cursor)
    cursor += 2
    self.declare_field("string", "short_name", cursor)
    cursor += len(self.short_name()) + 1
    cursor = align(cursor, 2)
    self.declare_field("word", "ext_size", cursor)
    cursor += 2
    self.declare_field("word", "ext_version", cursor)
    cursor += 2
    if self.ext_version() >= 0x03:
        cursor += 4  # unknown
        self.declare_field("dosdate", "cr_date", cursor)
        cursor += 4
        self.declare_field("dosdate", "a_date", cursor)
        cursor += 4
        cursor += 4  # unknown
    else:
        # no extended timestamps present; expose placeholder accessors
        self.cr_date = lambda: datetime.datetime.min
        self.a_date = lambda: datetime.datetime.min
    if self.ext_version() >= 0x0007:
        cursor += 8  # fileref
        cursor += 8  # unknown
        self._off_long_name_size = cursor
        cursor += 2
        if self.ext_version() >= 0x0008:
            cursor += 4  # unknown
        self._off_long_name = cursor
        cursor += self.long_name_size()
    elif self.ext_version() >= 0x0003:
        self._off_long_name_size = False
        self._off_long_name = cursor
        debug("(WSTRING) long_name @ %s" % (hex(self.absolute_offset(cursor))))
    else:
        self._off_long_name_size = False
        self._off_long_name = False
def __init__(self, buf, offset, parent):
    """Parse an INDEX_ROOT header; the node header follows immediately."""
    debug("INDEX ROOT HEADER at %s." % (hex(offset)))
    super(IndexRootHeader, self).__init__(buf, offset)
    self.declare_field("dword", "type", 0x0)
    for ftype, fname in (("dword", "collation_rule"),
                         ("dword", "index_record_size_bytes"),
                         ("byte", "index_record_size_clusters"),
                         ("byte", "unused1"),
                         ("byte", "unused2"),
                         ("byte", "unused3")):
        self.declare_field(ftype, fname)
    self._node_header_offset = self.current_field_offset()
def __init__(self, buf, offset, parent):
    """Parse an index entry, including its embedded filename buffer."""
    debug("INDEX ENTRY at %s." % (hex(offset)))
    super(IndexEntry, self).__init__(buf, offset)
    self.declare_field("qword", "mft_reference", 0x0)
    self.declare_field("word", "length")
    self.declare_field("word", "filename_information_length")
    self.declare_field("dword", "flags")
    self.declare_field("binary", "filename_information_buffer",
                       self.current_field_offset(),
                       self.filename_information_length())
    # the child VCN sits at the next 8-byte-aligned offset
    self.declare_field("qword", "child_vcn",
                       align(self.current_field_offset(), 0x8))
def __init__(self, buf, offset, parent):
    """Parse an UNKNOWN3 shell item; only the name fields are understood."""
    debug("SHITEM_UNKNOWNENTRY3 @ %s." % (hex(offset)))
    super(SHITEM_UNKNOWNENTRY3, self).__init__(buf, offset, parent, 0x4)
    self.declare_field("word", "size", 0x0)
    # most of this is unknown
    cursor = 0x18
    self.declare_field("string", "short_name", cursor)
    cursor += len(self.short_name()) + 1
    cursor = align(cursor, 2)
    cursor += 0x4C
    self.declare_field("wstring", "long_name", cursor)
def __init__(self, buf, offset, parent):
    """Parse one runlist entry: a header byte, then packed length and offset."""
    super(Runentry, self).__init__(buf, offset)
    debug("RUNENTRY @ %s." % (hex(offset)))
    self.declare_field("byte", "header")
    # high nibble = byte count of the offset field,
    # low nibble = byte count of the length field
    self._offset_length = self.header() >> 4
    self._length_length = self.header() & 0xF
    self.declare_field("binary", "length_binary",
                       self.current_field_offset(), self._length_length)
    self.declare_field("binary", "offset_binary",
                       self.current_field_offset(), self._offset_length)
def __init__(self, buf, offset, parent, filesize_offset):
    """Parse only the leading fragment of a file entry.

    Used when the full entry would overrun the buffer; declares the
    filesize/date/attrs/short-name fields starting at `filesize_offset`.
    """
    debug("FILEENTRY_FRAGMENT @ %s." % (hex(offset)))
    super(FILEENTRY_FRAGMENT, self).__init__(buf, offset, parent)
    cursor = filesize_offset
    self.declare_field("dword", "filesize", cursor)
    cursor += 4
    self.declare_field("dosdate", "m_date", cursor)
    cursor += 4
    self.declare_field("word", "fileattrs", cursor)
    cursor += 2
    self.declare_field("string", "short_name", cursor)
    cursor += len(self.short_name()) + 1
    cursor = align(cursor, 2)
def items(self):
    """Generate shell items parsed back-to-back from this list's buffer.

    Parsing stops at the first zero-sized entry; unrecognized type tags
    fall back to the generic SHITEM parser.
    """
    cursor = self.offset()
    while True:
        size = self.unpack_word(cursor)
        if size == 0:
            return
        # UNKNOWN1
        item_type = self.unpack_byte(cursor + 2)
        if item_type in (SHITEMTYPE.FILE_ENTRY0,
                         SHITEMTYPE.FILE_ENTRY1,
                         SHITEMTYPE.FILE_ENTRY2):
            item = SHITEM_FILEENTRY(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.FOLDER_ENTRY:
            item = SHITEM_FOLDERENTRY(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.VOLUME_NAME:
            item = SHITEM_VOLUMEENTRY(self._buf, cursor, self)
        elif item_type in (SHITEMTYPE.NETWORK_VOLUME_NAME0,
                           SHITEMTYPE.NETWORK_VOLUME_NAME1,
                           SHITEMTYPE.NETWORK_VOLUME_NAME2,
                           SHITEMTYPE.NETWORK_VOLUME_NAME3):
            item = SHITEM_NETWORKVOLUMEENTRY(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.NETWORK_SHARE:
            item = SHITEM_NETWORKSHAREENTRY(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.URI:
            item = SHITEM_URIENTRY(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.CONTROL_PANEL:
            item = SHITEM_CONTROLPANELENTRY(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.UNKNOWN0:
            item = SHITEM_UNKNOWNENTRY0(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.UNKNOWN2:
            item = SHITEM_UNKNOWNENTRY2(self._buf, cursor, self)
        elif item_type == SHITEMTYPE.UNKNOWN3:
            item = SHITEM_UNKNOWNENTRY3(self._buf, cursor, self)
        else:
            debug("Unknown type: %s" % hex(item_type))
            item = SHITEM(self._buf, cursor, self)
        yield item
        cursor += item.size()
def __init__(self, buf, offset, chunk):
    """Parse a log record header belonging to `chunk`.

    Raises InvalidRecordException when the declared size is implausible.
    """
    debug("Record at %s." % (hex(offset)))
    super(Record, self).__init__(buf, offset)
    self._chunk = chunk
    self.declare_field("dword", "magic", 0x0)  # 0x00002a2a
    self.declare_field("dword", "size")
    self.declare_field("qword", "record_num")
    self.declare_field("filetime", "timestamp")
    # anything over 64 KiB cannot be a sane record
    if self.size() > 0x10000:
        raise InvalidRecordException()
    # trailing copy of the size at the end of the record
    self.declare_field("dword", "size2", self.size() - 4)
def __init__(self, buf, offset, parent):
    """Parse a FILENAME attribute value."""
    debug("FILENAME ATTRIBUTE at %s." % (hex(offset)))
    super(FilenameAttribute, self).__init__(buf, offset)
    self.declare_field("qword", "mft_parent_reference", 0x0)
    for ftype, fname in (("filetime", "created_time"),
                         ("filetime", "modified_time"),
                         ("filetime", "changed_time"),
                         ("filetime", "accessed_time"),
                         ("qword", "physical_size"),
                         ("qword", "logical_size"),
                         ("dword", "flags"),
                         ("dword", "reparse_value"),
                         ("byte", "filename_length"),
                         ("byte", "filename_type")):
        self.declare_field(ftype, fname)
    # filename_length is in characters; the name starts at fixed offset 0x42
    self.declare_field("wstring", "filename", 0x42, self.filename_length())
def __init__(self, buf, offset):
    """Parse a file header starting with an 8-byte magic string."""
    debug("FILE HEADER at %s." % (hex(offset)))
    super(FileHeader, self).__init__(buf, offset)
    self.declare_field("string", "magic", 0x0, length=8)
    for ftype, fname in (("qword", "oldest_chunk"),
                         ("qword", "current_chunk_number"),
                         ("qword", "next_record_number"),
                         ("dword", "header_size"),
                         ("word", "minor_version"),
                         ("word", "major_version"),
                         ("word", "header_chunk_size"),
                         ("word", "chunk_count")):
        self.declare_field(ftype, fname)
    self.declare_field("binary", "unused1", length=0x4c)
    self.declare_field("dword", "flags")
    self.declare_field("dword", "checksum")
def __init__(self, buf, offset):
    """Parse a file header: magic, chunk bookkeeping, versions, checksum."""
    debug("FILE HEADER at %s." % (hex(offset)))
    super(FileHeader, self).__init__(buf, offset)
    self.declare_field("string", "magic", 0x0, length=8)
    self.declare_field("qword", "oldest_chunk")
    self.declare_field("qword", "current_chunk_number")
    self.declare_field("qword", "next_record_number")
    self.declare_field("dword", "header_size")
    self.declare_field("word", "minor_version")
    self.declare_field("word", "major_version")
    self.declare_field("word", "header_chunk_size")
    self.declare_field("word", "chunk_count")
    # reserved/unused padding before the trailing flags and checksum
    self.declare_field("binary", "unused1", length=0x4C)
    self.declare_field("dword", "flags")
    self.declare_field("dword", "checksum")
def __init__(self, buf, offset, parent, filesize_offset):
    """Parse a file entry shell item whose filesize field starts at
    `filesize_offset`.

    Extension fields are version-gated; `_off_long_name_size` and
    `_off_long_name` are recorded (or set False) for later accessors.
    """
    debug("Fileentry @ %s." % (hex(offset)))
    super(Fileentry, self).__init__(buf, offset, parent)
    pos = filesize_offset
    self.declare_field("dword", "filesize", pos)
    pos += 4
    self.declare_field("dosdate", "m_date", pos)
    pos += 4
    self.declare_field("word", "fileattrs", pos)
    pos += 2
    self.declare_field("string", "short_name", pos)
    pos += len(self.short_name()) + 1
    pos = align(pos, 2)
    self.declare_field("word", "ext_size", pos)
    pos += 2
    self.declare_field("word", "ext_version", pos)
    pos += 2
    if self.ext_version() >= 0x03:
        pos += 4  # unknown
        self.declare_field("dosdate", "cr_date", pos)
        pos += 4
        self.declare_field("dosdate", "a_date", pos)
        pos += 4
        pos += 4  # unknown
    else:
        # no extended timestamps present; expose placeholder accessors
        self.cr_date = lambda: datetime.datetime.min
        self.a_date = lambda: datetime.datetime.min
    if self.ext_version() >= 0x0007:
        pos += 8  # fileref
        pos += 8  # unknown
        self._off_long_name_size = pos
        pos += 2
        if self.ext_version() >= 0x0008:
            pos += 4  # unknown
        self._off_long_name = pos
        pos += self.long_name_size()
    elif self.ext_version() >= 0x0003:
        self._off_long_name_size = False
        self._off_long_name = pos
        debug("(WSTRING) long_name @ %s" % (hex(self.absolute_offset(pos))))
    else:
        self._off_long_name_size = False
        self._off_long_name = False
def __init__(self, buf, offset):
    """Parse a chunk header; string/template caches start out unset."""
    debug("CHUNK HEADER at %s." % (hex(offset)))
    super(ChunkHeader, self).__init__(buf, offset)
    # lazily-populated caches
    self._strings = None
    self._templates = None
    self.declare_field("string", "magic", 0x0, length=8)
    for ftype, fname in (("qword", "file_first_record_number"),
                         ("qword", "file_last_record_number"),
                         ("qword", "log_first_record_number"),
                         ("qword", "log_last_record_number"),
                         ("dword", "header_size"),
                         ("dword", "last_record_offset"),
                         ("dword", "next_record_offset"),
                         ("dword", "data_checksum")):
        self.declare_field(ftype, fname)
    self.declare_field("binary", "unused", length=0x44)
    self.declare_field("dword", "header_checksum")
def fixup(self, num_fixups, fixup_value_offset):
    """Apply update-sequence fixups in place.

    The word at `fixup_value_offset` is the sentinel written at the end
    of each 512-byte sector; each sector end is verified against it and
    then patched with the saved original word from the update sequence
    array. Mismatched sectors are warned about and left untouched.
    """
    fixup_value = self.unpack_word(fixup_value_offset)
    for idx in range(0, num_fixups - 1):
        sector_end = 512 * (idx + 1) - 2
        if self.unpack_word(sector_end) != fixup_value:
            warning("Bad fixup at %s" %
                    (hex(self.offset() + sector_end)))
            continue
        saved_word = self.unpack_word(fixup_value_offset + 2 + 2 * idx)
        self.pack_word(sector_end, saved_word)
        patched = self.unpack_word(sector_end)
        debug("Fixup verified at %s and patched from %s to %s." %
              (hex(self.offset() + sector_end),
               hex(fixup_value), hex(patched)))
def __init__(self, buf, offset, parent, inode=None):
    """Parse an MFT FILE record header and apply its fixups."""
    super(MFTRecord, self).__init__(buf, offset, parent)
    debug("MFTRECORD @ %s." % (hex(offset)))
    self.inode = inode or 0
    for ftype, fname in (("dword", "magic"),
                         ("word", "usa_offset"),
                         ("word", "usa_count"),
                         ("qword", "lsn"),
                         ("word", "sequence_number"),
                         ("word", "link_count"),
                         ("word", "attrs_offset"),
                         ("word", "flags"),
                         ("dword", "bytes_in_use"),
                         ("dword", "bytes_allocated"),
                         ("qword", "base_mft_record"),
                         ("word", "next_attr_instance"),
                         ("word", "reserved"),
                         ("dword", "mft_record_number")):
        self.declare_field(ftype, fname)
    # undo the on-disk update-sequence protection before use
    self.fixup(self.usa_count(), self.usa_offset())
def record_generator(self):
    """Yield MFTRecord instances from the input file.

    "mft" input is read 1024 bytes per record from the start of the
    file; "image" input starts reading at the computed MFT offset.
    "indx" input contains no MFT records, so nothing is yielded.
    """
    if self.filetype == "indx":
        return
    if self.filetype == "mft":
        size = os.path.getsize(self.filename)
        # differing stat on fds 0 and 1 suggests output is redirected,
        # in which case a progress meter on stderr won't garble output
        is_redirected = os.fstat(0) != os.fstat(1)
        should_progress = is_redirected and self.progress
        with open(self.filename, "rb") as f:
            record = True
            count = -1
            while record:
                if count % 100 == 0 and should_progress:
                    n = (count * 1024 * 100) / float(size)
                    sys.stderr.write("\rCompleted: %0.4f%%" % (n))
                    sys.stderr.flush()
                count += 1
                buf = array.array("B", f.read(1024))
                if not buf:
                    return
                try:
                    record = MFTRecord(buf, 0, False, inode=count)
                except OverrunBufferException:
                    debug("Failed to parse MFT record %s" % (str(count)))
                    continue
                debug("Yielding record " + str(count))
                yield record
        if should_progress:
            sys.stderr.write("\n")
    if self.filetype == "image":
        # TODO this overruns the MFT...
        # TODO this doesnt account for a fragmented MFT
        with open(self.filename, "rb") as f:
            if not self.mftoffset:
                self._calculate_mftoffset()
            f.seek(self.mftoffset)
            record = True
            count = -1
            while record:
                count += 1
                buf = array.array("B", f.read(1024))
                if not buf:
                    return
                try:
                    record = MFTRecord(buf, 0, False, inode=count)
                except OverrunBufferException:
                    debug("Failed to parse MFT record %s" % (str(count)))
                    continue
                debug("Yielding record " + str(count))
                yield record
def slack_entries(self):
    """
    A generator that yields INDX entries found in the slack space
    associated with this header.
    """
    offset = self.entry_list_end()
    try:
        # spans smaller than 0x52 bytes are never attempted
        while offset <= self.entry_list_allocation_end() - 0x52:
            try:
                debug("Trying to find slack entry at %s." % (hex(offset)))
                candidate = SlackIndexEntry(self._buf, offset, self)
                if candidate.is_valid():
                    debug("Slack entry is valid.")
                    # `or 1` guards against a zero length stalling the scan
                    offset += candidate.length() or 1
                    yield candidate
                else:
                    debug("Slack entry is invalid.")
                    raise ParseException("Not a deleted entry")
            except ParseException:
                debug("Scanning one byte forward.")
                offset += 1
    except struct.error:
        debug("Slack entry parsing overran buffer.")
        pass
def print_bodyfile(options):
    """Emit bodyfile-format listings for the input described by `options`."""
    if options.filetype == "mft" or options.filetype == "image":
        f = NTFSFile(options)
        if options.filter:
            refilter = re.compile(options.filter)
        for record in f.record_generator():
            debug("Considering MFT record %s" % (record.mft_record_number()))
            try:
                # 'FILE' magic; anything else is corrupt/uninitialized
                if record.magic() != 0x454C4946:
                    debug("Record has a bad magic value")
                    continue
                if options.filter:
                    path = f.mft_record_build_path(record, {})
                    if not refilter.search(path):
                        debug("Skipping listing path "
                              "due to regex filter: " + path)
                        continue
                if record.is_active() and options.mftlist:
                    try_write(record_bodyfile(f, record))
                    if options.indxlist or options.slack:
                        try_write(record_indx_entries_bodyfile(options, f, record))
                elif (not record.is_active()) and options.deleted:
                    try_write(record_bodyfile(f, record,
                                              attributes=["deleted"]))
                if options.filetype == "image" and \
                        (options.indxlist or options.slack):
                    extractbuf = array.array("B")
                    found_indxalloc = False
                    for attr in record.attributes():
                        if attr.type() != ATTR_TYPE.INDEX_ALLOCATION:
                            continue
                        found_indxalloc = True
                        if attr.non_resident() != 0:
                            # pull each run's clusters out of the image
                            for (offset, length) in attr.runlist().runs():
                                ooff = offset * options.clustersize + \
                                    options.offset
                                llen = length * options.clustersize
                                extractbuf += f.read(ooff, llen)
                        else:
                            pass  # This shouldn't happen.
                    if found_indxalloc and len(extractbuf) > 0:
                        path = f.mft_record_build_path(record, {})
                        print_nonresident_indx_bodyfile(options, extractbuf,
                                                        basepath=path)
            except InvalidAttributeException:
                pass
    elif options.filetype == "indx":
        with open(options.filename, "rb") as f:
            buf = array.array("B", f.read())
            print_nonresident_indx_bodyfile(options, buf)
def print_bodyfile(options):
    """Emit bodyfile-format listings for the input described by `options`."""
    if options.filetype == "mft" or options.filetype == "image":
        ntfs = NTFSFile(options)
        if options.filter:
            path_pattern = re.compile(options.filter)
        for record in ntfs.record_generator():
            debug("Considering MFT record %s" % (record.mft_record_number()))
            try:
                if record.magic() != 0x454C4946:
                    # not the 'FILE' signature; skip corrupt records
                    debug("Record has a bad magic value")
                    continue
                if options.filter:
                    path = ntfs.mft_record_build_path(record, {})
                    if not path_pattern.search(path):
                        debug("Skipping listing path "
                              "due to regex filter: " + path)
                        continue
                if record.is_active() and options.mftlist:
                    try_write(record_bodyfile(ntfs, record))
                    if options.indxlist or options.slack:
                        try_write(record_indx_entries_bodyfile(options,
                                                               ntfs, record))
                elif (not record.is_active()) and options.deleted:
                    try_write(record_bodyfile(ntfs, record,
                                              attributes=["deleted"]))
                if options.filetype == "image" and \
                        (options.indxlist or options.slack):
                    extractbuf = array.array("B")
                    found_indxalloc = False
                    for attr in record.attributes():
                        if attr.type() != ATTR_TYPE.INDEX_ALLOCATION:
                            continue
                        found_indxalloc = True
                        if attr.non_resident() != 0:
                            for (offset, length) in attr.runlist().runs():
                                ooff = offset * options.clustersize + \
                                    options.offset
                                llen = length * options.clustersize
                                extractbuf += ntfs.read(ooff, llen)
                        else:
                            pass  # This shouldn't happen.
                    if found_indxalloc and len(extractbuf) > 0:
                        path = ntfs.mft_record_build_path(record, {})
                        print_nonresident_indx_bodyfile(options, extractbuf,
                                                        basepath=path)
            except InvalidAttributeException:
                pass
    elif options.filetype == "indx":
        with open(options.filename, "rb") as f:
            buf = array.array("B", f.read())
            print_nonresident_indx_bodyfile(options, buf)
def entries(self):
    """
    A generator that returns each INDEX_ENTRY associated with this node.
    """
    offset = self.header().entries_offset()
    if offset == 0:
        debug("No entries in this allocation block.")
        return
    # stop once fewer than 0x52 bytes remain for another entry
    while offset <= self.header().index_length() - 0x52:
        debug("Entry has another entry after it.")
        entry = self._INDEX_ENTRY(self._buf, self.offset() + offset, self)
        offset += entry.length()
        yield entry
    debug("No more entries.")
def entries(self):
    """
    A generator that returns each INDX entry associated with this node.
    """
    offset = self.entry_list_start()
    if offset == 0:
        debug("No entries in this allocation block.")
        return
    # stop once fewer than 0x52 bytes remain for another entry
    while offset <= self.entry_list_end() - 0x52:
        debug("Entry has another entry after it.")
        entry = IndexEntry(self._buf, self.offset() + offset, self)
        offset += entry.length()
        yield entry
    debug("No more entries.")
def __init__(self, buf, offset, parent):
    """Parse a network volume shell item."""
    debug("SHITEM_NETWORKVOLUMEENTRY @ %s." % (hex(offset)))
    super(SHITEM_NETWORKVOLUMEENTRY, self).__init__(buf, offset, parent)
    self.declare_field("byte", "flags", 0x4)
    # name offset recorded for later accessor use
    self._off_name = 0x5
def __init__(self, buf, offset, parent):
    """Parse an UNKNOWN2 shell item: a flags byte followed by a GUID."""
    debug("SHITEM_UNKNOWNENTRY2 @ %s." % (hex(offset)))
    super(SHITEM_UNKNOWNENTRY2, self).__init__(buf, offset, parent)
    self.declare_field("byte", "flags", 0x3)
    self.declare_field("guid", "guid", 0x4)
def __init__(self, buf, offset, parent):
    """Parse a URI shell item: flags, then a wide-string URI."""
    debug("SHITEM_URIENTRY @ %s." % (hex(offset)))
    super(SHITEM_URIENTRY, self).__init__(buf, offset, parent)
    self.declare_field("dword", "flags", 0x3)
    self.declare_field("wstring", "uri", 0x7)
def __init__(self, buf, offset, parent):
    """A list of shell items laid out back-to-back in `buf` at `offset`."""
    debug("SHITEMLIST @ %s." % (hex(offset)))
    super(SHITEMLIST, self).__init__(buf, offset, parent)
def __init__(self, buf, offset, parent):
    """Parse a control panel shell item: a flags byte and a GUID at 0xD."""
    debug("SHITEM_CONTROLPANELENTRY @ %s." % (hex(offset)))
    super(SHITEM_CONTROLPANELENTRY, self).__init__(buf, offset, parent)
    self.declare_field("byte", "flags", 0x3)
    self.declare_field("guid", "guid", 0xD)
def __init__(self, buf, offset, parent):
    """Parse a volume shell item: a name string starting at 0x3."""
    debug("SHITEM_VOLUMEENTRY @ %s." % (hex(offset)))
    super(SHITEM_VOLUMEENTRY, self).__init__(buf, offset, parent)
    self.declare_field("string", "name", 0x3)
def __init__(self, buf, offset, parent):
    """Base shell item: a 16-bit size followed by an 8-bit type tag."""
    super(SHITEM, self).__init__(buf, offset, parent)
    self.declare_field("word", "size", 0x0)
    self.declare_field("byte", "type", 0x2)
    debug("SHITEM @ %s of type %s." % (hex(offset), hex(self.type())))
def __init__(self, buf, offset, parent):
    """Parse a file entry shell item; the filesize field begins at 0x4."""
    debug("SHITEM_FILEENTRY @ %s." % (hex(offset)))
    super(SHITEM_FILEENTRY, self).__init__(buf, offset, parent, 0x4)
    self.declare_field("byte", "flags", 0x3)
def __init__(self, buf, offset, parent):
    """A runlist: a sequence of run entries starting at `offset`."""
    super(Runlist, self).__init__(buf, offset)
    debug("RUNLIST @ %s." % (hex(offset)))
def shellbag_rec(key, bag_prefix, path_prefix):
    """
    Function to recursively parse the BagMRU Registry key structure.
    Arguments:
    `key`: The current 'BagsMRU' key to recurse into.
    `bag_prefix`: A string containing the current subkey path of the
      relevant 'Bags' key. It will look something like '1\\2\\3\\4'.
    `path_prefix` A string containing the current human-readable, file
      system path so far constructed.
    Throws:
    """
    debug("Considering BagMRU key %s" % (key.path()))
    debug_increase_indent()
    # FIX: ensure the debug indent is always unwound, even if parsing of
    # a child key raises — previously an exception leaked the indent level.
    try:
        # First, consider the current key, and extract shellbag items
        try:
            slot = key.value("NodeSlot").value()
            for bag in bags_key.subkey(str(slot)).subkeys():
                for value in [value for value in bag.values()
                              if "ItemPos" in value.name()]:
                    buf = value.value()
                    debug("Slot %s ITEMPOS @ %s" % (str(slot), value.name()))
                    block = Block(buf, 0x0, False)
                    offset = 0x10
                    while True:
                        offset += 0x8
                        size = block.unpack_word(offset)
                        if size == 0:
                            break
                        elif size < 0x15:
                            # too small to be a valid ITEMPOS entry; skip it
                            pass
                        else:
                            item = ITEMPOS_FILEENTRY(buf, offset, False)
                            debug("Name: " + item.name())
                            shellbags.append({
                                "path": path_prefix + "\\" + item.name(),
                                "mtime": item.m_date(),
                                "atime": item.a_date(),
                                "crtime": item.cr_date(),
                                "source": bag.path() + " @ " + hex(item.offset()),
                                "regsource": bag.path() + "\\" + value.name(),
                                "klwt": key.timestamp(),
                            })
                        offset += size
        except Registry.RegistryValueNotFoundException:
            debug("Registry.RegistryValueNotFoundException")
            pass
        except Registry.RegistryKeyNotFoundException:
            debug("Registry.RegistryKeyNotFoundException")
            pass
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt and SystemExit; narrowed to Exception.
            debug("Unexpected error %s" % sys.exc_info()[0])

        # Next, recurse into each BagMRU key
        # FIX: raw string for the regex — "\d" is an invalid escape
        # sequence (a SyntaxWarning since Python 3.6, an error in 3.12).
        for value in [value for value in key.values()
                      if re.match(r"\d+", value.name())]:
            debug("BagMRU value %s (%s)" % (value.name(), key.path()))
            itemlist = SHITEMLIST(value.value(), 0, False)
            for item in itemlist.items():
                # assume there is only one entry in the value, or take the
                # last as the path component
                debug("Name: " + item.name())
                path = path_prefix + "\\" + item.name()
                shellbags.append({
                    "path": path,
                    "mtime": item.m_date(),
                    "atime": item.a_date(),
                    "crtime": item.cr_date(),
                    "source": key.path() + " @ " + hex(item.offset()),
                    "regsource": key.path() + "\\" + value.name(),
                    "klwt": key.timestamp(),
                })
            shellbag_rec(key.subkey(value.name()),
                         bag_prefix + "\\" + value.name(),
                         path)
    finally:
        debug_decrease_indent()
def __init__(self, buf, offset, parent):
    """Parse a folder shell item carrying a GUID identifier."""
    debug("SHITEM_FOLDERENTRY @ %s." % (hex(offset)))
    super(SHITEM_FOLDERENTRY, self).__init__(buf, offset, parent)
    self._off_folderid = 0x3  # UINT8
    self.declare_field("guid", "guid", 0x4)