def get_e_mail_attachments(self):
    """Checks for OST and PST files in the relevant directories and zips them into a given archive"""
    outlook_dirs = look_for_outlook_dirs(get_userprofiles_from_reg())
    for outlook_dir in outlook_dirs:
        outlook_pst_files = look_for_files(outlook_dir + '\\*.pst')
        outlook_ost_files = look_for_files(outlook_dir + '\\*.ost')
        if len(outlook_pst_files) > 0:
            zip_archive(outlook_pst_files, self.output_dir, 'pst', self.logger)
        if len(outlook_ost_files) > 0:
            zip_archive(outlook_ost_files, self.output_dir, 'ost', self.logger)
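# `look_for_files` and `look_for_outlook_dirs` are not defined in this snippet; a minimal
# sketch of the first, assuming it is a thin wrapper around glob (hypothetical helper shown
# only for context, not necessarily the project's actual implementation):
import glob

def look_for_files(pattern):
    # Return every path matching the wildcard pattern, e.g. 'C:\\Users\\foo\\*.pst'.
    return glob.glob(pattern)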
def _list_windows_prefetch(self):
    """Outputs Windows prefetch files in a csv.
    See http://www.forensicswiki.org/wiki/Windows_Prefetch_File_Format"""
    prefetch_path = self.systemroot + '\\Prefetch\\*.pf'
    list_prefetch_files = look_for_files(prefetch_path)
    for prefetch_file in list_prefetch_files:
        content = ''
        with open(prefetch_file, 'rb') as file_input:
            content = file_input.read()
        try:
            format_version = content[:4]
            format_version = get_int_from_reversed_string(format_version)
            # scca_sig = content[0x4:][:4]
            unknown_values = content[0x0008:0x0008 + 4]
            unknown_values = ' '.join(c.encode('hex') for c in unknown_values)
            file_size = content[0x000c:0x000c + 4]
            file_size = get_int_from_reversed_string(file_size)
            exec_name = content[0x0010:0x0010 + 60]
            # The executable name is a null-terminated UTF-16-LE string; stop at the terminator.
            for i in range(30):  # 60 / 2
                if 2 * i + 1 < len(exec_name):
                    if exec_name[2 * i] == '\x00' and exec_name[2 * i + 1] == '\x00':
                        exec_name = exec_name[:2 * (i + 1)].decode('utf-16-le')
                        break
            prefetch_hash = content[0x004c:0x004c + 4]
            tc = os.path.getctime(prefetch_file)
            tm = os.path.getmtime(prefetch_file)
            section_a = get_int_from_reversed_string(content[0x0054:0x0054 + 4])
            num_entries_a = get_int_from_reversed_string(content[0x0058:0x0058 + 4])
            section_b = get_int_from_reversed_string(content[0x005c:0x005c + 4])
            num_entries_b = get_int_from_reversed_string(content[0x0060:0x0060 + 4])
            section_c = get_int_from_reversed_string(content[0x0064:0x0064 + 4])
            length_c = get_int_from_reversed_string(content[0x0068:0x0068 + 4])
            section_d = get_int_from_reversed_string(content[0x006c:0x006c + 4])
            num_entries_d = get_int_from_reversed_string(content[0x0070:0x0070 + 4])
            length_d = get_int_from_reversed_string(content[0x0074:0x0074 + 4])
            if format_version == 17:
                latest_exec_date = content[0x0078:0x0078 + 8]
                exec_count = get_int_from_reversed_string(content[0x0090:0x0090 + 4])  # section a
            elif format_version == 23:
                latest_exec_date = content[0x0080:0x0080 + 8]
                exec_count = get_int_from_reversed_string(content[0x0098:0x0098 + 4])
            else:  # format version 26
                latest_exec_date = []
                for i in range(8):
                    latest_exec_date.append(content[0x0088 + i * 8:0x0088 + (i + 1) * 8])
                exec_count = get_int_from_reversed_string(content[0x00D0:0x00D0 + 4])
            hash_table_a = self.__decode_section_a(format_version, content, section_a)
            list_str_c = self.__decode_section_c(content, section_c, length_c)
            yield (prefetch_file, format_version, file_size, exec_name,
                   datetime.datetime.fromtimestamp(tc), datetime.datetime.fromtimestamp(tm),
                   exec_count, hash_table_a, list_str_c)
        except Exception:
            logging.error(traceback.format_exc())
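# `get_int_from_reversed_string` is assumed here to turn a little-endian byte string into an
# integer, matching the struct.unpack("<I", ...) calls used in the later variants of this
# function; a minimal sketch of that assumed behaviour (hypothetical implementation):
import struct

def get_int_from_reversed_string(reversed_string):
    # Interpret four little-endian bytes as an unsigned 32-bit integer.
    return struct.unpack('<I', reversed_string[:4])[0]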
def _get_startup_files(self, path):
    files = look_for_files(path)
    zip_archive(files, self.output_dir, 'autoruns', self.logger, 'a')
    for start_file in files:
        md5, sha1, sha256 = process_hashes(start_file)
        user = start_file.replace(self.userprofile + '\\', '').split('\\', 1)[0]
        filename = os.path.split(start_file)[1]
        yield [self.computer_name, 'startup_file', filename, user, md5, sha1, sha256]
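# `process_hashes` is not shown in this snippet; a minimal sketch, assuming it returns the
# MD5, SHA-1 and SHA-256 hex digests of a file (hypothetical helper, reading in chunks to
# bound memory use on large files):
import hashlib

def process_hashes(path):
    md5, sha1, sha256 = hashlib.md5(), hashlib.sha1(), hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            md5.update(chunk)
            sha1.update(chunk)
            sha256.update(chunk)
    return md5.hexdigest(), sha1.hexdigest(), sha256.hexdigest()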
def __data_from_userprofile(self, zipname, directories_to_search):
    """Retrieves data from userprofile.
    Creates a zip archive containing files from the directories given in parameters."""
    userprofiles = get_userprofiles_from_reg()
    # File mode is write and truncate for the first iteration, append after
    file_mode = 'w'
    for userprofile in userprofiles:
        if userprofile.startswith('%'):
            usrp_tokens = userprofile.split('\\')
            prefix = usrp_tokens[0]
            env = prefix.replace('%', '')
            userprofile = userprofile.replace(prefix, os.environ[env.upper()])
        for directory_to_search in directories_to_search:
            full_path = userprofile + '\\' + directory_to_search
            # construct the list of files in the directory_to_search for the zip function
            list_directories = look_for_files(full_path)
            for directory in list_directories:
                list_files = self.__enum_directory(directory)
                zip_archive(list_files, self.output_dir, zipname, self.logger, file_mode)
                file_mode = 'a'
def __data_from_userprofile(self, zipname, directories_to_search):
    """Retrieves data from userprofile.
    Creates a zip archive containing files from the directories given in parameters."""
    userprofiles = get_userprofiles_from_reg()
    # File mode is write and truncate for the first iteration, append after
    file_mode = 'w'
    for userprofile in userprofiles:
        if userprofile.startswith('%'):
            usrp_tokens = userprofile.split('\\')
            prefix = usrp_tokens[0]
            env = prefix.replace('%', '')
            userprofile = userprofile.replace(prefix, os.environ[env])
        for directory_to_search in directories_to_search:
            full_path = userprofile + '\\' + directory_to_search
            # construct the list of files in the directory_to_search for the zip function
            list_directories = look_for_files(full_path)
            for directory in list_directories:
                list_files = self.__enum_directory(directory)
                zip_archive(list_files, self.output_dir, zipname, self.logger, file_mode)
                file_mode = 'a'
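# The manual '%SystemDrive%'-style expansion above could equally be done with the standard
# library; a minimal sketch of the same idea using os.path.expandvars (behaviour differs
# slightly: unknown variables are left untouched instead of raising KeyError):
import os

def expand_profile_path(userprofile):
    # '%SystemDrive%\\Users\\foo' -> 'C:\\Users\\foo' when the variable is defined.
    return os.path.expandvars(userprofile)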
def _list_windows_prefetch(self, is_compressed=False):
    """Outputs Windows prefetch files in a csv.
    See http://www.forensicswiki.org/wiki/Windows_Prefetch_File_Format"""
    prefetch_path = self.systemroot + '\\Prefetch\\*.pf'
    list_prefetch_files = look_for_files(prefetch_path)
    for prefetch_file in list_prefetch_files:
        content = ''
        with open(prefetch_file, 'rb') as file_input:
            content = file_input.read()
        try:
            if is_compressed:
                # Windows 10 prefetch files are compressed: an 8-byte header (signature +
                # uncompressed size) followed by an XPRESS-compressed buffer, decompressed
                # here via ntdll.
                header = content[:8]
                content = content[8:]
                signature, uncompressed_size = struct.unpack('<LL', header)
                algo = (signature & 0x0F000000) >> 24
                RtlDecompressBufferEx = ctypes.windll.ntdll.RtlDecompressBufferEx
                RtlGetCompressionWorkSpaceSize = ctypes.windll.ntdll.RtlGetCompressionWorkSpaceSize
                CompressBufferWorkSpaceSize = ctypes.c_uint32()
                CompressFragmentWorkSpaceSize = ctypes.c_uint32()
                RtlGetCompressionWorkSpaceSize(algo,
                                               ctypes.byref(CompressBufferWorkSpaceSize),
                                               ctypes.byref(CompressFragmentWorkSpaceSize))
                Compressed = (ctypes.c_ubyte * len(content)).from_buffer_copy(content)
                Uncompressed = (ctypes.c_ubyte * uncompressed_size)()
                FinalUncompressedSize = ctypes.c_uint32()
                Workspace = (ctypes.c_ubyte * CompressFragmentWorkSpaceSize.value)()
                ntstatus = RtlDecompressBufferEx(ctypes.c_uint16(algo),
                                                 ctypes.byref(Uncompressed),
                                                 ctypes.c_uint32(uncompressed_size),
                                                 ctypes.byref(Compressed),
                                                 ctypes.c_uint32(len(content)),
                                                 ctypes.byref(FinalUncompressedSize),
                                                 ctypes.byref(Workspace))
                uncompressed = list(Uncompressed)
                content = b"".join([chr(c) for c in uncompressed])
            format_version = content[:4]
            format_version = struct.unpack("<I", format_version)[0]
            # scca_sig = content[0x4:][:4]
            unknown_values = content[0x0008:0x0008 + 4]
            unknown_values = ' '.join(c.encode('hex') for c in unknown_values)
            file_size = content[0x000c:0x000c + 4]
            file_size = struct.unpack("<I", file_size)[0]
            exec_name = content[0x0010:0x0010 + 60]
            try:
                exec_name = exec_name.decode('utf-16-le').replace("\x00", "")
                exec_name = exec_name.split('.EXE')[0] + '.EXE'
            except Exception:
                exec_name = 'N/A'
            prefetch_hash = content[0x004c:0x004c + 4]
            tc = os.path.getctime(prefetch_file)
            tm = os.path.getmtime(prefetch_file)
            section_a = struct.unpack("<I", content[0x0054:0x0054 + 4])[0]
            num_entries_a = struct.unpack("<I", content[0x0058:0x0058 + 4])[0]
            section_b = struct.unpack("<I", content[0x005c:0x005c + 4])[0]
            num_entries_b = struct.unpack("<I", content[0x0060:0x0060 + 4])[0]
            section_c = struct.unpack("<I", content[0x0064:0x0064 + 4])[0]
            length_c = struct.unpack("<I", content[0x0068:0x0068 + 4])[0]
            section_d = struct.unpack("<I", content[0x006c:0x006c + 4])[0]
            num_entries_d = struct.unpack("<I", content[0x0070:0x0070 + 4])[0]
            length_d = struct.unpack("<I", content[0x0074:0x0074 + 4])[0]
            if format_version == 17:
                latest_exec_date = content[0x0078:0x0078 + 8]
                exec_count = struct.unpack("<I", content[0x0090:0x0090 + 4])[0]  # section a
            elif format_version == 23:
                latest_exec_date = content[0x0080:0x0080 + 8]
                exec_count = struct.unpack("<I", content[0x0098:0x0098 + 4])[0]
            else:  # format version 26
                latest_exec_date = []
                for i in range(8):
                    latest_exec_date.append(content[0x0088 + i * 8:0x0088 + (i + 1) * 8])
                exec_count = struct.unpack("<I", content[0x00D0:0x00D0 + 4])[0]
            hash_table_a = self.__decode_section_a(format_version, content, section_a)
            try:
                list_str_c = self.__decode_section_c(content, section_c, length_c)
                yield (prefetch_file, format_version, file_size, exec_name,
                       datetime.datetime.fromtimestamp(tc), datetime.datetime.fromtimestamp(tm),
                       exec_count, hash_table_a, list_str_c)
            except Exception:
                pass
        except Exception:
            self.logger.error(traceback.format_exc())
            self.logger.error('Error decoding prefetch %s' % prefetch_file)
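# `latest_exec_date` above is kept as the raw 8-byte value(s); prefetch stores it as a
# Windows FILETIME, i.e. 100-nanosecond ticks since 1601-01-01 UTC. A minimal sketch of the
# conversion, should a readable timestamp be needed (helper name is illustrative):
import datetime
import struct

def filetime_to_datetime(raw_filetime):
    ticks = struct.unpack('<Q', raw_filetime)[0]
    return datetime.datetime(1601, 1, 1) + datetime.timedelta(microseconds=ticks / 10)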
def _list_named_pipes(self):
    for p in look_for_files('\\\\.\\pipe\\*'):
        yield p
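# Globbing '\\\\.\\pipe\\*' only works on Windows; a standalone equivalent of the generator
# above, assuming `look_for_files` wraps glob as sketched earlier in this file:
import glob

def list_named_pipes():
    # Each entry is a currently open named pipe, e.g. '\\\\.\\pipe\\lsass'.
    return glob.glob(r'\\.\pipe\*')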
def _list_windows_prefetch(self, is_compressed=False):
    """Outputs Windows prefetch files in a csv.
    See http://www.forensicswiki.org/wiki/Windows_Prefetch_File_Format"""
    prefetch_path = self.systemroot + '\\Prefetch\\*.pf'
    list_prefetch_files = look_for_files(prefetch_path)
    for prefetch_file in list_prefetch_files:
        content = ''
        with open(prefetch_file, 'rb') as file_input:
            content = file_input.read()
        try:
            if is_compressed:
                # Windows 10 prefetch files are compressed: an 8-byte header (signature +
                # uncompressed size) followed by an XPRESS-compressed buffer, decompressed
                # here via ntdll.
                header = content[:8]
                content = content[8:]
                signature, uncompressed_size = struct.unpack('<LL', header)
                algo = (signature & 0x0F000000) >> 24
                RtlDecompressBufferEx = ctypes.windll.ntdll.RtlDecompressBufferEx
                RtlGetCompressionWorkSpaceSize = ctypes.windll.ntdll.RtlGetCompressionWorkSpaceSize
                CompressBufferWorkSpaceSize = ctypes.c_uint32()
                CompressFragmentWorkSpaceSize = ctypes.c_uint32()
                RtlGetCompressionWorkSpaceSize(algo,
                                               ctypes.byref(CompressBufferWorkSpaceSize),
                                               ctypes.byref(CompressFragmentWorkSpaceSize))
                Compressed = (ctypes.c_ubyte * len(content)).from_buffer_copy(content)
                Uncompressed = (ctypes.c_ubyte * uncompressed_size)()
                FinalUncompressedSize = ctypes.c_uint32()
                Workspace = (ctypes.c_ubyte * CompressFragmentWorkSpaceSize.value)()
                ntstatus = RtlDecompressBufferEx(ctypes.c_uint16(algo),
                                                 ctypes.byref(Uncompressed),
                                                 ctypes.c_uint32(uncompressed_size),
                                                 ctypes.byref(Compressed),
                                                 ctypes.c_uint32(len(content)),
                                                 ctypes.byref(FinalUncompressedSize),
                                                 ctypes.byref(Workspace))
                uncompressed = list(Uncompressed)
                content = b"".join([chr(c) for c in uncompressed])
            format_version = content[:4]
            format_version = struct.unpack("<I", format_version)[0]
            # scca_sig = content[0x4:][:4]
            unknown_values = content[0x0008:0x0008 + 4]
            unknown_values = ' '.join(c.encode('hex') for c in unknown_values)
            file_size = content[0x000c:0x000c + 4]
            file_size = struct.unpack("<I", file_size)[0]
            exec_name = content[0x0010:0x0010 + 60]
            try:
                exec_name = exec_name.decode('utf-16-le').replace("\x00", "")
                exec_name = exec_name.split('.EXE')[0] + '.EXE'
            except Exception:
                exec_name = 'N/A'
            prefetch_hash = content[0x004c:0x004c + 4]
            tc = os.path.getctime(prefetch_file)
            tm = os.path.getmtime(prefetch_file)
            section_a = struct.unpack("<I", content[0x0054:0x0054 + 4])[0]
            num_entries_a = struct.unpack("<I", content[0x0058:0x0058 + 4])[0]
            section_b = struct.unpack("<I", content[0x005c:0x005c + 4])[0]
            num_entries_b = struct.unpack("<I", content[0x0060:0x0060 + 4])[0]
            section_c = struct.unpack("<I", content[0x0064:0x0064 + 4])[0]
            length_c = struct.unpack("<I", content[0x0068:0x0068 + 4])[0]
            section_d = struct.unpack("<I", content[0x006c:0x006c + 4])[0]
            num_entries_d = struct.unpack("<I", content[0x0070:0x0070 + 4])[0]
            length_d = struct.unpack("<I", content[0x0074:0x0074 + 4])[0]
            if format_version == 17:
                latest_exec_date = content[0x0078:0x0078 + 8]
                exec_count = struct.unpack("<I", content[0x0090:0x0090 + 4])[0]  # section a
            elif format_version == 23:
                latest_exec_date = content[0x0080:0x0080 + 8]
                exec_count = struct.unpack("<I", content[0x0098:0x0098 + 4])[0]
            else:  # format version 26
                latest_exec_date = []
                for i in range(8):
                    latest_exec_date.append(content[0x0088 + i * 8:0x0088 + (i + 1) * 8])
                exec_count = struct.unpack("<I", content[0x00D0:0x00D0 + 4])[0]
            hash_table_a = self.__decode_section_a(format_version, content, section_a)
            try:
                list_str_c = self.__decode_section_c(content, section_c, length_c)
                yield (prefetch_file, format_version, file_size, exec_name,
                       datetime.datetime.utcfromtimestamp(tc), datetime.datetime.utcfromtimestamp(tm),
                       exec_count, hash_table_a, list_str_c)
            except Exception:
                pass
        except Exception:
            self.logger.error(traceback.format_exc())
            self.logger.error('Error decoding prefetch %s' % prefetch_file)
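# The generator above only yields tuples; a minimal sketch of how a caller might serialise
# them into the CSV promised by the docstring (file name and column order are assumptions):
import csv

def dump_prefetch_csv(collector, out_path='prefetch.csv'):
    with open(out_path, 'wb') as f:  # use 'w', newline='' on Python 3
        writer = csv.writer(f)
        writer.writerow(['file', 'version', 'size', 'exe', 'created', 'modified', 'run_count'])
        for row in collector._list_windows_prefetch():
            # Drop the hash table and string list, keep the scalar columns.
            writer.writerow(list(row[:7]))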
def _list_windows_prefetch(self):
    """Outputs Windows prefetch files in a csv.
    See http://www.forensicswiki.org/wiki/Windows_Prefetch_File_Format"""
    prefetch_path = self.systemroot + '\\Prefetch\\*.pf'
    list_prefetch_files = look_for_files(prefetch_path)
    for prefetch_file in list_prefetch_files:
        content = ''
        with open(prefetch_file, 'rb') as file_input:
            content = file_input.read()
        try:
            format_version = content[:4]
            format_version = get_int_from_reversed_string(format_version)
            # scca_sig = content[0x4:][:4]
            unknown_values = content[0x0008:0x0008 + 4]
            unknown_values = ' '.join(c.encode('hex') for c in unknown_values)
            file_size = content[0x000c:0x000c + 4]
            file_size = get_int_from_reversed_string(file_size)
            exec_name = content[0x0010:0x0010 + 60]
            # The executable name is a null-terminated UTF-16-LE string; stop at the terminator.
            for i in range(30):  # 60 / 2
                if 2 * i + 1 < len(exec_name):
                    if exec_name[2 * i] == '\x00' and exec_name[2 * i + 1] == '\x00':
                        exec_name = exec_name[:2 * (i + 1)].decode('utf-16-le')
                        break
            prefetch_hash = content[0x004c:0x004c + 4]
            tc = os.path.getctime(prefetch_file)
            tm = os.path.getmtime(prefetch_file)
            section_a = get_int_from_reversed_string(content[0x0054:0x0054 + 4])
            num_entries_a = get_int_from_reversed_string(content[0x0058:0x0058 + 4])
            section_b = get_int_from_reversed_string(content[0x005c:0x005c + 4])
            num_entries_b = get_int_from_reversed_string(content[0x0060:0x0060 + 4])
            section_c = get_int_from_reversed_string(content[0x0064:0x0064 + 4])
            length_c = get_int_from_reversed_string(content[0x0068:0x0068 + 4])
            section_d = get_int_from_reversed_string(content[0x006c:0x006c + 4])
            num_entries_d = get_int_from_reversed_string(content[0x0070:0x0070 + 4])
            length_d = get_int_from_reversed_string(content[0x0074:0x0074 + 4])
            if format_version == 17:
                latest_exec_date = content[0x0078:0x0078 + 8]
                exec_count = get_int_from_reversed_string(content[0x0090:0x0090 + 4])  # section a
            elif format_version == 23:
                latest_exec_date = content[0x0080:0x0080 + 8]
                exec_count = get_int_from_reversed_string(content[0x0098:0x0098 + 4])
            else:  # format version 26
                latest_exec_date = []
                for i in range(8):
                    latest_exec_date.append(content[0x0088 + i * 8:0x0088 + (i + 1) * 8])
                exec_count = get_int_from_reversed_string(content[0x00D0:0x00D0 + 4])
            hash_table_a = self.__decode_section_a(format_version, content, section_a)
            list_str_c = self.__decode_section_c(content, section_c, length_c)
            yield (prefetch_file, format_version, file_size, exec_name,
                   datetime.datetime.fromtimestamp(tc), datetime.datetime.fromtimestamp(tm),
                   exec_count, hash_table_a, list_str_c)
        except Exception:
            logging.error(traceback.format_exc())