def test_open(self):
    """Tests the open function."""
    if not unittest.source:
        raise unittest.SkipTest("missing source")

    esedb_file = pyesedb.open(unittest.source)
    self.assertIsNotNone(esedb_file)

    esedb_file.close()

    with self.assertRaises(TypeError):
        pyesedb.open(None)

    with self.assertRaises(ValueError):
        pyesedb.open(unittest.source, mode="w")
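# A minimal sketch (not taken from any of the snippets here) of the open/inspect/close
# pattern the examples below build on; list_tables and its path argument are hypothetical
# placeholders, but every pyesedb call used is part of the library's public API.
import pyesedb

def list_tables(path):
    if not pyesedb.check_file_signature(path):
        raise ValueError("not an ESE database: {}".format(path))
    esedb_file = pyesedb.open(path)
    try:
        for table_index in range(esedb_file.get_number_of_tables()):
            table = esedb_file.get_table(table_index)
            # Each table exposes its schema through column objects.
            column_names = [
                table.get_column(column_index).get_name()
                for column_index in range(table.get_number_of_columns())]
            print(table.get_name(), column_names)
    finally:
        esedb_file.close()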
def __init__(self, ese_file):
    self.ese_db = pyesedb.open(ese_file)
    self.GUID_tables = {
        'SruDbIdMapTable': 'SruDbIdMapTable',
        'NetworkDataUsageMonitor': '{973F5D5C-1D90-4944-BE8E-24B94231A174}',
        'ApplicationResourceUsage': '{D10CA2FE-6FCF-4F6D-848E-B2E99266FA89}',
        'EnergyEstimator': '{DA73FB89-2BEA-4DDC-86B8-6E048C6DA477}',
        'NetworkConnectivityUsageMonitor': '{DD6636C4-8929-4683-974E-22C046A43763}',
        'EnergyUsage': '{FEE4E14F-02A9-4550-B5CE-5FA2DA202E37}',
        'LongTermEnergyUsage': '{FEE4E14F-02A9-4550-B5CE-5FA2DA202E37}LT'
    }
    # list all types used in ese DBs (kept for reference, currently disabled)
    """
    self.ese_db_columns_types = {
        'BINARY_DATA': pyesedb.column_types.BINARY_DATA,
        'BOOLEAN': pyesedb.column_types.BOOLEAN,
        'CURRENCY': pyesedb.column_types.CURRENCY,
        'DATE_TIME': pyesedb.column_types.DATE_TIME,
        'DOUBLE_64BIT': pyesedb.column_types.DOUBLE_64BIT,
        'FLOAT_32BIT': pyesedb.column_types.FLOAT_32BIT,
        'GUID': pyesedb.column_types.GUID,
        'INTEGER_16BIT_SIGNED': pyesedb.column_types.INTEGER_16BIT_SIGNED,
        'INTEGER_16BIT_UNSIGNED': pyesedb.column_types.INTEGER_16BIT_UNSIGNED,
        'INTEGER_32BIT_SIGNED': pyesedb.column_types.INTEGER_32BIT_SIGNED,
        'INTEGER_32BIT_UNSIGNED': pyesedb.column_types.INTEGER_32BIT_UNSIGNED,
        'INTEGER_64BIT_SIGNED': pyesedb.column_types.INTEGER_64BIT_SIGNED,
        'INTEGER_8BIT_UNSIGNED': pyesedb.column_types.INTEGER_8BIT_UNSIGNED,
        'LARGE_BINARY_DATA': pyesedb.column_types.LARGE_BINARY_DATA,
        'LARGE_TEXT': pyesedb.column_types.LARGE_TEXT,
        'NULL': pyesedb.column_types.NULL,
        'SUPER_LARGE_VALUE': pyesedb.column_types.SUPER_LARGE_VALUE,
        'TEXT': pyesedb.column_types.TEXT
    }
    """
    # get all indices from the table
    self.SruDbIdMapTable = self.get_SruDbIdMapTable_details()
    # get all application usage from the table
    self.ApplicationResourceUsage = self.get_ApplicationResourceUsage_details()
    # get all network data usage from the table
    self.NetworkDataUsageMonitor = self.get_NetworkDataUsageMonitor_details()
    # get all network connectivity usage from the table
    self.NetworkConnectivityUsageMonitor = self.get_NetworkConnectivityUsageMonitor_details()
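# The get_*_details helpers called above are not shown here. A hedged sketch of how such a
# helper could resolve a logical name through GUID_tables and walk the records follows;
# _iter_table_records is a hypothetical name, not part of the original class.
def _iter_table_records(self, logical_name):
    table = self.ese_db.get_table_by_name(self.GUID_tables[logical_name])
    if table is None:
        return []
    rows = []
    for record in table.records:
        # Values are read positionally; decoding them per column type is left to the caller.
        rows.append([record.get_value_data(i) for i in range(record.get_number_of_values())])
    return rows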
def analyse_edge_history(USER_FOLDER, API_KEY, API_SECRET):
    """
    Uses the pyesedb Python wrapper around libesedb.
    libesedb is a library to access the Extensible Storage Engine (ESE)
    Database File (EDB) format: https://github.com/libyal/libesedb
    Edge stores many things, including cookies and browsing history, in this format.
    """
    # TODO: on some systems there can be more than one WebCacheV??.dat file
    esedbfile = os.path.join(
        USER_FOLDER, 'AppData/Local/Microsoft/Windows/WebCache/WebCacheV01.dat')
    db = pyesedb.open(esedbfile)
    for table in db.tables:
        if "HstsEntryEx" in table.get_name():
            # TODO: other tables may contain other interesting artifacts in future versions
            for record in table.records:
                lastused = record.get_value_data_as_integer(5)
                url = record.get_value_data_as_string(6)
                if url[0] != ":":  # URLs starting with a colon have a wrong date
                    url = '.'.join(url.split('.')[::-1])  # URLs are stored reversed
                    timestamp = filetime_to_dt(lastused)
                    cat, details = get_url_category(url, API_KEY, API_SECRET)
                    if cat:
                        add_url_to_ela(timestamp, url, cat, category_into_group(cat), "edge", None)
                    else:
                        print(details)
    db.close()
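# filetime_to_dt, get_url_category, category_into_group and add_url_to_ela are defined
# elsewhere in that project. A common way to implement the FILETIME conversion is shown
# below as an assumption, not that project's actual code: FILETIME counts 100-nanosecond
# intervals since 1601-01-01 UTC.
import datetime

def filetime_to_dt(filetime):
    # Integer division by 10 turns 100 ns ticks into whole microseconds.
    return datetime.datetime(1601, 1, 1) + datetime.timedelta(microseconds=filetime // 10)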
options = parser.parse_args()

if not options.ESE_INFILE:
    options.ESE_INFILE = raw_input(
        r"What is the path to the ESE file? (Ex: \image-mount-point\Windows\system32\Windows.edb) : ")
    options.XLSX_OUTFILE = raw_input(
        r"What is my output file name (including path)? (Ex: \users\me\Desktop\results.xlsx) : ")
    options.XLSX_TEMPLATE = raw_input(
        "What XLSX template should I use? (The tool 'ese_template' can create one for you) : ")

warnings.simplefilter("ignore")
try:
    ese_db = pyesedb.open(options.ESE_INFILE)
except Exception as e:
    print "I could not open the specified SRUM file. Check your path and file name."
    print "Error : ", str(e)
    sys.exit(1)

try:
    template_wb = openpyxl.load_workbook(filename=options.XLSX_TEMPLATE, read_only=True)
except Exception as e:
    print "I could not open the specified template file %s. Check your path and file name." % (
        options.XLSX_TEMPLATE)
    print "Error : ", str(e)
    sys.exit(1)

target_wb = openpyxl.Workbook()
def ie_edge_open(path):
    if pyesedb.check_file_signature(path):
        return pyesedb.open(path, 'rb')
    else:
        print("[Error] input file error by fortools\nPlease check your file")
        return -1
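# Hypothetical usage of ie_edge_open above (the file name is a placeholder); callers are
# expected to check for the -1 sentinel before touching the handle.
db = ie_edge_open("WebCacheV01.dat")  # placeholder path
if db != -1:
    print("tables: {}".format(db.get_number_of_tables()))
    db.close()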
def main():
    # Parse the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-i", "--inputfile", dest="InputFile",
        help="Path to the IE history file. Example: python Browser_IE_History.py -i IEHistoryFile.dat -o Results.CSV")
    parser.add_argument(
        "-o", "--outputfile", dest="OutputFile",
        help="Path to export the CSV. Example: python Browser_IE_History.py -i IEHistoryFile.dat -o Results.CSV")
    args = parser.parse_args()

    try:
        db = pyesedb.open(args.InputFile, mode='r')

        # Get the total number of tables
        NumberOfTables = db.get_number_of_tables()

        # Get each table name and add it to a list
        AllTableNames = []
        for i in range(0, NumberOfTables):
            table = db.get_table(i)
            tableName = table.get_name()
            AllTableNames.append(tableName)

        # Extract the internet history from each Container table
        URLHistory = []
        Headers = ['AccessTime', 'URL', 'AccessCount', 'RedirectURL', 'FileName']
        URLHistory.append(Headers)
        for i in AllTableNames:
            if re.match(r'Container_', i) is not None:
                table = db.get_table_by_name(i)
                NumberOfRecords = table.get_number_of_records()
                for h in range(0, NumberOfRecords):
                    record = table.get_record(h)
                    ticks = record.get_value_data_as_integer(13)
                    converted_time = filetime.to_datetime(ticks)
                    time = converted_time.strftime("%Y-%m-%d %H:%M:%S")
                    sublist = [
                        time,
                        record.get_value_data_as_string(17),
                        record.get_value_data_as_integer(8),
                        record.get_value_data_as_string(22),
                        record.get_value_data_as_string(18)
                    ]
                    URLHistory.append(sublist)

        # Save the results to a CSV
        with open(args.OutputFile, 'w') as f:
            wr = csv.writer(f, lineterminator='\n')
            for row in URLHistory:
                wr.writerow(row)
    except Exception:
        print("Check your syntax, use full file paths, and make sure libesedb-python and "
              "winfiletime are installed... "
              "python Browser_IE_History.py -i /fullpath/IEHistoryfile.dat -o /fullpath/history.csv")
if not os.path.exists(options.XLSX_TEMPLATE):
    print "Template File Not found: " + options.XLSX_TEMPLATE
    abort(1)

if options.reghive:
    if not os.path.exists(options.reghive):
        print "Registry File Not found: " + options.reghive
        abort(1)

if options.reghive:
    interface_table = load_interfaces(options.reghive)

try:
    warnings.simplefilter("ignore")
    ese_db = pyesedb.open(options.SRUM_INFILE)
except Exception as e:
    print "I could not open the specified SRUM file. Check your path and file name."
    print "Error : ", str(e)
    abort(1)

try:
    template_wb = openpyxl.load_workbook(filename=options.XLSX_TEMPLATE, read_only=True)
except Exception as e:
    print "I could not open the specified template file %s. Check your path and file name." % (
        options.XLSX_TEMPLATE)
    print "Error : ", str(e)
    abort(1)

id_table = load_lookups(ese_db)
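# load_lookups and load_interfaces are defined elsewhere in that tool and are not shown here.
# Purely as a hedged sketch of the idea behind an id lookup table, one could map IdIndex
# values to their raw blobs from SruDbIdMapTable (mirroring the column positions used in the
# last example below); this is an assumption, not the tool's actual implementation.
def load_lookups(ese_db):
    lookups = {}
    map_table = ese_db.get_table_by_name("SruDbIdMapTable")
    if map_table is not None:
        for record in map_table.records:
            id_index = record.get_value_data_as_integer(1)
            lookups[id_index] = record.get_value_data(2)
    return lookups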
def main(evidence, image_type):
    # Create TSK object and query for the SRUM database directory
    tsk_util = TSKUtil(evidence, image_type)
    path = "/Windows/System32/sru"
    srum_dir = tsk_util.query_directory(path)
    if srum_dir is not None:
        srum_files = tsk_util.recurse_files("SRUDB.dat", path=path, logic="equal")
        if srum_files is not None:
            print("[+] Identified {} potential SRUDB.dat file(s)".format(len(srum_files)))
            for hit in srum_files:
                srum_file = hit[2]
                srum_tables = {}
                temp_srum = write_file(srum_file)
                if pyesedb.check_file_signature(temp_srum):
                    srum_dat = pyesedb.open(temp_srum)
                    print("[+] Process {} tables within database".format(srum_dat.number_of_tables))
                    # First pass: build the application id lookup from SruDbIdMapTable
                    for table in srum_dat.tables:
                        if table.name != "SruDbIdMapTable":
                            continue
                        global APP_ID_LOOKUP
                        for entry in table.records:
                            app_id = entry.get_value_data_as_integer(1)
                            try:
                                app = entry.get_value_data(2).replace("\x00", "")
                            except AttributeError:
                                app = ""
                            APP_ID_LOOKUP[app_id] = app

                    # Second pass: dump every table's columns and converted values
                    for table in srum_dat.tables:
                        t_name = table.name
                        print("[+] Processing {} table with {} records".format(
                            t_name, table.number_of_records))
                        srum_tables[t_name] = {"columns": [], "data": []}
                        columns = [x.name for x in table.columns]
                        srum_tables[t_name]["columns"] = columns
                        for entry in table.records:
                            data = []
                            for x in range(entry.number_of_values):
                                data.append(convert_data(
                                    entry.get_value_data(x), columns[x], entry.get_column_type(x)))
                            srum_tables[t_name]["data"].append(data)
                        write_output(t_name, srum_tables)
                else:
                    print("[-] {} not a valid SRUDB.dat file. Removing "
                          "temp file...".format(temp_srum))
                    os.remove(temp_srum)
                    continue
        else:
            print("[-] SRUDB.dat files not found in {} directory".format(path))
            sys.exit(3)
    else:
        print("[-] Directory {} not found".format(path))
        sys.exit(2)
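# convert_data and write_output are helpers defined elsewhere in that recipe. A hedged,
# simplified sketch of the kind of per-column-type dispatch convert_data performs is shown
# below; it assumes UTF-16-LE text and little-endian integers and is not the recipe's code.
import binascii
import struct

def convert_data(data, column_name, column_type):
    if data is None:
        return ""
    if column_type == pyesedb.column_types.INTEGER_32BIT_SIGNED and len(data) == 4:
        return struct.unpack("<i", data)[0]
    if column_type == pyesedb.column_types.INTEGER_64BIT_SIGNED and len(data) == 8:
        return struct.unpack("<q", data)[0]
    if column_type in (pyesedb.column_types.TEXT, pyesedb.column_types.LARGE_TEXT):
        # Assumes the common Unicode codepage; real SRUM columns may vary.
        return data.decode("utf-16-le", "replace")
    # Fall back to a hex dump for binary and unhandled types.
    return binascii.hexlify(data)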